<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="methods-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neuroinform.</journal-id>
<journal-title>Frontiers in Neuroinformatics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neuroinform.</abbrev-journal-title>
<issn pub-type="epub">1662-5196</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fninf.2024.1354436</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Methods</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Epileptic seizure prediction based on EEG using pseudo-three-dimensional CNN</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Liu</surname> <given-names>Xin</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2510514/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author"><name><surname>Li</surname> <given-names>Chunyang</given-names></name><xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author"><name><surname>Lou</surname> <given-names>Xicheng</given-names></name><xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author"><name><surname>Kong</surname> <given-names>Haohuan</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author"><name><surname>Li</surname> <given-names>Xinwei</given-names></name><xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/783705/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Li</surname> <given-names>Zhangyong</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Zhong</surname> <given-names>Lisha</given-names></name><xref ref-type="aff" rid="aff5"><sup>5</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1443475/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Research Center of Biomedical Engineering, Chongqing University of Posts and Telecommunications</institution>, <addr-line>Chongqing</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>School of Automation, Chongqing University of Posts and Telecommunications</institution>, <addr-line>Chongqing</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of Communication and Information Engineering, Chongqing University of Posts and Telecommunications</institution>, <addr-line>Chongqing</addr-line>, <country>China</country></aff>
<aff id="aff4"><sup>4</sup><institution>School of Bioinformatics, Chongqing University of Posts and Telecommunications</institution>, <addr-line>Chongqing</addr-line>, <country>China</country></aff>
<aff id="aff5"><sup>5</sup><institution>School of Medical Information and Engineering, Southwest Medical University Luzhou</institution>, <addr-line>Luzhou</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Ra&#x00FA;l Alcaraz, University of Castilla-La Mancha, Spain</p>
</fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Mario Versaci, Mediterranea University of Reggio Calabria, Italy</p>
<p>Israel Rom&#x00E1;n-God&#x00ED;nez, University of Guadalajara, Mexico</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Zhangyong Li, <email>lizy@cqupt.edu.cn</email>; Lisha Zhong, <email>zhonglisha@swmu.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>03</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>18</volume>
<elocation-id>1354436</elocation-id>
<history>
<date date-type="received">
<day>12</day>
<month>12</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>29</day>
<month>02</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Liu, Li, Lou, Kong, Li, Li and Zhong.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Liu, Li, Lou, Kong, Li, Li and Zhong</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Epileptic seizures are characterized by their sudden and unpredictable nature, posing significant risks to a patient&#x2019;s daily life. Accurate and reliable seizure prediction systems can provide alerts before a seizure occurs, giving the patient and caregivers enough time to take appropriate measures. This study presents an effective seizure prediction method based on deep learning combined with handcrafted features. The handcrafted features were selected by Max-Relevance and Min-Redundancy (mRMR) to obtain the optimal set of features. To extract the epileptic features from the fused multidimensional structure, we designed a P3D-BiConvLstm3D model, which is a combination of a pseudo-3D convolutional neural network (P3DCNN) and a bidirectional convolutional long short-term memory 3D network (BiConvLstm3D). We also converted EEG signals into a multidimensional structure that fused spatial, manual-feature, and temporal information. The multidimensional structure is then fed into a P3DCNN to extract spatial and manual features and feature-to-feature dependencies, followed by a BiConvLstm3D to explore temporal dependencies while preserving the spatial features; finally, a channel attention mechanism is implemented to emphasize the more representative information in the multichannel output. The proposed method has an average accuracy of 98.13%, an average sensitivity of 98.03%, an average precision of 98.30% and an average specificity of 98.23% on the CHB-MIT scalp EEG database. A comparison of the proposed model with other baseline methods was done to confirm the better performance of features through time&#x2013;space nonlinear feature fusion. The results show that the proposed P3DCNN-BiConvLstm3D-Attention3D method for epilepsy prediction by time&#x2013;space nonlinear feature fusion is effective.</p>
</abstract>
<kwd-group>
<kwd>epilepsy</kwd>
<kwd>feature selection</kwd>
<kwd>MRMR</kwd>
<kwd>pseudo-3D CNN</kwd>
<kwd>seizure prediction</kwd>
</kwd-group>
<contract-num rid="cn1">62171073</contract-num>
<contract-num rid="cn1">62311530103</contract-num>
<contract-num rid="cn1">62106032</contract-num>
<contract-num rid="cn2">HZKY20220209</contract-num>
<contract-num rid="cn3">2023JYJ047</contract-num>
<contract-sponsor id="cn1">National Natural Science Foundation of China<named-content content-type="fundref-id">10.13039/501100001809</named-content></contract-sponsor>
<contract-sponsor id="cn2">&#x201C;Chunhui Plan&#x201D; Collaborative Research Project of the Ministry of Education, China</contract-sponsor>
<contract-sponsor id="cn3">Science and Technology Program of Luzhou</contract-sponsor>
<counts>
<fig-count count="12"/>
<table-count count="4"/>
<equation-count count="28"/>
<ref-count count="31"/>
<page-count count="14"/>
<word-count count="7994"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Epilepsy is a neurological disorder characterized by epileptic seizures (<xref ref-type="bibr" rid="ref3">Chang and Lowenstein, 2003</xref>; <xref ref-type="bibr" rid="ref6">Fisher et al., 2014</xref>), which are often accompanied by intense shaking or convulsions. According to statistics from the World Health Organization, neurological disorders rank as the second leading cause of global mortality. It is estimated that an additional 5 million individuals are diagnosed with epilepsy worldwide annually (<xref ref-type="bibr" rid="ref26">World Health Organization, 2022</xref>). Therefore, epilepsy deserves significant attention and focus to improve its prevention and treatment efforts. The unpredictability, suddenness, and recurrence of epileptic seizures can cause additional anxiety for individuals with epilepsy and their families. Epilepsy also has a negative impact on society, as the stigma and bias against individuals with epilepsy can lead to feelings of shame and social isolation for the affected individuals. This stigma can hinder societal development and progress. Therefore, epilepsy prediction and treatment have become particularly important. Seizures are controllable with medication in about 70% of cases, so early prediction of epilepsy reduces worry about the condition, as having enough time to stop a seizure before it occurs greatly reduces the patient&#x2019;s suffering.</p>
<p>Seizure prediction is one of the hot topics in clinical research, which is a challenging task. Seizures are the result of excessive and abnormal neuronal activity in the cerebral cortex, so epilepsy can usually be detected by electroencephalography (EEG). EEG reflects the electrical activity of neurons in the brain, and more than 80% of people with epilepsy can be monitored for abnormalities by EEG. Therefore, it is of great value to analyze EEG in the diagnosis of epilepsy. With the development of modern science, a variety of methods have been developed to automatically predict seizures. Most of these methods are based on EEG analysis.</p>
<p>In the literature, there are several prediction methods that can be used to address the challenge of predicting seizures. The combination of manual feature extraction from time-series signals and traditional machine learning classifiers has indeed made significant contributions to epilepsy detection (<xref ref-type="bibr" rid="ref20">Sharma et al., 2019</xref>). <xref ref-type="bibr" rid="ref11">Lu et al. (2021)</xref> employed support vector machines for automatic classification of epileptic EEG signals. They chose sample entropy and Higuchi fractal dimension as features, and achieved 89.8% accuracy. Non-linear features show effectiveness in epilepsy detection or prediction. Manual features make the model easier to interpret and better able to capture the essential features of the data. They can also be customized for different research tasks and applications. Our purpose is to explore the prediction of epilepsy based on nonlinear features. Appropriate feature selection determines the accuracy of the system, but relying only on features and SVM cannot adequately access the hidden information in the data, requiring a combination of other techniques and methods. With the development of neural networks, various neural network methods are gradually being applied to the detection and prediction of epilepsy. Among these neural networks, recurrent neural networks, convolutional neural networks and graph neural networks have become prominent. <xref ref-type="bibr" rid="ref7">He et al. (2022)</xref> utilized a graph attention network as the front end to extract spatial features, and used a bidirectional long short-term memory network as the back end to capture temporal relationships. As a result, the seizure detection accuracy on CHB-MIT is 98.52%. <xref ref-type="bibr" rid="ref29">Yu et al. (2022)</xref> utilized manual features and hidden deep features for complementary fusion through the feature fusion module. 
These fused features were then input into a Multiplicative Long Short-Term Memory network, achieving an average sensitivity of 95.56% and a false positive rate of 0.27/h. In addition, neural networks have been proven to be effective in epilepsy detection or prediction. We will further study epilepsy prediction with neural networks. <xref ref-type="bibr" rid="ref21">Singh and Malhotra (2022)</xref> used the spectral power and average spectral amplitude of each band as the feature inputs of a two-layer LSTM, and achieved 98.14% accuracy, 98.51% sensitivity and 97.78% specificity. <xref ref-type="bibr" rid="ref31">Zhang et al. (2021)</xref> combined multidimensional sample entropy with Bi-LSTM; the seizure prediction accuracy was 80.09% and the FPR was 0.26/h. <xref ref-type="bibr" rid="ref24">Tuncer and Bolat (2022)</xref> showed that, using EEG instantaneous frequency and spectral entropy as features, Bi-LSTM can also classify seizures well. The results show that the combination of artificial features and Bi-LSTM still has high efficiency in predicting seizures. <xref ref-type="bibr" rid="ref16">Prathaban and Balasubramanian (2021)</xref> reconstructed the EEG with sparse representation and converted it into a two-dimensional image. Then, in order to explain the relationship between channels, the 2D image was transformed into a three-dimensional image of time, signal value and channel representation, and a 3D optimized convolutional neural network was used to predict epileptic seizures. This shows that epilepsy prediction based on a 3D neural network can be realized. However, it should be noted that features with high redundancy can affect the performance of the model. Only by selecting features and reducing redundancy between features can we improve the computational efficiency of the model and optimize its performance. <xref ref-type="bibr" rid="ref28">Xing et al. 
(2022)</xref> segmented EEG signals into five frequency bands: &#x03B1;, &#x03B2;, &#x03B3;, &#x03B8;, and &#x03B4;, calculated their power spectral density values, merged spatial information from multiple electrodes, and then applied them to a 3D neural network and a bidirectional long short-term memory network. This method successfully realized emotion classification. The study also incorporated spatial information from electrodes into the analysis of emotion recognition. The principle of EEG acquisition is the waveform of the potential difference between two electrodes on the scalp, so the position of an electrode reflects the state of other adjacent electrodes. This means that we can get some information about the EEG signal from the spatial information of the electrodes. Therefore, we select features and combine manual features, a 3D neural network, and Bi-ConvLSTM3D to form a neural network structure model that preserves spatial information: P3DCNN-BiConvLSTM3D-Attention3D. This model enables better intervention in epileptic seizures, reducing the negative impact of epilepsy.</p>
<p>The article is structured as follows: In Section 2, we provide a brief description of the dataset, signal pre-processing, selected feature types, mRMR algorithm, and 3D feature construction. Section 3 presents an overview of the EEG spatial information modelling, P3DCNN-BiConvLSTM3D-Attention3D model application, and evaluation metrics. We then discuss and compare the results with previous studies. Finally, we provide our conclusions.</p>
</sec>
<sec sec-type="materials|methods" id="sec2">
<label>2</label>
<title>Materials and methods</title>
<p>Epileptic signals are essentially nonlinear, so nonlinear characteristics are part of the research. Using a single feature may not be able to effectively capture epilepsy-related information, and too many features will reduce the efficiency of the algorithm. Therefore, multiple features are used to represent the features of epileptic signals. The Max-Relevance and Min-Redundancy algorithm (mRMR) is used to select important non-linear features while maximizing their relevance and minimizing redundancy.</p>
<p><xref ref-type="fig" rid="fig1">Figure 1</xref> shows the algorithmic process of this study. The process begins with the selection and preprocessing of EEG signals from the dataset. Following this, several features are extracted. The mRMR algorithm is applied to obtain highly significant features, which are then combined with the spatial relationship of the electrode channels to create 3D features with spatial information. The 3D features are input into the P3DCNN-BiConvLstm3D model, and finally a channel attention mechanism is added to improve the performance and efficiency of the model. KNN and SVM are used to conduct synchronous comparison experiments.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Flow chart of multi feature selection and temporal spatial epilepsy prediction.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g001.tif"/>
</fig>
<sec id="sec3">
<label>2.1</label>
<title>CHB-MIT dataset</title>
<p>The present work used a public dataset created jointly by Children&#x2019;s Hospital Boston (CHB) and the Massachusetts Institute of Technology (MIT). The dataset is called CHB-MIT. The dataset contains 24 cases, of which cases 1 and 21 were obtained from the same female patient, with an interval of one and a half years between the two data collections. The participants included 5 males ranging in age from 3 to 22 and 17 females ranging in age from 1.5 to 19. The dataset consists of 967.55&#x2009;h of scalp EEG records, including 178 recorded seizures.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Preprocessing of EEG signals</title>
<p>Due to the low amplitude of EEG signals, they are susceptible to external environmental interference, such as powerline frequency (60&#x2009;Hz or 50&#x2009;Hz) interference. In addition, physiological activity can introduce artifacts into EEG signals, mainly including eye artifacts and muscle artifacts caused by eye movement and blinking. Therefore, in order to obtain relatively clean EEG signals, signal preprocessing must be done before feature extraction. Firstly, the EEG signals are filtered using a band-pass filter in the range of 0.5 to 75&#x2009;Hz. Because this study needed to consider the influence of electrode placement, it is very important to locate the electrodes in the preprocessing phase. The pop_chanedit function in EEGLAB is used to locate the scalp electrodes. The EEG signals were processed using EEGLAB&#x2019;s Independent Component Analysis (ICA) through the pop_runica function. The pop_selectcomps function is used to remove the artifact components manually to obtain relatively clean EEG. The processed EEG signals were subsequently segmented.</p>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Feature type selection</title>
<p>Nonlinear dynamics analysis methods may be better suited for the analysis of the complex and nonlinear EEG waveforms recorded from the brain than traditional linear methods, such as time- and frequency-domain analysis. Nonlinear features can effectively capture the characteristics of biological systems, and can also be used in the analysis of EEG (<xref ref-type="bibr" rid="ref1">Acharya et al., 2013</xref>). Due to the instability and non-stationarity of epileptic signals, we extract the following non-linear features from the EEG signals: Higuchi Fractal Dimension (HFD) (<xref ref-type="bibr" rid="ref19">Sharma and Joshi, 2022</xref>), Approximate Entropy (ApEn) (<xref ref-type="bibr" rid="ref22">Srinivasan et al., 2007</xref>), Sample Entropy (SampEn) (<xref ref-type="bibr" rid="ref2">Arunkumar et al., 2016</xref>), and Fuzzy Entropy (FuzzyEn) (<xref ref-type="bibr" rid="ref27">Xiang et al., 2015</xref>); FuzzyEn works equally well for fuzzy time series and can describe the degree of ambiguity of the series (<xref ref-type="bibr" rid="ref25">Versaci and Morabito, 2003</xref>). These nonlinear features were subjected to feature selection.</p>
<p>Fractal dimension is a measure used to quantify the complexity of signals. In this study, we used HFD to characterize the fractal dimension of the signal. HFD is computed by the following steps (<xref ref-type="bibr" rid="ref8">Higuchi, 1988</xref>):</p>
<p>Step 1: constructing a new time series as <xref ref-type="disp-formula" rid="EQ1">Eq. (1)</xref>:</p>
<disp-formula id="EQ1">
<label>(1)</label>
<mml:math id="M1">
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mfenced open="{" close="}">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>m</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>2</mml:mn>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mfrac>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:mfrac>
</mml:mfenced>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>Step 2: <xref ref-type="disp-formula" rid="EQ2">Eqs. (2)</xref> and <xref ref-type="disp-formula" rid="EQ3">(3)</xref> can be used to calculate the length of the time-series curve.</p>
<disp-formula id="EQ2">
<label>(2)</label>
<mml:math id="M2">
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>m</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>k</mml:mi>
</mml:mfrac>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mfrac>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:mfrac>
</mml:mfenced>
</mml:munderover>
<mml:mfenced open="|" close="|">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mfrac>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="[" close="]">
<mml:mfrac>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:mfrac>
</mml:mfenced>
<mml:mo>&#x00B7;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ3">
<label>(3)</label>
<mml:math id="M3">
<mml:mi>L</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>k</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:munderover>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>m</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>Step 3: the HFD is calculated as follows <xref ref-type="disp-formula" rid="EQ4">Eq. (4)</xref>:</p>
<disp-formula id="EQ4">
<label>(4)</label>
<mml:math id="M4">
<mml:mi>D</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mo>ln</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>ln</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>ApEn is an index to measure the complexity of time series. It is a nonlinear dynamics parameter, which is used to measure regularity and volatility of time series by comparing the similarity of template vectors. ApEn is computed as below (<xref ref-type="bibr" rid="ref15">Pincus, 1991</xref>):</p>
<p>In general, for a time series <inline-formula>
<mml:math id="M5">
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>n</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mn>1</mml:mn>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mn>2</mml:mn>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mo>..</mml:mo>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>N</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> consisting of <inline-formula>
<mml:math id="M6">
<mml:mi>N</mml:mi>
</mml:math>
</inline-formula> data points, the method for calculating ApEn is as follows:</p>
<p>First, constructing an m-dimensional vector <inline-formula>
<mml:math id="M7">
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mn>1</mml:mn>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula>, where <inline-formula>
<mml:math id="M8">
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mfenced open="{" close="}">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2264;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2264;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula>.</p>
<p>Second, define the distance <inline-formula>
<mml:math id="M9">
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> between vectors <inline-formula>
<mml:math id="M10">
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M11">
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mi>j</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> as the Chebyshev distance as <xref ref-type="disp-formula" rid="EQ5">Eq. (5)</xref> which is the maximum absolute difference between their corresponding elements.</p>
<disp-formula id="EQ5">
<label>(5)</label>
<mml:math id="M12">
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi mathvariant="italic">chebychev</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=",">
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:msubsup>
<mml:mi>X</mml:mi>
<mml:mi>j</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mo>max</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="true">|</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo stretchy="true">|</mml:mo>
</mml:math>
</disp-formula>
<p>Third, count the number of <inline-formula>
<mml:math id="M13">
<mml:mi>j</mml:mi>
</mml:math>
</inline-formula> for which <inline-formula>
<mml:math id="M14">
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> is less than or equal to the similarity threshold <inline-formula>
<mml:math id="M15">
<mml:mi>r</mml:mi>
</mml:math>
</inline-formula>, and define the approximate count <inline-formula>
<mml:math id="M16">
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>. For <inline-formula>
<mml:math id="M17">
<mml:mn>1</mml:mn>
<mml:mo>&#x2264;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2264;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula>, <inline-formula>
<mml:math id="M18">
<mml:msubsup>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> is designated as the ratio of the approximate count to the total count as <xref ref-type="disp-formula" rid="EQ6">Eq. (6)</xref>.</p>
<disp-formula id="EQ6">
<label>(6)</label>
<mml:math id="M19">
<mml:msubsup>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</disp-formula>
<p>Fourth, define <inline-formula>
<mml:math id="M20">
<mml:msup>
<mml:mi>&#x03D5;</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> as <xref ref-type="disp-formula" rid="EQ7">Eq. (7)</xref>:</p>
<disp-formula id="EQ7">
<label>(7)</label>
<mml:math id="M21">
<mml:msup>
<mml:mi>&#x03D5;</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:mo>ln</mml:mo>
<mml:msubsup>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</disp-formula>
<p>Fifth, increase the dimension to <inline-formula>
<mml:math id="M22">
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula> and obtain <inline-formula>
<mml:math id="M23">
<mml:msup>
<mml:mi>&#x03D5;</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula>.</p>
<p>Sixth, define ApEn as <xref ref-type="disp-formula" rid="EQ8">Eq. (8)</xref>:</p>
<disp-formula id="EQ8">
<label>(8)</label>
<mml:math id="M24">
<mml:mi mathvariant="italic">ApEn</mml:mi>
<mml:mfenced open="(" close=")" separators=",">
<mml:mi>m</mml:mi>
<mml:mi>r</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mi>&#x03D5;</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>&#x2212;</mml:mo>
<mml:msup>
<mml:mi>&#x03D5;</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</disp-formula>
<p>SampEn is an improvement on ApEn. The following steps are used to calculate SampEn (<xref ref-type="bibr" rid="ref18">Richman and Moorman, 2000</xref>):</p>
<p>For a sequence <inline-formula>
<mml:math id="M25">
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>n</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula>, calculate the maximum distance between <inline-formula>
<mml:math id="M26">
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>i</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M27">
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> as <inline-formula>
<mml:math id="M28">
<mml:mi>d</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:munder>
<mml:mo>max</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:munder>
<mml:mfenced open="|" close="|">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula>. Then calculate the ratio relationship: <inline-formula>
<mml:math id="M29">
<mml:msubsup>
<mml:mi>B</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mi>n</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mfenced open="{" close="}">
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x003C;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula>, where <inline-formula>
<mml:math id="M30">
<mml:mi>r</mml:mi>
</mml:math>
</inline-formula> is the similarity threshold. <inline-formula>
<mml:math id="M31">
<mml:msup>
<mml:mi>B</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:msubsup>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mi>B</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
</mml:math>
</inline-formula>. Next, increase the dimension to <inline-formula>
<mml:math id="M32">
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula> and obtain <inline-formula>
<mml:math id="M33">
<mml:msup>
<mml:mi>B</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula>. The formula for calculating SampEn is as <xref ref-type="disp-formula" rid="EQ9">Eq. (9)</xref>:</p>
<disp-formula id="EQ9">
<label>(9)</label>
<mml:math id="M34">
<mml:mi mathvariant="italic">SampEn</mml:mi>
<mml:mfenced open="(" close=")" separators=",,">
<mml:mi>m</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>N</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>ln</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>B</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mi>B</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
</mml:mfenced>
</mml:math>
</disp-formula>
<p><inline-formula>
<mml:math id="M35">
<mml:msup>
<mml:mi>B</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
</mml:math>
</inline-formula> is never equal to zero. This is because the distance between each pair of vectors <inline-formula>
<mml:math id="M36">
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>i</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M37">
<mml:mi>X</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> is greater than zero, and the value of <inline-formula>
<mml:math id="M38">
<mml:msubsup>
<mml:mi>B</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> is always greater than zero. So the value of <inline-formula>
<mml:math id="M39">
<mml:msup>
<mml:mi>B</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
</mml:math>
</inline-formula> is always greater than zero.</p>
<p>FuzzyEn is used to measure the uncertainty or information content of fuzzy sets or fuzzy systems. FuzzyEn is defined as follows (<xref ref-type="bibr" rid="ref4">Chen et al., 2007</xref>):</p>
<p>To calculate the mean-removed template vector, <inline-formula>
<mml:math id="M40">
<mml:mi>X</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mi>x</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2026;</mml:mo>
<mml:mi>x</mml:mi>
<mml:msup>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:mi>T</mml:mi>
</mml:msup>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">&#x00AF;</mml:mo>
</mml:mover>
<mml:mfenced open="[" close="]">
<mml:mi>i</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mtext>,</mml:mtext>
</mml:math>
</inline-formula> where <inline-formula>
<mml:math id="M41">
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">&#x00AF;</mml:mo>
</mml:mover>
<mml:mfenced open="[" close="]">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo stretchy="true">/</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:mi>x</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula>. The Gaussian function definition is employed, as shown in <xref ref-type="disp-formula" rid="EQ10">Eqs. (10</xref>&#x2013;<xref ref-type="disp-formula" rid="EQ12">12)</xref>:</p>
<disp-formula id="EQ10">
<label>(10)</label>
<mml:math id="M42">
<mml:msup>
<mml:mi>&#x03A6;</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2260;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:msubsup>
<mml:mi>D</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ11">
<label>(11)</label>
<mml:math id="M43">
<mml:msubsup>
<mml:mi>D</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mo>exp</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:msup>
<mml:mfenced open="(" close=")">
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
</mml:mfenced>
<mml:mi>n</mml:mi>
</mml:msup>
<mml:mi>r</mml:mi>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ12">
<label>(12)</label>
<mml:math id="M44">
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mi>d</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mi>i</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>X</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:munder>
<mml:mo>max</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:munder>
<mml:mo stretchy="true">|</mml:mo>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">&#x00AF;</mml:mo>
</mml:mover>
<mml:mfenced open="[" close="]">
<mml:mi>i</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mo>&#x2212;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">&#x00AF;</mml:mo>
</mml:mover>
<mml:mfenced open="[" close="]">
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
</mml:mtable>
<mml:mo stretchy="true">|</mml:mo>
</mml:math>
</disp-formula>
<p>The formula for computing FuzzyEn is as follows <xref ref-type="disp-formula" rid="EQ13">Eq. (13)</xref>:</p>
<disp-formula id="EQ13">
<label>(13)</label>
<mml:math id="M45">
<mml:mi mathvariant="italic">FuzzyEn</mml:mi>
<mml:mfenced open="(" close=")" separators=",,">
<mml:mi>X</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>r</mml:mi>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mo>log</mml:mo>
<mml:msup>
<mml:mi>&#x03A6;</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>log</mml:mo>
<mml:msup>
<mml:mi>&#x03A6;</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mi>r</mml:mi>
</mml:mfenced>
</mml:math>
</disp-formula>
</sec>
<sec id="sec6">
<label>2.4</label>
<title>Feature filtering based on mRMR</title>
<p>The goal of this algorithm is to find a set of features in the original feature set that have the max-relevance with the final output result, but have the min-redundancy between the features. In order to minimize the redundancy of features and obtain the most information with the least features, we use the mRMR method to select features (<xref ref-type="bibr" rid="ref14">Peng et al., 2005</xref>). Using this algorithm, we can choose the features with the highest information, thus improving the performance and accuracy of the model. The experimental process is as follows: we subject pre-processed EEG signals to feature extraction and use the mRMR method to select the feature group with the max-relevance and min-redundancy. Effective feature selection can extract highly correlated features for epilepsy detection, while eliminating those features with poor correlation. The combination of these features better captures the integrity of the signal, reduces the complexity, and improves the efficiency of network learning. This method achieves the highest accuracy with fewer features. mRMR is computed by the following steps (<xref ref-type="bibr" rid="ref14">Peng et al., 2005</xref>):</p>
<p>Define the mutual information between <inline-formula>
<mml:math id="M46">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M47">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> as <xref ref-type="disp-formula" rid="EQ14">Eq. (14)</xref>:</p>
<disp-formula id="EQ14">
<label>(14)</label>
<mml:math id="M48">
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mo stretchy="true">&#x222C;</mml:mo>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mo>log</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mfenced open="(" close=")">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mi>p</mml:mi>
<mml:mfenced open="(" close=")">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mi>d</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mi>d</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</disp-formula>
<p>Using mutual information, the mRMR criterion can be obtained as <xref ref-type="disp-formula" rid="EQ15">Eqs. (15</xref>, <xref ref-type="disp-formula" rid="EQ16">16)</xref>:</p>
<disp-formula id="EQ15">
<label>(15)</label>
<mml:math id="M49">
<mml:mo>max</mml:mo>
<mml:mi>D</mml:mi>
<mml:mfenced open="(" close=")" separators=",">
<mml:mi>S</mml:mi>
<mml:mi>c</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>D</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mfenced open="|" close="|">
<mml:mi>S</mml:mi>
</mml:mfenced>
</mml:mfrac>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>S</mml:mi>
</mml:mrow>
</mml:munder>
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mi>c</mml:mi>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ16">
<label>(16)</label>
<mml:math id="M50">
<mml:mo>min</mml:mo>
<mml:mi>R</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>S</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mfenced open="|" close="|">
<mml:mi>S</mml:mi>
</mml:mfenced>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mfrac>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>S</mml:mi>
</mml:mrow>
</mml:munder>
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>Where <inline-formula>
<mml:math id="M51">
<mml:mi>S</mml:mi>
</mml:math>
</inline-formula> represents the feature set, with <inline-formula>
<mml:math id="M52">
<mml:mo stretchy="true">|</mml:mo>
<mml:mi>S</mml:mi>
<mml:mo stretchy="true">|</mml:mo>
</mml:math>
</inline-formula> being the dimensionality. <inline-formula>
<mml:math id="M53">
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mi>c</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula> represents for the mutual information between feature <inline-formula>
<mml:math id="M54">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and target <inline-formula>
<mml:math id="M55">
<mml:mi>c</mml:mi>
</mml:math>
</inline-formula>, while <inline-formula>
<mml:math id="M56">
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:math>
</inline-formula> represents for the mutual information between <inline-formula>
<mml:math id="M57">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M58">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>. <inline-formula>
<mml:math id="M59">
<mml:mi>D</mml:mi>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M60">
<mml:mi>R</mml:mi>
</mml:math>
</inline-formula> denote the relevance and redundancy, respectively.</p>
<p>The mRMR algorithm considers both of the above criteria as <xref ref-type="disp-formula" rid="EQ17">Eq. (17)</xref>:</p>
<disp-formula id="EQ17">
<label>(17)</label>
<mml:math id="M61">
<mml:mo>max</mml:mo>
<mml:mi>&#x03A6;</mml:mi>
<mml:mfenced open="(" close=")" separators=",">
<mml:mi>D</mml:mi>
<mml:mi>R</mml:mi>
</mml:mfenced>
<mml:mo>,</mml:mo>
<mml:mi>&#x03A6;</mml:mi>
<mml:mo>=</mml:mo>
<mml:mi>D</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>R</mml:mi>
</mml:math>
</disp-formula>
<p>To solve the equation above, we use an incremental search algorithm. That is, on the basis of the features that have been selected, we find the feature in the remaining feature space that maximizes <xref ref-type="disp-formula" rid="EQ18">Eq. (18)</xref>. In fact, this is equivalent to computing the criterion for each of the remaining features and then sorting them.</p>
<disp-formula id="EQ18">
<label>(18)</label>
<mml:math id="M62">
<mml:munder>
<mml:mo>max</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>X</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:munder>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mi>c</mml:mi>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:munder>
<mml:mi>I</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>In the process of feature selection, the mRMR algorithm calculates feature importance based on HFD, ApEn, SampEn, and FuzzyEn, and selects the most important variables one by one. <xref ref-type="fig" rid="fig2">Figure 2</xref> shows the feature importance scores obtained by using the mRMR algorithm.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Importance score of each feature.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g002.tif"/>
</fig>
<p>According to the results in <xref ref-type="fig" rid="fig2">Figure 2</xref>, it is obvious that this group of features are arranged in descending order of importance score: HFD, FuzzyEn, ApEn, and SampEn. It should be noted that SampEn has the lowest score, indicating that it has higher redundancy and relatively low correlation with other characteristics.</p>
<p>In order to find the appropriate number of features, this article conducted experiments using different numbers of feature sets, taken in descending order of importance score, from HFD, FuzzyEn, ApEn, and SampEn. By comparing the effects of different feature numbers on the accuracy of the model, the optimal feature number is determined, as shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>. It can be observed that as the number of features increases, the accuracy of the model also improves, reaching the highest point at three features. However, with the addition of the fourth feature, the accuracy of the model drops. Therefore, it is very important to strike a balance between minimizing redundancy and maximizing relevance in the process of feature selection to ensure the best prediction performance. The result in <xref ref-type="fig" rid="fig3">Figure 3</xref> also confirms the correctness of the results in <xref ref-type="fig" rid="fig2">Figure 2</xref>. The model chosen is also the P3DCNN-BiConvLSTM3D-Attention3D model proposed in present work, which is selected using accuracy as an evaluation metric. The parameter settings are shown in <xref ref-type="table" rid="tab1">Tables 1</xref>, <xref ref-type="table" rid="tab2">2</xref>.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Accuracy values under different features.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g003.tif"/>
</fig>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Pseudo 3D convolution feature extraction architecture.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Layer</th>
<th align="center" valign="top">Kernel size</th>
<th align="center" valign="top">Output size</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">P3DConv1</td>
<td align="center" valign="middle">1&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;64</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;1</td>
</tr>
<tr>
<td align="left" valign="middle">P3DConv2</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;64</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;64</td>
</tr>
<tr>
<td align="left" valign="middle">P3DConv3</td>
<td align="center" valign="middle">1&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;128</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;128</td>
</tr>
<tr>
<td align="left" valign="middle">P3DConv4</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;128</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;128</td>
</tr>
<tr>
<td align="left" valign="middle">P3DConv5</td>
<td align="center" valign="middle">1&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;256</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;256</td>
</tr>
<tr>
<td align="left" valign="middle">P3DConv6</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;256</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;256</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>3D RNN feature extraction architecture.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">Layer</th>
<th align="center" valign="middle">Size of hidden state</th>
<th align="center" valign="middle">Output size</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Input sequence</td>
<td align="center" valign="middle">&#x2013;</td>
<td align="center" valign="middle">2&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;512</td>
</tr>
<tr>
<td align="left" valign="middle">Bi-ConvLSTM3D layer</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;1&#x2009;&#x00D7;&#x2009;64</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;64</td>
</tr>
<tr>
<td align="left" valign="middle">Fully connected layer</td>
<td align="center" valign="middle">1&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;3&#x2009;&#x00D7;&#x2009;128</td>
<td align="center" valign="middle">3&#x2009;&#x00D7;&#x2009;4&#x2009;&#x00D7;&#x2009;7&#x2009;&#x00D7;&#x2009;128</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In conclusion, HFD, FuzzyEn, and ApEn have been chosen as the features used in the experiments. The selected features were combined with the 2D electrode channel spatial feature matrix. The data were normalized using the StandardScaler class from the scikit-learn library, applying its fit_transform method.</p>
</sec>
</sec>
<sec id="sec7">
<label>3</label>
<title>Epileptic-states classification</title>
<p>According to the EEG records of epileptic patients, their condition can be divided into two periods: the interictal period and the Seizure Occurrence Period (SOP). The interictal period refers to the time when the patient is in a normal state, and the SOP refers to the time range when the patient has epileptic symptoms. The main goal of epilepsy prediction is to detect seizures within the range of the Seizure Prediction Horizon (SPH). An appropriate SPH should include a sufficient time range for taking adequate intervention or preventive measures before actual seizures. A long prediction range can cause patient anxiety and pose a challenge to using neural network prediction models, while a short prediction range may result in insufficient preparation time for patients and healthcare providers, ultimately failing to achieve the goal of epilepsy prediction. Achieving an appropriate balance is crucial in epilepsy prediction.</p>
<p><xref ref-type="bibr" rid="ref23">Truong et al. (2018)</xref> established the time interval of SPH from 5&#x2009;min before the actual seizure occurrence. Within this period, patients were provided only 5&#x2009;min to prepare. To provide ample time for both the physician and the patient, we define the SPH range as 15&#x2009;min prior to the seizure up to 5&#x2009;min before the seizure itself, as illustrated in <xref ref-type="fig" rid="fig4">Figure 4</xref>.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Epilepsy different stages state diagram.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g004.tif"/>
</fig>
<p>In the present work, positive samples are retrieved during the 15 to 5&#x2009;min interval leading up to seizure onset, as seizures are likely to occur during the following 15&#x2009;min. Negative samples are segments without signs of imminent seizure within 15&#x2009;min. The number of positive and negative samples of the dataset used in CHB-MIT was 11,300:11,400, which was generated because the time window used in this study was 6&#x2009;s and each patient had a large number of EEG recordings, each recording being at least two hours long. To balance the positive and negative samples, the same 1:1 positive and negative samples were used for each patient.</p>
<sec id="sec8">
<label>3.1</label>
<title>Three-dimensional feature construction of EEG</title>
<p>The CHB-MIT uses 23 electrodes for recording, which conforms to the positioning and naming of the international 10&#x2013;20 system for EEG electrode placement standards. The dataset&#x2019;s electrode names are as follows: AF7, AF3, AF4, AF8, FT9, FT7, FC3, FCz, FC4, FT8, FT10, T7, T8, TP7, CP3, CPz, CP4, TP8, P8, PO7, PO3, PO4, and PO8. <xref ref-type="fig" rid="fig5">Figure 5</xref> shows a mapping of the actual spatial distribution of these scalp electrodes on the head.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>23 electrodes scalp localization map.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g005.tif"/>
</fig>
<p>To extract spatial features between scalp electrodes, a spatial feature matrix with a 4&#x2009;&#x00D7;&#x2009;7 two-dimensional electrode channel is designed based on <xref ref-type="fig" rid="fig5">Figure 5</xref>, as illustrated in <xref ref-type="fig" rid="fig6">Figure 6</xref>. From <xref ref-type="fig" rid="fig6">Figure 6</xref>, the relative spatial relationships between different electrodes can be clearly understood.</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>2D electrode channel positioning matrix diagram.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g006.tif"/>
</fig>
<p>To represent the EEG signals from a multi-feature perspective, the selected features were combined into a feature set: by arranging the 2D matrices of each feature after StandardScaler transformation, a 3D feature input composed of three features can be obtained, as illustrated in <xref ref-type="fig" rid="fig7">Figure 7</xref>. H represents the height of the matrix set to 4, W represents the width of the matrix set to 7, and N represents the number of selected important features, which is 3 in this case.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption>
<p>3D feature map.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g007.tif"/>
</fig>
</sec>
<sec id="sec9">
<label>3.2</label>
<title>Pseudo-3DCNN structure learning</title>
<p>The proposed work utilizes a pseudo-3D CNN merged with a bidirectional ConvLSTM3D as the primary algorithm. By using 3D neural networks, the algorithm can preserve the electrode space information and information from multiple features. Assuming a conventional 3D convolutional kernel size is <inline-formula>
<mml:math id="M63">
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>b</mml:mi>
</mml:math>
</inline-formula>, where <inline-formula>
<mml:math id="M64">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula> is the spatial dimension of the filter and <inline-formula>
<mml:math id="M65">
<mml:mi>b</mml:mi>
</mml:math>
</inline-formula> is the feature dimension of the filter, 3D convolution is computationally expensive and memory-intensive when learning features. In order to solve this problem, we can understand a 3D convolutional filter with a size of <inline-formula>
<mml:math id="M66">
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>b</mml:mi>
</mml:math>
</inline-formula> as a <inline-formula>
<mml:math id="M67">
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula> convolutional filter for 2DCNN and a <inline-formula>
<mml:math id="M68">
<mml:mn>1</mml:mn>
<mml:mo>&#x2217;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>b</mml:mi>
</mml:math>
</inline-formula> convolutional filter for 1DCNN. The <inline-formula>
<mml:math id="M69">
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula> convolution filter is used to obtain spatial information, while the <inline-formula>
<mml:math id="M70">
<mml:mn>1</mml:mn>
<mml:mo>&#x2217;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>b</mml:mi>
</mml:math>
</inline-formula> convolution filter is used to obtain information about nonlinear characteristics. This method is called pseudo-3D (<xref ref-type="bibr" rid="ref17">Qiu et al., 2017</xref>). In this study, we used pseudo-3D to extract EEG information from multiple dimensions, including spatial, nonlinear, and temporal. This not only reduces the computation and complexity of 3D convolution, but also realizes a more sensible feature extraction process. In order to extract the feature information of each nonlinear feature, each feature uses a convolution kernel with the size of <inline-formula>
<mml:math id="M71">
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mn>1</mml:mn>
</mml:math>
</inline-formula>. Then, a convolution kernel of the size <inline-formula>
<mml:math id="M72">
<mml:mn>1</mml:mn>
<mml:mo>&#x2217;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>b</mml:mi>
</mml:math>
</inline-formula> is used to extract the information between these features. Pseudo 3D networks can adopt different convolution kernel sizes, stride sizes, and padding methods in both temporal and spatial dimensions to meet different needs.</p>
</sec>
<sec id="sec10">
<label>3.3</label>
<title>P3DCNN-BiConvLstm3D-Attention3D model</title>
<p>It is equally important to extract temporal information for EEG while extracting spatial and feature information. Traditional BiLSTM is usually used to capture temporal correlations when processing temporal data, but it cannot effectively preserve spatial information features in the data. In contrast, Bi-ConvLSTM3D can simultaneously extract spatial relationships and temporal correlations from the data. This type of network is particularly suitable for the data in this study and can better handle 3D type data. Therefore, this study used Bi-ConvLSTM3D.</p>
<p>The purpose of this study was to determine whether the EEG signals belonged to SPH segments. Segments from 15&#x2009;min before the seizure to 5&#x2009;min before the seizure were designated as positive samples, and the remaining segments were designated as negative samples. For continuously recorded EEG signals, the data need to be segmented. In this paper, the EEG data were segmented into 6-s segments using a non-overlapping sliding window method.</p>
<p>The algorithm flow is illustrated in <xref ref-type="fig" rid="fig8">Figures 8</xref>&#x2013;<xref ref-type="fig" rid="fig10">10</xref>, and the steps of the algorithm are given in <xref ref-type="disp-formula" rid="EQ19">Eqs. (19</xref>&#x2013;<xref ref-type="disp-formula" rid="EQ24">24)</xref>.</p>
<fig position="float" id="fig8">
<label>Figure 8</label>
<caption>
<p>EEG segmentation and 3D feature construction.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g008.tif"/>
</fig>
<fig position="float" id="fig9">
<label>Figure 9</label>
<caption>
<p>The structure of ConvLSTM3D cells.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g009.tif"/>
</fig>
<fig position="float" id="fig10">
<label>Figure 10</label>
<caption>
<p>Overall architecture of the model.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g010.tif"/>
</fig>
<p>ConvLSTM3D is defined as <xref ref-type="bibr" rid="ref10">Li et al. (2022)</xref>:</p>
<disp-formula id="EQ19">
<label>(19)</label>
<mml:math id="M73">
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi mathvariant="italic">Xi</mml:mi>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ20">
<label>(20)</label>
<mml:math id="M74">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mi>f</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>f</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>f</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ21">
<label>(21)</label>
<mml:math id="M75">
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mo>tanh</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ22">
<label>(22)</label>
<mml:math id="M76">
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:math>
</disp-formula>
<disp-formula id="EQ23">
<label>(23)</label>
<mml:math id="M77">
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mi>o</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>o</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<disp-formula id="EQ24">
<label>(24)</label>
<mml:math id="M78">
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:mo>tanh</mml:mo>
<mml:mfenced open="(" close=")">
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>Where <inline-formula>
<mml:math id="M79">
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> represents the three-dimensional characteristics of each time window, and the <inline-formula>
<mml:math id="M80">
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> represents the hidden state. <inline-formula>
<mml:math id="M81">
<mml:mi>&#x03C3;</mml:mi>
</mml:math>
</inline-formula> represents the Sigmoid function, and <inline-formula>
<mml:math id="M82">
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>o</mml:mi>
</mml:math>
</inline-formula> correspond to the input gate, forget gate, and output gate (<xref ref-type="bibr" rid="ref10">Li et al., 2022</xref>). The weights and biases indexed by <inline-formula>
<mml:math id="M83">
<mml:mi>X</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>H</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>o</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>c</mml:mi>
</mml:math>
</inline-formula> are learned through backpropagation. The symbol <inline-formula>
<mml:math id="M84">
<mml:mo>&#x2299;</mml:mo>
</mml:math>
</inline-formula> represents the element-wise (Hadamard) product. The symbol <inline-formula>
<mml:math id="M85">
<mml:mo>&#x2217;</mml:mo>
</mml:math>
</inline-formula> represents the convolution operation. <xref ref-type="fig" rid="fig9">Figure 9</xref> illustrates the details of this implementation.</p>
<p>The whole model includes the construction and input of three-dimensional EEG, spatial and nonlinear feature extraction, temporal feature extraction, channel attention mechanism and classifier. The training process is as follows: Use the features selected by mRMR to construct 3D features. The time-step is set to 2 and each step lasts for 3&#x2009;s. The time distribution layer is used to wrap the pseudo-3DCNN, which can independently apply the layers or networks of the neural network to each time step of the sequence. We use Keras&#x2019; TimeDistributed layer to achieve this function. Then, the data is extracted through a temporal feature extraction layer to obtain temporal correlations. Finally, useful channels are enhanced by the 3D channel attention mechanism, and then it is sent to the full connection layer to determine the category of data segments. <xref ref-type="table" rid="tab1">Table 1</xref> illustrates the architecture of the pseudo-3D CNN layers; the temporal feature extraction layer takes two 3D blocks from the output of the convolutional layers as input, and the Bi-ConvLSTM3D layer captures temporal features while preserving spatial information. <xref ref-type="table" rid="tab2">Table 2</xref> illustrates the architecture of RNN.</p>
<p>The attention mechanism was also changed to a 3D type of attention mechanism, as the focus of this paper is on the 3D module. The specific operation of the channel attention mechanism is as follows: Squeeze-Excitation-Scale (<xref ref-type="bibr" rid="ref9">Hu et al., 2018</xref>). The squeeze is the use of global average pooling to compress the three-dimensional features of each channel into a real number. The excitation is to generate a weight value for each channel, use two fully connected layers and an activation layer to construct the correlation between channels, and output the same number of weights and channels. The Scale is the process of weighting the active weights to each channel. <xref ref-type="fig" rid="fig11">Figure 11</xref> shows the network structure of the channel attention mechanism.</p>
<fig position="float" id="fig11">
<label>Figure 11</label>
<caption>
<p>The architecture of convolutional block attention module.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g011.tif"/>
</fig>
<p>In the P3DCNN model, batch normalization is performed after every two pseudo-3DCNN layers. In this paper, the number of parameters for pseudo-3DCNN is 364,720, the number of parameters for Bi-ConvLSTM3D is 1,050,624, and the number of parameters for Attention3D is 66,112.</p>
</sec>
<sec id="sec11">
<label>3.4</label>
<title>Evaluation criteria</title>
<p>In the present work, the performance of the classification model is evaluated using the cross-validation method, the main idea of which is as follows: for all the data, divide it into <inline-formula>
<mml:math id="M86">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula> subsets of unrelated samples, i.e., <inline-formula>
<mml:math id="M87">
<mml:mi>D</mml:mi>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x222A;</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x222A;</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>&#x222A;</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2229;</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mo>&#x2205;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2260;</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula>, and then each time select a subset of samples to be used for testing, and the rest of the data are all used for training, and after <inline-formula>
<mml:math id="M88">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula> times of training in this way, average the <inline-formula>
<mml:math id="M89">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula> results and calculate the final result. This paper sets <inline-formula>
<mml:math id="M90">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula> to 5. We independently conduct five rounds of 5-fold cross-validation to obtain the average ACC and STD to evaluate the performance of the model.</p>
<p>Choosing appropriate evaluation criteria is beneficial for enhancing the credibility of model performance. In addition, to evaluate the effectiveness of the epilepsy prediction models in this study, accuracy (Acc), sensitivity, precision and specificity were used as evaluation metrics, these metrics as <xref ref-type="disp-formula" rid="EQ25">Eqs. (25</xref>&#x2013;<xref ref-type="disp-formula" rid="EQ28">28)</xref>.</p>
<disp-formula id="EQ25">
<label>(25)</label>
<mml:math id="M91">
<mml:mi>A</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>c</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ26">
<label>(26)</label>
<mml:math id="M92">
<mml:mi mathvariant="italic">Sensitivity</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ27">
<label>(27)</label>
<mml:math id="M93">
<mml:mi mathvariant="italic">Precision</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ28">
<label>(28)</label>
<mml:math id="M94">
<mml:mi mathvariant="italic">Specificity</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>Where TP, TN, FP, and FN represent true positives, true negatives, false positives, and false negatives, respectively.</p>
</sec>
</sec>
<sec sec-type="results" id="sec12">
<label>4</label>
<title>Results and discussion</title>
<p>The proposed model in this paper is compared with other baseline methods on the CHB-MIT dataset. The details and parameters of these methods are as follows.</p>
<p>SVM: Support Vector Machine is a supervised machine learning model for general linear classification. It is widely used for both classification and regression tasks. SVM maps the feature vectors of instances into points in space and then separates these points with a hyperplane for classification. SVM is suitable for small to medium-sized datasets, as well as nonlinear and high-dimensional classification problems. Since this study involves a three-dimensional feature structure, SVM was chosen as one of the baseline methods. The kernel of SVM is set to &#x2018;rbf&#x2019;, and the decision_function_shape is set to &#x201C;ovo&#x201D;.</p>
<p>K-Nearest Neighbors (KNN): The basic idea of this algorithm is to compare the attribute features of the test dataset with the corresponding attribute features in the training dataset. In the training dataset, it finds the k nearest &#x201C;neighbors&#x201D; and determines the class of the test dataset sample based on the majority class among these k neighbors. In this comparative experiment, the value of n_neighors used for KNN is set to 5, and the metric is set to &#x2018;minkowski&#x2019;.</p>
<p><xref ref-type="fig" rid="fig12">Figure 12</xref> shows the performance of KNN, SVM, and P3D-BiConvLstm3D-Attention3D on the CHB-MIT dataset. From <xref ref-type="fig" rid="fig12">Figure 12</xref>, it can be observed that the proposed model in this study demonstrates effectiveness and generalizability on the CHB-MIT dataset. The top two rows and the last row of <xref ref-type="table" rid="tab3">Table 3</xref> depict the average ACC and STD of these methods. We can observe that compared with the other baseline methods, our model achieves the highest ACC and the lowest STD. Our model has an accuracy of 98.13%, which is 1.72% and 0.74% higher than KNN and SVM, respectively.</p>
<fig position="float" id="fig12">
<label>Figure 12</label>
<caption>
<p>Shows the metrics for each subject under different models. <bold>(A)</bold> Accuracy <bold>(B)</bold> Sensitivity <bold>(C)</bold> Precision <bold>(D)</bold> Specificity. <bold>(A)</bold> The Accuracy metric is used to compare KNN, SVM, and the research proposed in present work. <bold>(B)</bold> The Sensitivity metric is used to compare KNN, SVM, and the research proposed in present work. <bold>(C)</bold> The Precision metric is used to compare KNN, SVM, and the research proposed in present work. <bold>(D)</bold> The Specificity metric is used to compare KNN, SVM, and the research proposed in present work.</p>
</caption>
<graphic xlink:href="fninf-18-1354436-g012.tif"/>
</fig>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Performance (<inline-formula>
<mml:math id="M95">
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>c</mml:mi>
</mml:mrow>
<mml:mo stretchy="true">&#x00AF;</mml:mo>
</mml:mover>
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>Std(%)) of traditional machine learning models and the proposed model on the CHB-MIT dataset.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="middle">Method</th>
<th align="left" valign="middle">Input feature</th>
<th align="center" valign="middle">Window size</th>
<th align="center" valign="middle">CHB-MIT <inline-formula>
<mml:math id="M96">
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>c</mml:mi>
</mml:mrow>
<mml:mo stretchy="true">&#x00AF;</mml:mo>
</mml:mover>
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>Std(%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">KNN</td>
<td align="left" valign="top">1D vector</td>
<td align="center" valign="top">6&#x2009;s</td>
<td align="center" valign="top">96.41&#x2009;&#x00B1;&#x2009;2.81</td>
</tr>
<tr>
<td align="left" valign="top">SVM</td>
<td align="left" valign="top">1D vector</td>
<td align="center" valign="top">6&#x2009;s</td>
<td align="center" valign="top">97.39&#x2009;&#x00B1;&#x2009;3.17</td>
</tr>
<tr>
<td align="left" valign="top">The proposed work</td>
<td align="left" valign="top">4D mesh sequence</td>
<td align="center" valign="top">6&#x2009;s</td>
<td align="center" valign="top">98.13&#x2009;&#x00B1;&#x2009;0.00</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Our model outperforms KNN and SVM in most patient metrics, with accuracy, sensitivity, specificity, and precision metrics of over 95% across all patients. Regarding the performance of the method proposed in this study on the dataset, the accuracy of this method is 98.13%, sensitivity is 98.03%, precision is 98.30% and specificity is 98.23%. Therefore, we have reason to believe that our proposed model is reliable. The experimental results show that our p3DCNN-BiConvLSTM3D-Attention3D model achieves better performance than traditional machine learning algorithms (SVM and KNN).</p>
<p><xref ref-type="table" rid="tab4">Table 4</xref> presents a comparison of the results between this study and other research papers. <xref ref-type="bibr" rid="ref30">Zhang et al. (2020)</xref> used Pearson correlation coefficients as features and then employed a convolutional neural network for prediction, achieving an accuracy of 89.98%, which is lower than the accuracy achieved in this study. This suggests that incorporating spatiotemporal features can indeed improve the accuracy of epilepsy prediction. <xref ref-type="bibr" rid="ref5">Das et al. (2020)</xref> utilized a framework consisting of various feature extraction algorithms (lower threshold, target point selection, and current maxima), energy features, and pattern matching (segment and domain). In the authors&#x2019; proposed model, power, homogeneity, maxima, energy, and physiological traits were employed. The algorithm achieved an accuracy rate of 92.66% and an F1-score of 94.86%. <xref ref-type="bibr" rid="ref13">Muhammad Usman et al. (2021)</xref> utilized a three-layer custom convolutional neural network in combination with handcrafted (temporal and spectral) features. The feature set was used to train an ensemble classifier, which integrated the outputs of SVM, CNN, and LSTM. On the CHB-MIT dataset, the average sensitivity rate achieved was 96.28%, and the average specificity rate achieved was 95.65%. <xref ref-type="bibr" rid="ref12">Muhammad Usman et al. (2020)</xref> utilized short-time Fourier transform (STFT) to extract frequency-domain and time-domain information from 30-s EEG windows. A neural network was utilized to classify segments between pre-seizure and interictal periods. On the CHB-MIT dataset, the sensitivity rate achieved was 92.7%, and the specificity rate achieved was 90.8%. 
<xref ref-type="bibr" rid="ref21">Singh and Malhotra (2022)</xref> proposed a two-layer LSTM network model that utilized the spectral power and average spectral magnitude features of &#x03B1;, &#x03B2;, &#x03B3;, &#x03B8;, &#x03B4; bands from a 23-channel EEG spectrum. The model achieved an average accuracy rate of 98.14%, an average sensitivity rate of 98.51%, and an average specificity rate of 97.78%. <xref ref-type="bibr" rid="ref31">Zhang et al. (2021)</xref> combined multidimensional sample entropy with Bi-LSTM; the seizure prediction accuracy was 80.09% and the FPR was 0.26/h. <xref ref-type="bibr" rid="ref16">Prathaban and Balasubramanian (2021)</xref> reconstructed the EEG with sparsity and converted it into a two-dimensional image. Then, to account for the relationship between channels, the two-dimensional image was converted into a three-dimensional image of time, signal value, and channel representation, and a three-dimensional optimized convolutional neural network was used to predict seizures with an accuracy of 98%, sensitivity of 99%, and False Prediction Rate (FPR) of 0.07 FP/h.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Comparison of the performance of existing epilepsy prediction methods.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Reference</th>
<th align="left" valign="top">Methods</th>
<th align="left" valign="top">Results</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref30">Zhang et al. (2020)</xref>
</td>
<td align="left" valign="middle">Pearson correlation coefficient, CNN</td>
<td align="left" valign="middle">Accuracy: 89.98%</td>
</tr>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref5">Das et al. (2020)</xref>
</td>
<td align="left" valign="middle">Statistical features, pattern matching</td>
<td align="left" valign="middle">Accuracy: 92.66%,</td>
</tr>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref13">Muhammad Usman et al. (2021)</xref>
</td>
<td align="left" valign="middle">Three-layer CNN, handcrafted<break/>Features, SVM, CNN, and LSTM</td>
<td align="left" valign="middle">Sensitivity: 96.28%, Specificity: 95.65%</td>
</tr>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref12">Muhammad Usman et al. (2020)</xref>
</td>
<td align="left" valign="middle">STFT, CNN</td>
<td align="left" valign="middle">Sensitivity: 92.7%, Specificity: 90.8%</td>
</tr>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref21">Singh and Malhotra (2022)</xref>
</td>
<td align="left" valign="middle">Two-layer LSTM, spectral power, spectral magnitude features</td>
<td align="left" valign="middle">Accuracy: 98.14%<break/>Sensitivity: 98.51%, Specificity: 97.78%</td>
</tr>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref31">Zhang et al. (2021)</xref>
</td>
<td align="left" valign="middle">Multidimensional SampEn, Bi-LSTM</td>
<td align="left" valign="middle">Accuracy: 80.09%</td>
</tr>
<tr>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref16">Prathaban and Balasubramanian (2021)</xref>
</td>
<td align="left" valign="middle">Dynamic learning framework, sparsity based EEG Reconstruction, three-dimensional Optimized CNN</td>
<td align="left" valign="middle">Accuracy: 98%, Sensitivity: 99%</td>
</tr>
<tr>
<td align="left" valign="middle">This proposed work</td>
<td align="left" valign="middle">Nonlinear feature, Select by mRMR, Pseudo-3D, Bi-ConvLSTM3D</td>
<td align="left" valign="middle">Accuracy: 98.13%, Precision: 98.30%, Sensitivity: 98.03%, Specificity: 98.23%</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec sec-type="conclusions" id="sec13">
<label>5</label>
<title>Conclusion</title>
<p>We have proposed a seizure prediction algorithm that combines multiple feature selections and pseudo-3D neural networks. This method extracts multiple features and combines them to form unique 3D features. It uses multi-layer pseudo-3D convolutional neural networks, BiConvLSTM3D, and 3D channel attention mechanisms for automatic detection. The accuracy of this method is 98.13%, sensitivity is 98.03%, precision is 98.30% and specificity is 98.23%. The method outperforms most advanced similar methods with high sensitivity and a prediction time of 15&#x2009;min in advance. Compared to other methods, our results indicate that our model has similar or better predictive accuracy, sensitivity, precision, and specificity, which further validates the effectiveness of our method. However, there is still room for improvement in many areas. Grid search can be applied to the model to systematically search for the optimal combination of hyperparameters for optimal performance. In this study, all scalp electrode channels were used, and future research will further investigate the optimization of multi-channel epilepsy prediction. In three-dimensional neural networks, parameters can be reduced, computational efficiency can be improved, and the maximum information can be expressed with the least number of electrodes. In addition, the gender and age distribution of patients will be incorporated into the three-dimensional features to further investigate the relationship between gender, age, and epilepsy prediction.</p>
</sec>
<sec sec-type="data-availability" id="sec14">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec sec-type="author-contributions" id="sec15">
<title>Author contributions</title>
<p>XL, ZYL, and LSZ designed the work and wrote this original manuscript. LSZ and XCL contributed to the review and editing. CYL and HHK contributed to the optimization of the problem definition. ZYL was mainly responsible for this project. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec16">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This work was supported by the National Natural Science Foundation of China (62171073, 62311530103, and 62106032), &#x201C;Chunhui Plan&#x201D; Collaborative Research Project of the Ministry of Education, China (HZKY20220209), and Science and Technology Program of Luzhou (2023JYJ047).</p>
</sec>
<ack>
<p>We thank the Research Center of Biomedical Engineering of Chongqing University of Posts and Telecommunications for their assistance in the research.</p>
</ack>
<sec sec-type="COI-statement" id="sec17">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Acharya</surname> <given-names>U. R.</given-names></name> <name><surname>Vinitha Sree</surname> <given-names>S.</given-names></name> <name><surname>Swapna</surname> <given-names>G.</given-names></name> <name><surname>Martis</surname> <given-names>R. J.</given-names></name> <name><surname>Suri</surname> <given-names>J. S.</given-names></name></person-group> (<year>2013</year>). <article-title>Automated EEG analysis of epilepsy: a review</article-title>. <source>Knowl.-Based Syst.</source> <volume>45</volume>, <fpage>147</fpage>&#x2013;<lpage>165</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.knosys.2013.02.014</pub-id></citation>
</ref>
<ref id="ref2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arunkumar</surname> <given-names>N.</given-names></name> <name><surname>Kumar</surname> <given-names>K. R.</given-names></name> <name><surname>Venkataraman</surname> <given-names>V.</given-names></name></person-group> (<year>2016</year>). <article-title>Automatic detection of epileptic seizures using new entropy measures</article-title>. <source>J. Med. Imag. Health Inform.</source> <volume>6</volume>, <fpage>724</fpage>&#x2013;<lpage>730</lpage>. doi: <pub-id pub-id-type="doi">10.1166/jmihi.2016.1736</pub-id></citation>
</ref>
<ref id="ref3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chang</surname> <given-names>B. S.</given-names></name> <name><surname>Lowenstein</surname> <given-names>D. H.</given-names></name></person-group> (<year>2003</year>). <article-title>Epilepsy</article-title>. <source>N. Engl. J. Med.</source> <volume>349</volume>, <fpage>1257</fpage>&#x2013;<lpage>1266</lpage>. doi: <pub-id pub-id-type="doi">10.1056/nejmra022308</pub-id></citation>
</ref>
<ref id="ref4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Xie</surname> <given-names>H.</given-names></name> <name><surname>Yu</surname> <given-names>W.</given-names></name></person-group> (<year>2007</year>). <article-title>Characterization of surface EMG signal based on fuzzy entropy</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>15</volume>, <fpage>266</fpage>&#x2013;<lpage>272</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNSRE.2007.897025</pub-id>, PMID: <pub-id pub-id-type="pmid">17601197</pub-id></citation>
</ref>
<ref id="ref5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Das</surname> <given-names>K.</given-names></name> <name><surname>Daschakladar</surname> <given-names>D.</given-names></name> <name><surname>Roy</surname> <given-names>P. P.</given-names></name> <name><surname>Chatterjee</surname> <given-names>A.</given-names></name> <name><surname>Saha</surname> <given-names>S. P.</given-names></name></person-group> (<year>2020</year>). <article-title>Epileptic seizure prediction by the detection of seizure waveform from the pre-ictal phase of EEG signal</article-title>. <source>Biomed. Signal Proc. Control</source> <volume>57</volume>:<fpage>101720</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2019.101720</pub-id></citation>
</ref>
<ref id="ref6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fisher</surname> <given-names>R. S.</given-names></name> <name><surname>Acevedo</surname> <given-names>C.</given-names></name> <name><surname>Arzimanoglou</surname> <given-names>A.</given-names></name> <name><surname>Bogacz</surname> <given-names>A.</given-names></name> <name><surname>Cross</surname> <given-names>J. H.</given-names></name> <name><surname>Elger</surname> <given-names>C. E.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>ILAE official report: a practical clinical definition of epilepsy</article-title>. <source>Epilepsia</source> <volume>55</volume>, <fpage>475</fpage>&#x2013;<lpage>482</lpage>. doi: <pub-id pub-id-type="doi">10.1111/epi.12550</pub-id>, PMID: <pub-id pub-id-type="pmid">24730690</pub-id></citation>
</ref>
<ref id="ref7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>J.</given-names></name> <name><surname>Cui</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>G.</given-names></name> <name><surname>Xue</surname> <given-names>M.</given-names></name> <name><surname>Chu</surname> <given-names>D.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Spatial&#x2013;temporal seizure detection with graph attention network and bi-directional LSTM architecture</article-title>. <source>Biomed. Signal Proc. Control</source> <volume>78</volume>:<fpage>103908</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2022.103908</pub-id></citation>
</ref>
<ref id="ref8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Higuchi</surname> <given-names>T.</given-names></name>
</person-group> (<year>1988</year>). <article-title>Approach to an irregular time series on the basis of the fractal theory</article-title>. <source>Phys. D Nonlin. Phenom.</source> <volume>31</volume>, <fpage>277</fpage>&#x2013;<lpage>283</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0167-2789(88)90081-4</pub-id></citation>
</ref>
<ref id="ref9">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Shen</surname> <given-names>L.</given-names></name> <name><surname>Sun</surname> <given-names>G.</given-names></name></person-group> (<year>2018</year>) &#x2018;Squeeze-and-excitation networks&#x2019;, 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition [Preprint].</citation>
</ref>
<ref id="ref10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Cheng</surname> <given-names>L.</given-names></name> <name><surname>Xie</surname> <given-names>M.</given-names></name> <name><surname>Cao</surname> <given-names>K.</given-names></name></person-group> (<year>2022</year>). <article-title>3D wave simulation based on a deep learning model for spatiotemporal prediction</article-title>. <source>Ocean Eng.</source> <volume>263</volume>:<fpage>112420</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.oceaneng.2022.112420</pub-id></citation>
</ref>
<ref id="ref11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>S.</given-names></name> <name><surname>Lu</surname> <given-names>J.</given-names></name> <name><surname>Ye</surname> <given-names>M.</given-names></name> <name><surname>Wang</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Detection and classification of epileptic EEG signals by the methods of nonlinear dynamics</article-title>. <source>Chaos Solitons Fractals</source> <volume>151</volume>:<fpage>111032</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chaos.2021.111032</pub-id></citation>
</ref>
<ref id="ref12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Muhammad Usman</surname> <given-names>S.</given-names></name> <name><surname>Khalid</surname> <given-names>S.</given-names></name> <name><surname>Aslam</surname> <given-names>M. H.</given-names></name></person-group> (<year>2020</year>). <article-title>Epileptic seizures prediction using deep learning techniques</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>39998</fpage>&#x2013;<lpage>40007</lpage>. doi: <pub-id pub-id-type="doi">10.1109/access.2020.2976866</pub-id></citation>
</ref>
<ref id="ref13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Muhammad Usman</surname> <given-names>S.</given-names></name> <name><surname>Khalid</surname> <given-names>S.</given-names></name> <name><surname>Bashir</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>A deep learning based ensemble learning method for epileptic seizure prediction</article-title>. <source>Comput. Biol. Med.</source> <volume>136</volume>:<fpage>104710</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.104710</pub-id>, PMID: <pub-id pub-id-type="pmid">34364257</pub-id></citation>
</ref>
<ref id="ref14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peng</surname> <given-names>H.</given-names></name> <name><surname>Long</surname> <given-names>F.</given-names></name> <name><surname>Ding</surname> <given-names>C.</given-names></name></person-group> (<year>2005</year>). <article-title>Feature selection based on mutual information criteria of max-dependency, max-relevance, and min-redundancy</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>27</volume>, <fpage>1226</fpage>&#x2013;<lpage>1238</lpage>. doi: <pub-id pub-id-type="doi">10.1109/tpami.2005.159</pub-id>, PMID: <pub-id pub-id-type="pmid">16119262</pub-id></citation>
</ref>
<ref id="ref15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pincus</surname> <given-names>S. M.</given-names></name>
</person-group> (<year>1991</year>). <article-title>Approximate entropy as a measure of system complexity</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>88</volume>, <fpage>2297</fpage>&#x2013;<lpage>2301</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.88.6.2297</pub-id>, PMID: <pub-id pub-id-type="pmid">11607165</pub-id></citation>
</ref>
<ref id="ref16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Prathaban</surname> <given-names>B. P.</given-names></name> <name><surname>Balasubramanian</surname> <given-names>R.</given-names></name></person-group> (<year>2021</year>). <article-title>Dynamic learning framework for epileptic seizure prediction using sparsity based EEG reconstruction with optimized CNN classifier</article-title>. <source>Expert Syst. Appl.</source> <volume>170</volume>:<fpage>114533</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2020.114533</pub-id></citation>
</ref>
<ref id="ref17">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Qiu</surname> <given-names>Z.</given-names></name> <name><surname>Yao</surname> <given-names>T.</given-names></name> <name><surname>Mei</surname> <given-names>T.</given-names></name></person-group>, (<year>2017</year>). Learning spatio-temporal representation with pseudo-3d residual networks. In proceedings of the IEEE International Conference on Computer Vision (pp. 5533&#x2013;5541).</citation>
</ref>
<ref id="ref18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Richman</surname> <given-names>J. S.</given-names></name> <name><surname>Moorman</surname> <given-names>J. R.</given-names></name></person-group> (<year>2000</year>). <article-title>Physiological time-series analysis using approximate entropy and sample entropy</article-title>. <source>Am. J. Phys. Heart Circ. Phys.</source> <volume>278</volume>, <fpage>H2039</fpage>&#x2013;<lpage>H2049</lpage>. doi: <pub-id pub-id-type="doi">10.1152/ajpheart.2000.278.6.H2039</pub-id></citation>
</ref>
<ref id="ref19">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Sharma</surname> <given-names>G.</given-names></name> <name><surname>Joshi</surname> <given-names>A. M.</given-names></name></person-group> (<year>2022</year>) &#x2018;A fractal based machine learning method for automatic detection of epileptic seizures using EEG&#x2019;, <italic>2022 International Conference on Engineering and Emerging Technologies (ICEET)</italic> [Preprint].</citation>
</ref>
<ref id="ref20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname> <given-names>M.</given-names></name> <name><surname>Shah</surname> <given-names>S.</given-names></name> <name><surname>Achuth</surname> <given-names>P. V.</given-names></name></person-group> (<year>2019</year>). <article-title>A novel approach for epilepsy detection using time&#x2013;frequency localized bi-orthogonal wavelet filter</article-title>. <source>J. Mech. Med. Biol.</source> <volume>19</volume>:<fpage>1940007</fpage>. doi: <pub-id pub-id-type="doi">10.1142/s0219519419400074</pub-id></citation>
</ref>
<ref id="ref21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>K.</given-names></name> <name><surname>Malhotra</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Two-layer LSTM network-based prediction of epileptic seizures using EEG spectral features</article-title>. <source>Complex Intel. Syst.</source> <volume>8</volume>, <fpage>2405</fpage>&#x2013;<lpage>2418</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s40747-021-00627-z</pub-id></citation>
</ref>
<ref id="ref22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Srinivasan</surname> <given-names>V.</given-names></name> <name><surname>Eswaran</surname> <given-names>C.</given-names></name> <name><surname>Sriraam</surname> <given-names>N.</given-names></name></person-group> (<year>2007</year>). <article-title>Approximate entropy-based epileptic EEG detection using artificial neural networks</article-title>. <source>IEEE Trans. Inf. Technol. Biomed.</source> <volume>11</volume>, <fpage>288</fpage>&#x2013;<lpage>295</lpage>. doi: <pub-id pub-id-type="doi">10.1109/titb.2006.884369</pub-id>, PMID: <pub-id pub-id-type="pmid">17521078</pub-id></citation>
</ref>
<ref id="ref23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Truong</surname> <given-names>N. D.</given-names></name> <name><surname>Nguyen</surname> <given-names>A. D.</given-names></name> <name><surname>Kuhlmann</surname> <given-names>L.</given-names></name> <name><surname>Bonyadi</surname> <given-names>M. R.</given-names></name> <name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Ippolito</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Convolutional neural networks for seizure prediction using intracranial and scalp electroencephalogram</article-title>. <source>Neural. Netw.</source> <volume>105</volume>, <fpage>104</fpage>&#x2013;<lpage>111</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neunet.2018.04.018</pub-id>, PMID: <pub-id pub-id-type="pmid">29793128</pub-id></citation>
</ref>
<ref id="ref24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tuncer</surname> <given-names>E.</given-names></name> <name><surname>Bolat</surname> <given-names>E. D.</given-names></name></person-group> (<year>2022</year>). <article-title>Classification of epileptic seizures from electroencephalogram (EEG) data using bidirectional short-term memory (bi-LSTM) network architecture</article-title>. <source>Biomed. Signal Proc. Control</source> <volume>73</volume>:<fpage>103462</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2021.103462</pub-id></citation>
</ref>
<ref id="ref25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Versaci</surname> <given-names>M.</given-names></name> <name><surname>Morabito</surname> <given-names>F. C.</given-names></name></person-group> (<year>2003</year>). <article-title>Fuzzy time series approach for disruption prediction in tokamak reactors</article-title>. <source>IEEE Trans. Magn.</source> <volume>39</volume>, <fpage>1503</fpage>&#x2013;<lpage>1506</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMAG.2003.810365</pub-id></citation>
</ref>
<ref id="ref26">
<citation citation-type="other"><person-group person-group-type="author"><collab id="coll1">World Health Organization</collab></person-group>. (<year>2022</year>) Available at: <ext-link xlink:href="https://www.who.int/news/item/27-05-2022-seventy-fifth-world-health-assembly---daily-update--27-may-2022" ext-link-type="uri">https://www.who.int/news/item/27-05-2022-seventy-fifth-world-health-assembly---daily-update--27-may-2022</ext-link> [Accessed: 06 October 2022].</citation>
</ref>
<ref id="ref27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xiang</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Cao</surname> <given-names>R.</given-names></name> <name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Han</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>The detection of epileptic seizure signals based on fuzzy entropy</article-title>. <source>J. Neurosci. Methods</source> <volume>243</volume>, <fpage>18</fpage>&#x2013;<lpage>25</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jneumeth.2015.01.015</pub-id>, PMID: <pub-id pub-id-type="pmid">25614384</pub-id></citation>
</ref>
<ref id="ref28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xing</surname> <given-names>M.</given-names></name> <name><surname>Hu</surname> <given-names>S.</given-names></name> <name><surname>Wei</surname> <given-names>B.</given-names></name> <name><surname>Lv</surname> <given-names>Z.</given-names></name></person-group> (<year>2022</year>). <article-title>Spatial-frequency-temporal convolutional recurrent network for olfactory-enhanced EEG emotion recognition</article-title>. <source>J. Neurosci. Methods</source> <volume>376</volume>:<fpage>109624</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jneumeth.2022.109624</pub-id>, PMID: <pub-id pub-id-type="pmid">35588948</pub-id></citation>
</ref>
<ref id="ref29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>Z.</given-names></name> <name><surname>Albera</surname> <given-names>L.</given-names></name> <name><surname>Le Bouquin Jeannes</surname> <given-names>R.</given-names></name> <name><surname>Kachenoura</surname> <given-names>A.</given-names></name> <name><surname>Karfoul</surname> <given-names>A.</given-names></name> <name><surname>Yang</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Epileptic seizure prediction using deep neural networks via transfer learning and multi-feature fusion</article-title>. <source>Int. J. Neural. Syst.</source> <volume>32</volume>:<fpage>2250032</fpage>. doi: <pub-id pub-id-type="doi">10.1142/s0129065722500320</pub-id>, PMID: <pub-id pub-id-type="pmid">35695914</pub-id></citation>
</ref>
<ref id="ref30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>D.</given-names></name> <name><surname>Ranjan</surname> <given-names>R.</given-names></name> <name><surname>Ke</surname> <given-names>H.</given-names></name> <name><surname>Tang</surname> <given-names>Y.</given-names></name> <name><surname>Zomaya</surname> <given-names>A. Y.</given-names></name></person-group> (<year>2020</year>). <article-title>A lightweight solution to epileptic seizure prediction based on EEG synchronization measurement</article-title>. <source>J. Supercomput.</source> <volume>77</volume>, <fpage>3914</fpage>&#x2013;<lpage>3932</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11227-020-03426-4</pub-id></citation>
</ref>
<ref id="ref31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Ding</surname> <given-names>J.</given-names></name> <name><surname>Kong</surname> <given-names>W.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>Q.</given-names></name> <name><surname>Jiang</surname> <given-names>T.</given-names></name></person-group> (<year>2021</year>). <article-title>Epilepsy prediction through optimized multidimensional sample entropy and bi-LSTM</article-title>. <source>Biomed. Signal Proc. Control</source> <volume>64</volume>:<fpage>102293</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2020.102293</pub-id></citation>
</ref>
</ref-list>
</back>
</article>