<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Comput. Neurosci.</journal-id>
<journal-title>Frontiers in Computational Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Comput. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5188</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fncom.2023.1286664</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Artificial neural network models: implementation of functional near-infrared spectroscopy-based spontaneous lie detection in an interactive scenario</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Bhutta</surname>
<given-names>M. Raheel</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/239156/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology"/>
<role content-type="https://credit.niso.org/contributor-roles/software"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Ali</surname>
<given-names>Muhammad Umair</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1275589/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology"/>
<role content-type="https://credit.niso.org/contributor-roles/software"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Zafar</surname>
<given-names>Amad</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/570476/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing"/>
<role content-type="https://credit.niso.org/contributor-roles/validation"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Kwang Su</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Byun</surname>
<given-names>Jong Hyuk</given-names>
</name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2301861/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration"/>
<role content-type="https://credit.niso.org/contributor-roles/resources"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Lee</surname>
<given-names>Seung Won</given-names>
</name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="corresp" rid="c003"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2058535/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration"/>
<role content-type="https://credit.niso.org/contributor-roles/resources"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Electrical and Computer Engineering, University of Utah Asia Campus</institution>, <addr-line>Incheon</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Intelligent Mechatronics Engineering, Sejong University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Scientific Computing, Pukyong National University</institution>, <addr-line>Busan</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff4"><sup>4</sup><institution>Interdisciplinary Biology Laboratory (iBLab), Division of Biological Science, Graduate School of Science, Nagoya University</institution>, <addr-line>Nagoya</addr-line>, <country>Japan</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Mathematics and Institute of Mathematical Science, Pusan National University</institution>, <addr-line>Busan</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff6"><sup>6</sup><institution>Finance Fishery Manufacture Industrial Mathematics Center on BigData, Pusan National University</institution>, <addr-line>Busan</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff7"><sup>7</sup><institution>Department of Precision Medicine, Sungkyunkwan University School of Medicine</institution>, <addr-line>Suwon</addr-line>, <country>Republic of Korea</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0002">
<p>Edited by: Hava T. Siegelmann, The State University of New Jersey, United States</p>
</fn>
<fn fn-type="edited-by" id="fn0003">
<p>Reviewed by: Farzan Majeed Noori, University of Oslo, Norway; Nauman Khalid Qureshi, ETH Z&#x00FC;rich, Switzerland</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Amad Zafar, <email>amad@sejong.ac.kr</email></corresp>
<corresp id="c002">Jong Hyuk Byun, <email>maticax@pusan.ac.kr</email></corresp>
<corresp id="c003">Seung Won Lee, <email>lsw2920@gmail.com</email></corresp>
<fn fn-type="equal" id="fn0001"><p><sup>&#x2020;</sup>These authors have contributed equally to this work and share first authorship</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>24</day>
<month>01</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>17</volume>
<elocation-id>1286664</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>09</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>11</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Bhutta, Ali, Zafar, Kim, Byun and Lee.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Bhutta, Ali, Zafar, Kim, Byun and Lee</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Deception is an inevitable occurrence in daily life. Various methods have been used to understand the mechanisms underlying brain deception. Moreover, numerous efforts have been undertaken to detect deception and truth-telling. Functional near-infrared spectroscopy (fNIRS) has great potential for neurological applications compared with other state-of-the-art methods. Therefore, an fNIRS-based spontaneous lie detection model was used in the present study. We interviewed 10 healthy subjects to identify deception using the fNIRS system. A card game frequently referred to as a bluff or cheat was introduced. This game was selected because its rules are ideal for testing our hypotheses. The optical probe of the fNIRS was placed on the subject&#x2019;s forehead, and we acquired optical density signals, which were then converted into oxy-hemoglobin and deoxy-hemoglobin signals using the Modified Beer&#x2013;Lambert law. The oxy-hemoglobin signal was preprocessed to eliminate noise. In this study, we proposed three artificial neural networks inspired by deep learning models, including AlexNet, ResNet, and GoogleNet, to classify deception and truth-telling. The proposed models achieved accuracies of 88.5%, 88.0%, and 90.0%, respectively. These proposed models were compared with other classification models, including k-nearest neighbor, linear support vector machines (SVM), quadratic SVM, cubic SVM, simple decision trees, and complex decision trees. These comparisons showed that the proposed models performed better than the other state-of-the-art methods.</p>
</abstract>
<kwd-group>
<kwd>spontaneous lie detection</kwd>
<kwd>deception</kwd>
<kwd>deep learning algorithm</kwd>
<kwd>functional near-infrared spectroscopy (fNIRS)</kwd>
<kwd>classification</kwd>
</kwd-group>
<contract-sponsor id="cn1">National Research Foundation<named-content content-type="fundref-id">10.13039/501100001321</named-content></contract-sponsor>
<contract-sponsor id="cn2">Ministry of Science and ICT</contract-sponsor>
<contract-sponsor id="cn3">National Research Foundation of Korea<named-content content-type="fundref-id">10.13039/501100003725</named-content></contract-sponsor>
<contract-sponsor id="cn4">Korea government (MSIT)</contract-sponsor>
<counts>
<fig-count count="7"/>
<table-count count="3"/>
<equation-count count="6"/>
<ref-count count="62"/>
<page-count count="13"/>
<word-count count="7977"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1.</label>
<title>Introduction</title>
<p>Deception is an intrinsic and unavoidable facet of our society, manifesting itself in everyday life. It is unsurprising for a person to encounter or be involved in multiple deceptive situations within a single day. Failure to identify deception has serious consequences for the victim. To avoid being deceived, people have begun to study the behavioral and physiological changes exhibited by deceivers. Hence, this study aimed to detect the differences between hemodynamic signals during spontaneous deception and classify between truth and lie during an interactive game paradigm.</p>
<p>In earlier times, people identified deceivers based on the deceiver&#x2019;s personality or their own personal experiences (<xref ref-type="bibr" rid="ref22">Freud and Strachey, 1962</xref>; <xref ref-type="bibr" rid="ref63">Zuckerman et al., 1981b</xref>; <xref ref-type="bibr" rid="ref35">Kleinmuntz and Szucko, 1984</xref>; <xref ref-type="bibr" rid="ref46">Peterman and Anderson, 1999</xref>). Additionally, during earlier times, people often relied on myths based on religious norms to identify a person who was being untruthful (<xref ref-type="bibr" rid="ref56">Trovillo, 1938</xref>). Advancements in scientific methods and new equipment, including polygraphs, have enabled us to better understand the cues of deception that are beyond the scope of religious beliefs, personal experience, and stereotypes (<xref ref-type="bibr" rid="ref10">Brett et al., 1986</xref>; <xref ref-type="bibr" rid="ref59">Varisai Mohamed et al., 2006</xref>). These physiological measures have revealed many new findings that provide the basis for numerous theories, such as the non-verbal leakage theory (<xref ref-type="bibr" rid="ref17">Ekman et al., 1969</xref>), four-factor theory (<xref ref-type="bibr" rid="ref62">Zuckerman et al., 1981a</xref>), and interpersonal deception theory (<xref ref-type="bibr" rid="ref11">Buller and Burgoon, 1996</xref>). These theories have helped us understand why these cues of deception manifest in humans when attempting to deceive someone (<xref ref-type="bibr" rid="ref9">Bond et al., 2014</xref>). Most of these theories agree that the intent and process of deception invoke changes in the deceiver&#x2019;s behavior that result from changes in the person&#x2019;s state of mind.</p>
<p>Many researchers have investigated different neurophysiological signals to identify changes in an individual&#x2019;s mental state while they are attempting to deceive. One such technique is electroencephalography (EEG), which records event-related potentials (ERPs) from the scalp (<xref ref-type="bibr" rid="ref2">Abootalebi et al., 2009</xref>; <xref ref-type="bibr" rid="ref40">Meijer et al., 2013</xref>). ERPs are mainly used to test knowledge of crime details that are only known to the criminals involved (<xref ref-type="bibr" rid="ref21">Farwell and Donchin, 1991</xref>). This type of test is commonly known as the guilty knowledge test or concealed information test (<xref ref-type="bibr" rid="ref23">Furedy and Ben-Shakhar, 1991</xref>; <xref ref-type="bibr" rid="ref18">Elaad and Ben-Shakhar, 1997</xref>; <xref ref-type="bibr" rid="ref36">Kong et al., 2012</xref>). EEG has excellent temporal resolution, enabling rapid detection of brain signals (<xref ref-type="bibr" rid="ref57">Turnip et al., 2011</xref>; <xref ref-type="bibr" rid="ref12">Chen et al., 2023</xref>), but exhibits poor spatial resolution, which cannot confine the brain area associated with the deception process.</p>
<p>Functional magnetic resonance imaging (fMRI) is another technique widely used to detect brain areas activated during deception. fMRI offers a substantial advantage in terms of high spatial resolution when compared to EEG (<xref ref-type="bibr" rid="ref53">Spence et al., 2004</xref>). It can effectively localize changes in regional blood flow (<xref ref-type="bibr" rid="ref20">Farah et al., 2014</xref>), and the latter work provides a comprehensive review of fMRI-based deception decoding. Because of the high cost of scanners and their bulky size, the use of fMRI is very limited in day-to-day human routines. Moreover, fMRI is highly sensitive to motion artifacts. Therefore, researchers have embarked on exploring an alternative brain imaging technique: functional near-infrared spectroscopy (fNIRS).</p>
<p>Using fNIRS, brain activity is measured through hemodynamic responses associated with neuronal behavior (<xref ref-type="bibr" rid="ref32">Kamran and Hong, 2013</xref>; <xref ref-type="bibr" rid="ref50">Santosa et al., 2013</xref>; <xref ref-type="bibr" rid="ref33">Khan et al., 2014</xref>; <xref ref-type="bibr" rid="ref49">Ruotsalo et al., 2023</xref>). The fNIRS can provide topographic (<xref ref-type="bibr" rid="ref45">Obrig and Villringer, 2003</xref>; <xref ref-type="bibr" rid="ref60">Wolf et al., 2007</xref>; <xref ref-type="bibr" rid="ref27">Hu et al., 2011</xref>; <xref ref-type="bibr" rid="ref38">Li et al., 2018</xref>) and tomographic brain images (<xref ref-type="bibr" rid="ref7">Bluestone et al., 2001</xref>; <xref ref-type="bibr" rid="ref8">Boas et al., 2004</xref>). Oxy-hemoglobin (HbO), deoxy-hemoglobin (HbR), and water are significant light absorbers, whereas skin, tissue, and bone are mainly transparent to near-infrared light within an optical window of 650&#x2013;1,000&#x2009;nm. Compared with EEG and fMRI, fNIRS offers a superior tradeoff between temporal and spatial resolutions. One study (<xref ref-type="bibr" rid="ref31">Irani et al., 2007</xref>) compared the features of fNIRS and fMRI and reported that fNIRS has excellent potential for psychiatric and neurological applications because of its portability, simplicity, and insensitivity to motion artifacts compared to fMRI. fNIRS also has several advantages over other brain imaging techniques; it can be designed in a compact and portable form, is very cost-effective (<xref ref-type="bibr" rid="ref41">Muehlemann et al., 2008</xref>; <xref ref-type="bibr" rid="ref5">Bhutta et al., 2014</xref>; <xref ref-type="bibr" rid="ref55">Toglia et al., 2022</xref>), and can be used in diverse fields such as neuroscience, brain-computer interfaces (<xref ref-type="bibr" rid="ref42">Naseer and Hong, 2013a</xref>,<xref ref-type="bibr" rid="ref43">b</xref>), and rehabilitation.</p>
</sec>
<sec id="sec2">
<label>2.</label>
<title>Literature review</title>
<p>Limited research has been conducted in the field of fNIRS-based deception decoding (<xref ref-type="bibr" rid="ref54">Tian et al., 2009</xref>; <xref ref-type="bibr" rid="ref28">Hu et al., 2012</xref>; <xref ref-type="bibr" rid="ref15">Ding et al., 2013</xref>, <xref ref-type="bibr" rid="ref16">2014</xref>; <xref ref-type="bibr" rid="ref4">Bhutta et al., 2015</xref>; <xref ref-type="bibr" rid="ref19">Emberson et al., 2017</xref>; <xref ref-type="bibr" rid="ref47">Quaresima and Ferrari, 2019</xref>). To detect deception, one study (<xref ref-type="bibr" rid="ref28">Hu et al., 2012</xref>) employed a mock crime paradigm. Because individuals were instructed to provide deceptive or truthful responses at specified times and locations, this research, which was based on the concealed information test, did not incorporate a spontaneous paradigm. The first study to use fNIRS to identify the neural correlates of spontaneous deception was conducted by <xref ref-type="bibr" rid="ref15">Ding et al. (2013)</xref>. These aforementioned studies on fNIRS-based deception decoding have exclusively investigated cases of deceptions where the perpetrator lies to an unsuspecting victim; this type of deception occurs more frequently in casual social interactions. In contrast, there are also situations in which the perpetrator deliberately misleads the victim, even though both parties are fully aware of the attempt at deception. This type of circumstance is typically referred to as reverse psychology, and it frequently occurs in highly competitive settings, such as diplomatic meetings, political debates and elections, sports, card games (including gambling), and other various scenarios. In this scenario, the individual employing reverse psychology can deceive the victim not only by uttering a false statement but also by making a truthful remark. 
The deceiver may choose to speak the truth, knowing that the victim is aware of the deceptive intention, yet the victim interprets it as a lie, thus believing the contrary. Consequently, speaking the truth serves the deceiver&#x2019;s purpose of misleading the victim.</p>
<p>Deep learning classifiers have been widely used recently. A deep neural network (DNN) is composed of multiple layers of nonlinear processing modules called neurons (<xref ref-type="bibr" rid="ref51">Schmidhuber, 2015</xref>; <xref ref-type="bibr" rid="ref30">Huve et al., 2018</xref>). These fully connected or semi-connected neurons receive inputs from previous consecutive neurons. DNN can achieve superior classification performance in comparison to linear classifiers, such as linear discernment analysis (LDA), support vector machine (SVM), and others when applied to signals (language and speech processing) or images (<xref ref-type="bibr" rid="ref14">Collobert and Weston, 2008</xref>; <xref ref-type="bibr" rid="ref37">Krizhevsky et al., 2012</xref>; <xref ref-type="bibr" rid="ref6">Bianchini and Scarselli, 2014</xref>; <xref ref-type="bibr" rid="ref52">Simonyan and Zisserman, 2014</xref>). Hence, DNN classifiers are also gaining attention in the biomedical field (<xref ref-type="bibr" rid="ref29">Hudson and Cohen, 2000</xref>; <xref ref-type="bibr" rid="ref13">Cire&#x015F;an et al., 2013</xref>; <xref ref-type="bibr" rid="ref48">Ronneberger et al., 2015</xref>).</p>
<p>Only a few studies have employed DNN for classification. <xref ref-type="bibr" rid="ref1">Abibullaev et al. (2011)</xref> investigated the performance of a DNN in a four-class classification experiment and reported a maximum accuracy of 94%. <xref ref-type="bibr" rid="ref61">Yi et al. (2013)</xref> used a DNN to classify left and right motor imagery with an average classification accuracy of 84%. <xref ref-type="bibr" rid="ref24">Hennrich et al. (2015)</xref> reported a similar classification performance of DNN compared to that of other classifiers (such as LDA and SVM) in a three-mental task experiment. To the best of our knowledge, no previous study has used a DNN for spontaneous deception decoding using fNIRS.</p>
<p>In this study, we hypothesized that, in the real world, a deceiver can deceive another person not only by telling a lie but also by telling the truth. Therefore, the objectives of this study were to:</p>
<list list-type="bullet">
<list-item>
<p>compare the differences between the hemodynamic responses produced by spontaneous lying and stating the truth,</p>
</list-item>
<list-item>
<p>classify between the lie and truth for an interactive game paradigm,</p>
</list-item>
<list-item>
<p>develop three deep ANN models for classifying responses, and</p>
</list-item>
<list-item>
<p>compare the performance of the proposed deep ANN with other classifiers, such as LDA and SVM, in a spontaneous deception decoding paradigm.</p>
</list-item>
</list>
<p>According to these findings, the fNIRS system can accurately identify changes in HbO signals during spontaneous lies and truths.</p>
</sec>
<sec sec-type="materials|methods" id="sec3">
<label>3.</label>
<title>Materials and methods</title>
<sec id="sec4">
<label>3.1.</label>
<title>Subjects</title>
<p>Ten healthy male individuals (mean age 30.8&#x2009;&#x00B1;&#x2009;3.68) participated in the experiments. Each subject had normal or corrected-to-normal eyesight. Of the 10 subjects, nine were right-handed. None of the subjects had any history of mental or neurological illness. The card game was known to all subjects. Informed consent was obtained from all subjects, and the experiments were performed in accordance with the latest Declaration of Helsinki. The framework proposed in this study is illustrated in <xref ref-type="fig" rid="fig1">Figure 1</xref>. The framework is divided into two blocks: a training block (blue dotted lines) and a testing block (green dotted lines). The training block was used to train the neural network models on the given data, whereas the testing block was used to classify the data into truth and lie classes based on the model trained in the training block. Information on the individual blocks is provided in the respective sections of the article.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Proposed framework for spontaneous lie detection in an interactive scenario. SM: signal mean and SS: signal slope.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g001.tif"/>
</fig>
</sec>
<sec id="sec5">
<label>3.2.</label>
<title>Experimental procedure</title>
<p>The subjects were seated comfortably in front of a second person (referred to as the victim). The subjects and victims underwent three practice sessions, and a brief explanation of the experiment was provided beforehand to ensure that they fully understood the guidelines.</p>
<p>A well-selected experimental paradigm was used in this study. The experimental paradigm was a card game known as bluff or cheat. The bluff game was chosen because the rules of the game are ideal for testing our hypotheses. Our objective was to distinguish between deceptive actions when the subject is speaking the truth and when they are intentionally deceiving the victim with a falsehood.</p>
<p>The game rules are straightforward. The subject received 20 randomly selected cards, with the consideration that a minimum of six of these cards had no corresponding matches. Therefore, the subject had to lie at least four or five times in order to get rid of those cards. The subject had to play out all of their cards without revealing any signs of bluffing. The subjects had 1&#x2009;min to carefully organize all their cards prior to starting the experiment. The duration of each experiment was approximately 10&#x2009;min, with each experiment having a maximum of 20 sessions, each lasting approximately 30&#x2009;s. In each session, the first 15&#x2009;s were allotted for card arrangement. The subject had to lie to the victim in the next 5&#x2009;s (called &#x201C;claim time&#x201D;) by laying their cards face down on the table and declaring what kind of cards they are (for instance, &#x201C;three sevens&#x201D;). Depending on their claim, the subject could select any number of cards between two and four. However, this assertion may or may not be correct. The victim then had 10&#x2009;s to react to the subject&#x2019;s assertion (response time). If the victim believed that the subject was telling the truth, they could choose to pass, removing the pile from the table. However, if the victim suspected that the subject had lied in their claim, they had the option to flip the cards face up. If the subject lied, the pile was returned to the subject. However, if the subject was truthful, the pile was removed from the table, and the next session commenced. The game continued for 20 sessions. A prize of 10,000 Korean Won was to be awarded to the subject if they managed to play all their cards within 20 sessions; however, if they failed to do so, they would not receive the prize money. There were 12 total subjects in this trial. 
Two respondents&#x2019; data were excluded from the analysis as they consistently spoke the truth at the beginning of trials and only lied towards the end, rendering their responses predictable. Eight out of ten subjects were able to play all of their cards. One administrator continuously monitored the experiment and documented the trials in which the subject deceived the victim.</p>
</sec>
<sec id="sec6">
<label>3.3.</label>
<title>Data acquisition</title>
<p>A lab-built multichannel continuous-wave imaging system captured the brain signals (<xref ref-type="bibr" rid="ref3">Bhutta and Hong, 2013</xref>). The optical probe of the fNIRS system was positioned on the forehead of the subject such that the FP1 and FP2 locations were in the middle of the probe, as shown in <xref ref-type="fig" rid="fig2">Figure 2</xref>. To connect the flexible probe and ensure excellent contact between its emitters and detectors and the subject&#x2019;s scalp, hair was brushed backward. Self-adhesive bandages were used to secure the probe to the subject&#x2019;s head. The emitters and detectors were systematically positioned within a 4.3&#x2009;&#x00D7;&#x2009;13&#x2009;cm<sup>2</sup> area with a source-detector distance of approximately 3&#x2009;cm. A sampling rate of approximately 3.8&#x2009;Hz was used to collect the data. A Velcro band was used to hold the probe at the appropriate location throughout the experiment.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Optode placement and channel configuration.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g002.tif"/>
</fig>
</sec>
<sec id="sec7">
<label>3.4.</label>
<title>Data processing</title>
<p>MATLAB (MathWorks, United States) was used to import and further analyze the signals from the fNIRS equipment offline. The data were stored in a host computer text file as digitized raw intensity values from the fNIRS system. The hemoglobin conversion block of the framework was used to convert the intensity values to concentration changes of HbO and HbR using the Modified Beer&#x2013;Lambert law (<xref ref-type="bibr" rid="ref4">Bhutta et al., 2015</xref>). The change in optical density (&#x0394;OD) was calculated using these raw intensity values at each discrete time k as:</p>
<disp-formula id="EQ1">
<label>(1)</label>
<mml:math id="M1">
<mml:mover>
<mml:mrow>
<mml:mi mathvariant="normal">&#x0394;</mml:mi>
<mml:mi>O</mml:mi>
<mml:mi>D</mml:mi>
<mml:mfenced open="(" close=")" separators=";">
<mml:mi>k</mml:mi>
<mml:mi>&#x03BB;</mml:mi>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x0307;</mml:mo>
</mml:mover>
<mml:mo>=</mml:mo>
<mml:mo>ln</mml:mo>
<mml:mfrac>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi mathvariant="italic">out</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>&#x03BB;</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mi mathvariant="italic">in</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=";">
<mml:mi>k</mml:mi>
<mml:mi>&#x03BB;</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>=</mml:mo>
<mml:mi>l</mml:mi>
<mml:mi>d</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>&#x03BB;</mml:mi>
</mml:mfenced>
<mml:mi mathvariant="normal">&#x0394;</mml:mi>
<mml:msub>
<mml:mi>&#x03BC;</mml:mi>
<mml:mi>a</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=";">
<mml:mi>k</mml:mi>
<mml:mi>&#x03BB;</mml:mi>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>Where <italic>I<sub>out</sub></italic> is the intensity of detected light; <italic>I<sub>in</sub></italic>, intensity of incident light; <italic>d</italic>, differential path length factor; <italic>l</italic>, distance between the emitter and detector; and &#x0394;<italic>&#x03BC;<sub>a</sub></italic>, absorption change of the tissue. The changes of HbO (&#x0394;<italic>c</italic><sub>HbO</sub>) and HbR (&#x0394;<italic>c</italic><sub>HbR</sub>) were measured using the modified Beer&#x2013;Lambert Law as (<xref ref-type="bibr" rid="ref4">Bhutta et al., 2015</xref>):</p>
<disp-formula id="EQ2">
<label>(2)</label>
<mml:math id="M2">
<mml:mrow>
<mml:mfenced close="]" open="[">
<mml:mrow>
<mml:mtable equalrows="true" equalcolumns="true">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>&#x0394;</mml:mi>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>O</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mfenced>
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>&#x0394;</mml:mi>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mfenced>
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced close="]" open="[">
<mml:mrow>
<mml:mtable equalrows="true" equalcolumns="true">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:msup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:msubsup>
<mml:mi>&#x03B1;</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>O</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:msup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:msubsup>
<mml:mi>&#x03B1;</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:msup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:msubsup>
<mml:mi>&#x03B1;</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>O</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:msup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:msubsup>
<mml:mi>&#x03B1;</mml:mi>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>&#x00D7;</mml:mo>
<mml:mfenced close="]" open="[">
<mml:mrow>
<mml:mtable equalrows="true" equalcolumns="true">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>&#x0394;</mml:mi>
<mml:mi>O</mml:mi>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mfenced>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>;</mml:mo>
<mml:mi>&#x03BB;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>&#x0394;</mml:mi>
<mml:mi>O</mml:mi>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mfenced>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>;</mml:mo>
<mml:mi>&#x03BB;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="thickmathspace"/>
</mml:mrow>
</mml:math>
</disp-formula>
<p>with &#x03BB;<sub>1</sub>&#x2009;=&#x2009;640&#x2009;nm, &#x03BB;<sub>2</sub>&#x2009;=&#x2009;910&#x2009;nm, <italic>d</italic><sup>&#x03BB;<sub>1</sub></sup>&#x2009;=&#x2009;6.63, and <italic>d</italic><sup>&#x03BB;<sub>2</sub></sup>&#x2009;=&#x2009;2.765, according to the values for the wavelength-dependent absorption coefficients <italic>&#x03B1;</italic><sub>HbO</sub>, <italic>&#x03B1;</italic><sub>HbR</sub>. fNIRS, while detecting the hemodynamic responses, picks up the physiological noise of respiration, pulse, and low-frequency drift fluctuations. A second-order low-pass filter with a cutoff frequency of 0.15&#x2009;Hz was used to eliminate such noises (<xref ref-type="bibr" rid="ref28">Hu et al., 2012</xref>; <xref ref-type="bibr" rid="ref4">Bhutta et al., 2015</xref>). The HbO was considered for further analysis in this study because it is a more sensitive and reliable activity indicator than HbR (<xref ref-type="bibr" rid="ref25">Hoshi, 2003</xref>, <xref ref-type="bibr" rid="ref26">2007</xref>).</p>
</sec>
<sec id="sec8">
<label>3.5.</label>
<title>Classification</title>
<p>Once the data were preprocessed, classification was performed on the &#x0394;<italic>c</italic><sub>HbO</sub>(<italic>k</italic>) signals. We conducted this classification to distinguish between lie and truth responses based on the features extracted from fNIRS signals. The features selected in this study were the signal mean (SM) and signal slope (SS) of the HbO signal during the 5-s claim period of the stimulus. We used this claim period because it is the actual time at which the subject attempted to deceive the victim by either telling the truth or lying. The average HbO signal for each subject was obtained by averaging all 12 channels of the corresponding subject. SM and SS values over a 5-s window can yield better results in binary classification (<xref ref-type="bibr" rid="ref33">Khan et al., 2014</xref>; <xref ref-type="bibr" rid="ref4">Bhutta et al., 2015</xref>).</p>
<p>In this study, we performed the classification using various classifiers categorized into linear and nonlinear categories. LDA and SVM are the main linear classifiers, whereas the ANN is a nonlinear classifier. Both the LDA and SVM algorithms classify different classes of data based on hyperplanes. In LDA, a separating hyperplane is generated to minimize the interclass variance and maximize the distance between the class means (<xref ref-type="bibr" rid="ref39">Lotte et al., 2007</xref>). For the SVM classifier, a separating hyperplane is designed such that the distance between the hyperplane and the nearest training point(s) is maximized (<xref ref-type="bibr" rid="ref44">Naseer et al., 2014</xref>).</p>
<p>Mainstream machine learning techniques can be categorized as linear or nonlinear classifiers. Linear classifiers classify a sample based on the value of the linear combination of its features. For example, assume that we have an input feature vector x. A linear classifier then constructs a function that directly assigns the input vector x to a specific class:</p>
<disp-formula id="EQ3">
<label>(3)</label>
<mml:math id="M3">
<mml:mrow>
<mml:mtext>f</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mtext>x</mml:mtext>
</mml:mrow>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mspace width="0.25em"/>
<mml:mspace width="0.25em"/>
<mml:mspace width="0.25em"/>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:mtable columnalign="center">
<mml:mtr columnalign="center">
<mml:mtd columnalign="center">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mspace width="0.25em"/>
<mml:mspace width="0.25em"/>
<mml:mspace width="0.25em"/>
<mml:mtext>if</mml:mtext>
<mml:mspace width="0.25em"/>
<mml:mtext>x</mml:mtext>
<mml:mo>&#x003E;</mml:mo>
<mml:mtext>threshold</mml:mtext>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr columnalign="center">
<mml:mtd columnalign="center">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mspace width="0.25em"/>
<mml:mspace width="0.25em"/>
<mml:mspace width="0.25em"/>
<mml:mtext>otherwise</mml:mtext>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>A linear SVM is a linear classifier that makes decisions according to a linear hyperplane capable of effectively segregating data. SVM finds an optimal hyperplane by maximizing the margin, which is the minimum distance between the hyperplane and any of the data samples. Such classifiers perform well when the problem is linearly separable. However, if the data are not linearly separable, they will have poor generalization ability. In this case, we could map the input vector onto a higher-dimensional space using the kernel function K and find the separating hyperplane in that particular dimension. Quadratic SVM and Cubic SVM are examples of kernelized versions of SVM that utilize second and third-degree polynomial kernels.</p>
<disp-formula id="EQ4">
<label>(4)</label>
<mml:math id="M4">
<mml:mrow>
<mml:mtext>K</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mtext>x</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>i</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mtext>x</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>j</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mtext>x</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>i</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>T</mml:mtext>
</mml:mrow>
</mml:msubsup>
<mml:msub>
<mml:mrow>
<mml:mtext>x</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>j</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x03C1;</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</disp-formula>
<p>In the machine learning literature, several other algorithms handle nonlinear cases using a completely different computational approach; one of the simplest algorithms is the K-Nearest Neighbor (KNN). The main idea of this algorithm is that, for a new instance to be classified, the algorithm searches for the K-nearest points in the feature space and assigns it to the class that prevails among its neighbors. Similarly, the decision tree constructs a classification model with a tree-like structure. It partitions a feature space into smaller regions containing homogeneous instances while incrementally constructing an associated decision tree. The partitions of the feature space are usually based on criteria such as the Gini impurity, information gain, or distance measure.</p>
</sec>
<sec id="sec9">
<label>3.6.</label>
<title>Proposed artificial neural networks (ANN) models</title>
<p>In recent years, artificial neural networks have flourished in the machine learning and pattern recognition domains. They consist of many interconnected processing units, called neurons. The outputs of the hidden layer neurons are transmitted to the inputs of the next hidden layer within the network (<xref ref-type="bibr" rid="ref58">Ullah et al., 2020</xref>). Thus, they communicate with each other by emitting signals over numerous weighted connections. During training, each neuron can update its weight, allowing the network to learn hidden patterns from the data. In this study, we designed three ANN architectures (M1, M2, and M3) to conduct experiments on our dataset. These structures were designed based on ideas from state-of-the-art convolutional neural network models, including AlexNet, ResNet, and GoogleNet. The numbers of input and output nodes and hidden layers of these neural networks are the same; however, the number of nodes in each hidden layer varies. The first two layers of M1 contain 10 neurons; the subsequent two hidden layers consist of eight and five neurons, respectively; and finally, the prediction layer contains two SoftMax classifiers. The M2 topology is similar to that of M1; however, we introduced two pairs of hidden layers with the same number of neurons in this structure. The first two layers had eight neurons, and the next pair had layers containing four neurons. We designed a third neural network architecture that differed from the aforementioned architecture. In this structure, we first increased the number of neuron dimensions from two to six and six to eight and then decreased it from eight to six and six to two neurons for the final class prediction. The architectures of the three ANNs are shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>. Neural networks have weights that are initially randomly initialized, and later in the training process, these weights are optimized. 
The initial weights of our neural networks were determined using Kaiming uniform initialization (also known as HE initialization). This method is tailored for layers activated by the ReLU function and provides an advantage over random initialization. Specifically, HE initialization mitigates issues such as vanishing and exploding gradient problems, thereby enabling faster convergence during training. Aligning with the characteristics of ReLU, it also minimizes the occurrence of inactive neurons at the start of training. The empirical robustness of this method makes it a superior choice for deep network initialization compared to other simplistic methods. We selected four intermediate layers to achieve an optimal balance for our dataset. With only two features present in the input, it is essential to project them into a higher-dimensional space for feature extraction and subsequently condense the dimensions as we approach the classification layer. If we were to increase the number of hidden layers, the model would risk succumbing to the vanishing gradient problem. This is especially pertinent when processing only two features across excessive layers, as this is not advisable.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Neural network architectures for lie detection. Models M1, M2, and M3 process the mean and slope of the oxy-hemoglobin (HbO) signals through varying numbers of hidden layers and neurons. Each model produces a two-dimensional output representing the probabilities of a lie and truth.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g003.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="results" id="sec10">
<label>4.</label>
<title>Results and discussion</title>
<p>This section presents a comparative analysis of the six statistical machine-learning techniques and three neural network models. The experiments were conducted using the MATLAB 2018b classification learning toolbox and Python 3.5 with Keras. We utilized a confusion matrix, receiver operating curve (ROC), area under the curve (AUC), and subject-level performance evaluation for the proposed method, which are discussed in subsequent sections.</p>
<p>In the domain of machine learning, particularly when dealing with classification problems that require distinguishing between a number of different classes, the confusion matrix is considered an effective metric for evaluation. It is also known as the error matrix, as it indicates the error rate. It is used to show the effectiveness and performance of any trained classifier and summarizes the prediction results on any classification problem. We used a confusion matrix as an evaluation metric to demonstrate the performance of our proposed method.</p>
<p>The predictive class-wise results for different classifiers with different statistical classifier flavors are shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>. The top left corner in <xref ref-type="fig" rid="fig4">Figure 4</xref> shows the confusion matrix for the KNN classifier, followed by simple and complex decision trees with 55%, 77%, and 56% correct predictions, respectively. The accuracy achieved by these classifiers for positive classes is not convincing for real-world problems or for their deployment in different sectors. Therefore, we obtained better prediction results for the same data using different classifiers in the second row, starting from the linear SVM, followed by the quadratic and cubic SVM. The quadratic SVM achieved an average correct prediction result of 80%, which was surpassed by the cubic SVM. The cubic SVM obtained the highest prediction results, with 88% correct prediction results for the positive class on the same data, proving it to be the best fit for deployment and practical implementation in real-world lie detection problems. The overall accuracy performances of different classifiers are listed in <xref ref-type="table" rid="tab1">Table 1</xref>. <xref ref-type="table" rid="tab1">Table 1</xref> shows that the three proposed models outperformed all statistical machine learning classifiers and improved the overall accuracy of the system by 8%&#x2013;10%.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Confusion matrices of different statistical machine learning classifiers for lie prediction.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g004.tif"/>
</fig>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Comparison of different machine learning classifiers for overall accuracy.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">Overall accuracy (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">KNN</td>
<td align="center" valign="top">68.5</td>
</tr>
<tr>
<td align="left" valign="top">Simple decision tree</td>
<td align="center" valign="top">77.5</td>
</tr>
<tr>
<td align="left" valign="top">Complex decision tree</td>
<td align="center" valign="top">70.0</td>
</tr>
<tr>
<td align="left" valign="top">Linear SVM</td>
<td align="center" valign="top">80.0</td>
</tr>
<tr>
<td align="left" valign="top">Quadratic SVM</td>
<td align="center" valign="top">81.5</td>
</tr>
<tr>
<td align="left" valign="top">Cubic SVM</td>
<td align="center" valign="top">80.5</td>
</tr>
<tr>
<td align="left" valign="top">Proposed NN M1</td>
<td align="center" valign="top">88.5</td>
</tr>
<tr>
<td align="left" valign="top">Proposed NN M2</td>
<td align="center" valign="top">88.0</td>
</tr>
<tr>
<td align="left" valign="top">Proposed NN M3</td>
<td align="center" valign="top">90.0</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>KNN, k-nearest neighbor; SVM, support vector machines; NN, neural network.</p>
</table-wrap-foot>
</table-wrap>
<sec id="sec11">
<label>4.1.</label>
<title>ROC and AUC curves</title>
<p>In a binary classification problem, the output class is usually labeled as positive or negative. The classification results can be represented in a structured form called a confusion matrix. However, the confusion matrix only provides true- and false-positive results. Therefore, to check the performance of the classification model at different thresholds, we calculated the ROC curves for all classifiers. This ROC curve plots the True Positive Rate (TPR) and False Positive Rate (FPR) at various thresholds, where TPR is a synonym for recall. These can be defined as follows:</p>
<disp-formula id="EQ5">
<label>(5)</label>
<mml:math id="M5">
<mml:mrow>
<mml:mtext>TPR</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtext>TP</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>TP</mml:mtext>
<mml:mo>+</mml:mo>
<mml:mtext>FN</mml:mtext>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="EQ6">
<label>(6)</label>
<mml:math id="M6">
<mml:mrow>
<mml:mtext>FPR</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtext>FP</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext>FP</mml:mtext>
<mml:mo>+</mml:mo>
<mml:mtext>TN</mml:mtext>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Moreover, for further evaluation, it is crucial to compute the ROC points, which is a resource-intensive method. An efficient sorting-based measure, called the AUC, provides this information for evaluation. It measures the entire area under the ROC curve from (0,0) to (1,1). AUC offers an aggregate measure of performance at all possible thresholds. Thus, we calculated these values and obtained promising results for both the ROC curves and AUC values for all classifiers. The obtained AUC values and ROC curves for all classifiers are shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>. The SVM classifiers achieved better AUC values and ROC curves, obtaining 86%, 84%, and 83% AUC for linear, quadratic, and cubic SVM, respectively. In contrast, the KNN, simple decision tree, and complex decision tree achieved AUCs of 64%, 78%, and 73%, respectively. Linear SVM has better accuracy than other statistical machine-learning techniques. However, it is still ineffective for sensitive problems, such as lie detection. To achieve better performance, we proposed three different neural network structures that increased the accuracy of lie detection by 8%&#x2013;10%.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Receiver operating characteristic (ROC) curves and the area under the curve (AUC) values achieved from different hyperplane thresholds of six machine learning classifiers.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g005.tif"/>
</fig>
</sec>
<sec id="sec12">
<label>4.2.</label>
<title>Evaluation of the proposed ANN models</title>
<p>In the proposed method, we conducted experiments on our data using the three neural network models discussed in detail in the proposed methodology section. The models were trained for 50 epochs, and the data were divided into training, validation, and test sets of 60%, 20%, and 20%, respectively. The confusion matrices, ROC curves, and AUC for the three models are shown in <xref ref-type="fig" rid="fig6">Figure 6</xref>, and the overall accuracies are listed in <xref ref-type="table" rid="tab2">Table 2</xref>. The proposed neural network models outperformed statistical machine learning approaches by a large margin, reaching 90% overall accuracy for the M3 neural network model, which is a 10% increase in accuracy. We trained our models five times and obtained the highest accuracies of 88%, 88%, and 87% for the fourth folds of M1, M2, and M3, respectively. The confusion matrices of the three models were almost identical, demonstrating the effectiveness of the models for lie detection.</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>Receiver operating characteristic ROC curves, the area under the curve (AUC) values, and confusion matrices of three proposed neural network (NN) models for lie detection.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g006.tif"/>
</fig>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Results achieved by different trained models for sample test data.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Subject</th>
<th align="center" valign="top" rowspan="2">Sample</th>
<th align="center" valign="top" rowspan="2">Actual class</th>
<th align="center" valign="top" colspan="9">Prediction</th>
</tr>
<tr>
<th align="center" valign="top">NN M1</th>
<th align="center" valign="top">NN M2</th>
<th align="center" valign="top">NN M3</th>
<th align="center" valign="top">KNN</th>
<th align="center" valign="top">SDT</th>
<th align="center" valign="top">CDT</th>
<th align="center" valign="top">L-SVM</th>
<th align="center" valign="top">Q-SVM</th>
<th align="center" valign="top">C-SVM</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="3">1</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">2</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">3</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">4</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">5</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">6</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">7</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">8</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">9</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">10</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="center" valign="top">2</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
</tr>
<tr>
<td align="center" valign="top">3</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">True</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
<td align="center" valign="top">False</td>
</tr>
<tr>
<td align="left" valign="top" colspan="3">Average accuracy (%)</td>
<td align="center" valign="top">80</td>
<td align="center" valign="top">80</td>
<td align="center" valign="top">90</td>
<td align="center" valign="top">60</td>
<td align="center" valign="top">70</td>
<td align="center" valign="top">60</td>
<td align="center" valign="top">70</td>
<td align="center" valign="top">77</td>
<td align="center" valign="top">73</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>KNN, k-nearest neighbor; SVM, support vector machines; NN, neural network; SDT, simple decision tree; CDT, complex decision tree; L-SVM, linear-SVM; Q-SVM, quadratic-SVM; C-SVM, cubic-SVM.</p>
</table-wrap-foot>
</table-wrap>
<p>The proposed neural network models were also evaluated for subject-wise performance, which is illustrated in <xref ref-type="fig" rid="fig7">Figure 7</xref>. In the entire dataset, we had a total of 10 subjects. For this experiment, we trained our models on nine subjects and tested the models on the remaining one subject. This experiment showed the accuracy of our models when applied to unseen data. Subjects 1 and 9 achieved the highest accuracy of 90% and 95% on each model, respectively; only subjects 2 and 7 had accuracies less than 70%. The remaining subjects had accuracies greater than 80% for all three models. The average accuracies achieved for M1, M2, and M3 were 81%, 80%, and 82%, respectively, demonstrating that the models are very effective and robust for unseen data.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption>
<p>Subject-wise performance evaluation of the three proposed neural network (NN) models.</p>
</caption>
<graphic xlink:href="fncom-17-1286664-g007.tif"/>
</fig>
<p><xref ref-type="fig" rid="fig7">Figure 7</xref> displays the results for the test set of each subject&#x2019;s data. We randomly selected three samples from each subject to check the robustness of our models for different subjects&#x2019; data. The third column represents the actual label of the test sample, and the other columns represent the results of its corresponding machine-learning algorithm. The three proposed neural network models achieved better performance of 80%, 80%, and 90% subject-wise accuracy for neural network1, neural network2, and neural network3, respectively. On the other hand, the machine learning-based methods, namely KNN, SDT, CDT, L-SVM, Q-SVM, and C-SVM achieved 60%, 70%, 60%, 70%, 77%, and 73% accuracies, respectively. The proposed models have low accuracy for only three samples&#x2019; data, including the first sample of subject 2 and the third sample of subjects 4 and 6. However, for this data, other machine learning algorithms also faced challenges in detection. Subsequent examination of data revealed that these particular samples significantly differed from the rest of the dataset and exhibited substantial noise; therefore, the outcomes for these three samples were unsatisfactory.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="sec13">
<label>5.</label>
<title>Conclusion</title>
<p>In this study, we proposed an fNIRS-based spontaneous lie detection framework. The HbO and HbR signals were acquired using the fNIRS system. We used HbO SS and HbO SM as features in the classification of truths and lies. We developed an ANN, inspired by deep learning including AlexNet, ResNet, and GoogleNet, for classification during HbO concentration changes in an interactive environment. The proposed models, M1, M2, and M3, had overall accuracies of 88.5%, 88.0%, and 90.0%, respectively. We compared the results of the proposed ANN models with those of conventional classifiers such as KNN, simple decision tree, complex decision tree, linear SVM, quadratic SVM, and cubic SVM and found that the proposed ANN models outperformed conventional methods. In addition, we compared the individual subject accuracies and found higher accuracies for individual subjects. We further tested randomly selected samples from each subject, and the proposed ANN models M1, M2, and M3 achieved accuracies of 80%, 80%, and 90%, respectively. The resultant accuracies demonstrated the feasibility and robustness of practical fNIRS spontaneous lie detection in interactive scenarios.</p>
</sec>
<sec sec-type="data-availability" id="sec14">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec sec-type="ethics-statement" id="sec15">
<title>Ethics statement</title>
<p>The experiments were performed in accordance with the latest Declaration of Helsinki. The study was conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="sec16">
<title>Author contributions</title>
<p>MB: Conceptualization, Methodology, Software, Writing &#x2013; original draft. MA: Conceptualization, Methodology, Software, Writing &#x2013; original draft. AZ: Formal analysis, Investigation, Visualization, Writing &#x2013; review &#x0026; editing, Validation. KK: Formal analysis, Writing &#x2013; review &#x0026; editing. JB: Funding acquisition, Project administration, Resources, Writing &#x2013; review &#x0026; editing. SL: Funding acquisition, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec id="sec17">
<title>Glossary</title>
<table-wrap position="anchor" id="tab3">
<table frame="hsides" rules="groups">
<tbody>
<tr>
<td align="left" valign="top">fNIRS</td>
<td align="left" valign="top">Functional near-infrared spectroscopy</td>
</tr>
<tr>
<td align="left" valign="top">SVM</td>
<td align="left" valign="top">Support vector machines</td>
</tr>
<tr>
<td align="left" valign="top">EEG</td>
<td align="left" valign="top">Electroencephalography</td>
</tr>
<tr>
<td align="left" valign="top">fMRI</td>
<td align="left" valign="top">Functional magnetic resonance imaging</td>
</tr>
<tr>
<td align="left" valign="top">HbO</td>
<td align="left" valign="top">Oxy-hemoglobin</td>
</tr>
<tr>
<td align="left" valign="top">HbR</td>
<td align="left" valign="top">Deoxy-hemoglobin</td>
</tr>
<tr>
<td align="left" valign="top">DNN</td>
<td align="left" valign="top">Deep neural network</td>
</tr>
<tr>
<td align="left" valign="top">LDA</td>
<td align="left" valign="top">Linear discriminant analysis</td>
</tr>
<tr>
<td align="left" valign="top">SS</td>
<td align="left" valign="top">Signal slope</td>
</tr>
<tr>
<td align="left" valign="top">SM</td>
<td align="left" valign="top">Signal mean</td>
</tr>
<tr>
<td align="left" valign="top">KNN</td>
<td align="left" valign="top">K-nearest neighbor</td>
</tr>
<tr>
<td align="left" valign="top">ANN</td>
<td align="left" valign="top">Artificial neural networks</td>
</tr>
<tr>
<td align="left" valign="top">ROC</td>
<td align="left" valign="top">Receiver operating curve</td>
</tr>
<tr>
<td align="left" valign="top">AUC</td>
<td align="left" valign="top">Area under the curve</td>
</tr>
<tr>
<td align="left" valign="top">TPR</td>
<td align="left" valign="top">True positive rate</td>
</tr>
<tr>
<td align="left" valign="top">FPR</td>
<td align="left" valign="top">False positive rate</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec18">
<title>Funding</title>
<p>The experiments were performed in accordance with the latest Declaration of Helsinki. Ethical review and approval was not required for the study on human participants in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="COI-statement" id="sec19">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abibullaev</surname> <given-names>B.</given-names></name> <name><surname>An</surname> <given-names>J.</given-names></name> <name><surname>Moon</surname> <given-names>J.-I.</given-names></name></person-group> (<year>2011</year>). <article-title>Neural network classification of brain hemodynamic responses from four mental tasks</article-title>. <source>Int. J. Optomechatronics</source> <volume>5</volume>, <fpage>340</fpage>&#x2013;<lpage>359</lpage>. doi: <pub-id pub-id-type="doi">10.1080/15599612.2011.633209</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abootalebi</surname> <given-names>V.</given-names></name> <name><surname>Moradi</surname> <given-names>M. H.</given-names></name> <name><surname>Khalilzadeh</surname> <given-names>M. A.</given-names></name></person-group> (<year>2009</year>). <article-title>A new approach for EEG feature extraction in P300-based lie detection</article-title>. <source>Comput. Methods Prog. Biomed.</source> <volume>94</volume>, <fpage>48</fpage>&#x2013;<lpage>57</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cmpb.2008.10.001</pub-id>, PMID: <pub-id pub-id-type="pmid">19041154</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bhutta</surname> <given-names>M. R.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name></person-group> (<year>2013</year>). "A new near-infrared spectroscopy system for detection of hemoglobin and water concentration changes during a human activity", in: 2013 international conference on robotics, biomimetics, Intelligent Computational Systems: IEEE, 224&#x2013;227.</citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bhutta</surname> <given-names>M. R.</given-names></name> <name><surname>Hong</surname> <given-names>M. J.</given-names></name> <name><surname>Kim</surname> <given-names>Y.-H.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name></person-group> (<year>2015</year>). <article-title>Single-trial lie detection using a combined fNIRS-polygraph system</article-title>. <source>Front. Psychol.</source> <volume>6</volume>:<fpage>709</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.00709</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bhutta</surname> <given-names>M. R.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Naseer</surname> <given-names>N.</given-names></name> <name><surname>Khan</surname> <given-names>M. J.</given-names></name></person-group> (<year>2014</year>). "Classification of lie and truth in forced choice paradigm: an fNIRS study", in: Proc. of the 20th annual meeting of the Organization for Human Brain Mapping (OHMB).</citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bianchini</surname> <given-names>M.</given-names></name> <name><surname>Scarselli</surname> <given-names>F.</given-names></name></person-group> (<year>2014</year>). <article-title>On the complexity of neural network classifiers: a comparison between shallow and deep architectures</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst.</source> <volume>25</volume>, <fpage>1553</fpage>&#x2013;<lpage>1565</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNNLS.2013.2293637</pub-id>, PMID: <pub-id pub-id-type="pmid">25050951</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bluestone</surname> <given-names>A. Y.</given-names></name> <name><surname>Abdoulaev</surname> <given-names>G.</given-names></name> <name><surname>Schmitz</surname> <given-names>C. H.</given-names></name> <name><surname>Barbour</surname> <given-names>R. L.</given-names></name> <name><surname>Hielscher</surname> <given-names>A. H.</given-names></name></person-group> (<year>2001</year>). <article-title>Three-dimensional optical tomography of hemodynamics in the human head</article-title>. <source>Opt. Express</source> <volume>9</volume>, <fpage>272</fpage>&#x2013;<lpage>286</lpage>. doi: <pub-id pub-id-type="doi">10.1364/OE.9.000272</pub-id>, PMID: <pub-id pub-id-type="pmid">19421298</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boas</surname> <given-names>D. A.</given-names></name> <name><surname>Dale</surname> <given-names>A. M.</given-names></name> <name><surname>Franceschini</surname> <given-names>M. A.</given-names></name></person-group> (<year>2004</year>). <article-title>Diffuse optical imaging of brain activation: approaches to optimizing image sensitivity, resolution, and accuracy</article-title>. <source>NeuroImage</source> <volume>23</volume>, <fpage>S275</fpage>&#x2013;<lpage>S288</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2004.07.011</pub-id>, PMID: <pub-id pub-id-type="pmid">15501097</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Bond</surname> <given-names>C. F.</given-names></name> <name><surname>Levine</surname> <given-names>T. R.</given-names></name> <name><surname>Hartwig</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). &#x201C;<article-title>New findings in non-verbal lie detection</article-title>&#x201D; in <source>Detecting deception: current challenges and cognitive approaches</source>. eds. <person-group person-group-type="editor"><name><surname>Granhag</surname> <given-names>P. A.</given-names></name> <name><surname>Vrij</surname> <given-names>A.</given-names></name> <name><surname>Verschuere</surname> <given-names>B.</given-names></name></person-group> (<publisher-loc>Hoboken, NJ</publisher-loc>: <publisher-name>Wiley-Blackwell</publisher-name>), <fpage>37</fpage>&#x2013;<lpage>58</lpage>.</citation></ref>
<ref id="ref10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brett</surname> <given-names>A.</given-names></name> <name><surname>Phillips</surname> <given-names>M.</given-names></name> <name><surname>Beary</surname> <given-names>J.</given-names></name></person-group> (<year>1986</year>). <article-title>Predictive power of the polygraph: can the" lie detector" really detect liars?</article-title> <source>Lancet</source> <volume>327</volume>, <fpage>544</fpage>&#x2013;<lpage>547</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0140-6736(86)90895-0</pub-id></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Buller</surname> <given-names>D. B.</given-names></name> <name><surname>Burgoon</surname> <given-names>J. K.</given-names></name></person-group> (<year>1996</year>). <article-title>Interpersonal deception theory</article-title>. <source>Commun. Theory</source> <volume>6</volume>, <fpage>203</fpage>&#x2013;<lpage>242</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1468-2885.1996.tb00127.x</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Q.</given-names></name> <name><surname>He</surname> <given-names>R.</given-names></name> <name><surname>Sun</surname> <given-names>J.</given-names></name> <name><surname>Ding</surname> <given-names>K.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>He</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Common brain activation and connectivity patterns supporting the generation of creative uses and creative metaphors</article-title>. <source>Neuropsychologia</source> <volume>181</volume>:<fpage>108487</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2023.108487</pub-id>, PMID: <pub-id pub-id-type="pmid">36669695</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cire&#x015F;an</surname> <given-names>D. C.</given-names></name> <name><surname>Giusti</surname> <given-names>A.</given-names></name> <name><surname>Gambardella</surname> <given-names>L. M.</given-names></name> <name><surname>Schmidhuber</surname> <given-names>J.</given-names></name></person-group> (<year>2013</year>). "Mitosis detection in breast cancer histology images with deep neural networks", in: Medical image computing and computer-assisted intervention&#x2013;MICCAI 2013: 16th international conference, Nagoya, Japan, September 22&#x2013;26, 2013, proceedings, part II 16: Springer, 411&#x2013;418</citation></ref>
<ref id="ref14"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Collobert</surname> <given-names>R.</given-names></name> <name><surname>Weston</surname> <given-names>J.</given-names></name></person-group> (<year>2008</year>). "A unified architecture for natural language processing: deep neural networks with multitask learning", in: Proceedings of the 25th international conference on machine learning, 160&#x2013;167.</citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>X. P.</given-names></name> <name><surname>Gao</surname> <given-names>X.</given-names></name> <name><surname>Fu</surname> <given-names>G.</given-names></name> <name><surname>Lee</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Neural correlates of spontaneous deception: a functional near-infrared spectroscopy (fNIRS) study</article-title>. <source>Neuropsychologia</source> <volume>51</volume>, <fpage>704</fpage>&#x2013;<lpage>712</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2012.12.018</pub-id>, PMID: <pub-id pub-id-type="pmid">23340482</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>X. P.</given-names></name> <name><surname>Sai</surname> <given-names>L.</given-names></name> <name><surname>Fu</surname> <given-names>G.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>K.</given-names></name></person-group> (<year>2014</year>). <article-title>Neural correlates of second-order verbal deception: a functional near-infrared spectroscopy (fNIRS) study</article-title>. <source>NeuroImage</source> <volume>87</volume>, <fpage>505</fpage>&#x2013;<lpage>514</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.10.023</pub-id>, PMID: <pub-id pub-id-type="pmid">24161626</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ekman</surname> <given-names>P.</given-names></name> <name><surname>Sorenson</surname> <given-names>E. R.</given-names></name> <name><surname>Friesen</surname> <given-names>W. V.</given-names></name></person-group> (<year>1969</year>). <article-title>Pan-cultural elements in facial displays of emotion</article-title>. <source>Science</source> <volume>164</volume>, <fpage>86</fpage>&#x2013;<lpage>88</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.164.3875.86</pub-id>, PMID: <pub-id pub-id-type="pmid">5773719</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Elaad</surname> <given-names>E.</given-names></name> <name><surname>Ben-Shakhar</surname> <given-names>G.</given-names></name></person-group> (<year>1997</year>). <article-title>Effects of item repetitions and variations on the efficiency of the guilty knowledge test</article-title>. <source>Psychophysiology</source> <volume>34</volume>, <fpage>587</fpage>&#x2013;<lpage>596</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1469-8986.1997.tb01745.x</pub-id>, PMID: <pub-id pub-id-type="pmid">9299913</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Emberson</surname> <given-names>L. L.</given-names></name> <name><surname>Zinszer</surname> <given-names>B. D.</given-names></name> <name><surname>Raizada</surname> <given-names>R. D.</given-names></name> <name><surname>Aslin</surname> <given-names>R. N.</given-names></name></person-group> (<year>2017</year>). <article-title>Decoding the infant mind: multivariate pattern analysis (MVPA) using fNIRS</article-title>. <source>PLoS One</source> <volume>12</volume>:<fpage>e0172500</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0172500</pub-id>, PMID: <pub-id pub-id-type="pmid">28426802</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Farah</surname> <given-names>M. J.</given-names></name> <name><surname>Hutchinson</surname> <given-names>J. B.</given-names></name> <name><surname>Phelps</surname> <given-names>E. A.</given-names></name> <name><surname>Wagner</surname> <given-names>A. D.</given-names></name></person-group> (<year>2014</year>). <article-title>Functional MRI-based lie detection: scientific and societal challenges</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>15</volume>, <fpage>123</fpage>&#x2013;<lpage>131</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrn3665</pub-id>, PMID: <pub-id pub-id-type="pmid">24588019</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Farwell</surname> <given-names>L. A.</given-names></name> <name><surname>Donchin</surname> <given-names>E.</given-names></name></person-group> (<year>1991</year>). <article-title>The truth will out: interrogative polygraphy (&#x201C;lie detection&#x201D;) with event-related brain potentials</article-title>. <source>Psychophysiology</source> <volume>28</volume>, <fpage>531</fpage>&#x2013;<lpage>547</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1469-8986.1991.tb01990.x</pub-id>, PMID: <pub-id pub-id-type="pmid">1758929</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Freud</surname> <given-names>S.</given-names></name> <name><surname>Strachey</surname> <given-names>A.</given-names></name></person-group> (<year>1962</year>). <source>Fragment of an analysis of a case of hysteria (1905 [1901])</source>. <publisher-loc>London</publisher-loc>: <publisher-name>Hogarth Press</publisher-name>.</citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Furedy</surname> <given-names>J. J.</given-names></name> <name><surname>Ben-Shakhar</surname> <given-names>G.</given-names></name></person-group> (<year>1991</year>). <article-title>The roles of deception, intention to deceive, and motivation to avoid detection in the psychophysiological detection of guilty knowledge</article-title>. <source>Psychophysiology</source> <volume>28</volume>, <fpage>163</fpage>&#x2013;<lpage>171</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1469-8986.1991.tb00407.x</pub-id>, PMID: <pub-id pub-id-type="pmid">1946882</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Hennrich</surname> <given-names>J.</given-names></name> <name><surname>Herff</surname> <given-names>C.</given-names></name> <name><surname>Heger</surname> <given-names>D.</given-names></name> <name><surname>Schultz</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). "Investigating deep learning for fNIRS based BCI", in: 2015 37th annual international conference of the IEEE engineering in medicine and biology society (EMBC): IEEE, 2844&#x2013;2847.</citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoshi</surname> <given-names>Y.</given-names></name></person-group> (<year>2003</year>). <article-title>Functional near-infrared optical imaging: utility and limitations in human brain mapping</article-title>. <source>Psychophysiology</source> <volume>40</volume>, <fpage>511</fpage>&#x2013;<lpage>520</lpage>. doi: <pub-id pub-id-type="doi">10.1111/1469-8986.00053</pub-id>, PMID: <pub-id pub-id-type="pmid">14570159</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoshi</surname> <given-names>Y.</given-names></name></person-group> (<year>2007</year>). <article-title>Functional near-infrared spectroscopy: current status and future prospects</article-title>. <source>J. Biomed. Opt.</source> <volume>12</volume>, &#x2013;<lpage>062106</lpage>. doi: <pub-id pub-id-type="doi">10.1117/1.2804911</pub-id>, PMID: <pub-id pub-id-type="pmid">18163809</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>X.-S.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Ge</surname> <given-names>S. S.</given-names></name></person-group> (<year>2011</year>). <article-title>Recognition of stimulus-evoked neuronal optical response by identifying chaos levels of near-infrared spectroscopy time series</article-title>. <source>Neurosci. Lett.</source> <volume>504</volume>, <fpage>115</fpage>&#x2013;<lpage>120</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neulet.2011.09.011</pub-id>, PMID: <pub-id pub-id-type="pmid">21945547</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>X.-S.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Ge</surname> <given-names>S. S.</given-names></name></person-group> (<year>2012</year>). <article-title>fNIRS-based online deception decoding</article-title>. <source>J. Neural Eng.</source> <volume>9</volume>:<fpage>026012</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1741-2560/9/2/026012</pub-id>, PMID: <pub-id pub-id-type="pmid">22337819</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Hudson</surname> <given-names>D.L.</given-names></name> <name><surname>Cohen</surname> <given-names>M.E.</given-names></name></person-group> (<year>2000</year>). <source>Neural networks and artificial intelligence for biomedical engineering</source>. <publisher-loc>Hoboken, NJ</publisher-loc>: <publisher-name>Wiley Online Library</publisher-name>.</citation></ref>
<ref id="ref30"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Huve</surname> <given-names>G.</given-names></name> <name><surname>Takahashi</surname> <given-names>K.</given-names></name> <name><surname>Hashimoto</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). "Fnirs-based brain&#x2013;computer interface using deep neural networks for classifying the mental state of drivers", in: Artificial neural networks and machine learning&#x2013;ICANN 2018: 27th international conference on artificial neural networks, Rhodes, Greece, October 4&#x2013;7, 2018, proceedings, part III 27: Springer, 353&#x2013;362.</citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Irani</surname> <given-names>F.</given-names></name> <name><surname>Platek</surname> <given-names>S. M.</given-names></name> <name><surname>Bunce</surname> <given-names>S.</given-names></name> <name><surname>Ruocco</surname> <given-names>A. C.</given-names></name> <name><surname>Chute</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <article-title>Functional near infrared spectroscopy (fNIRS): an emerging neuroimaging technology with important applications for the study of brain disorders</article-title>. <source>Clin. Neuropsychol.</source> <volume>21</volume>, <fpage>9</fpage>&#x2013;<lpage>37</lpage>. doi: <pub-id pub-id-type="doi">10.1080/13854040600910018</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kamran</surname> <given-names>M. A.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name></person-group> (<year>2013</year>). <article-title>Linear parameter-varying model and adaptive filtering technique for detecting neuronal activities: an fNIRS study</article-title>. <source>J. Neural Eng.</source> <volume>10</volume>:<fpage>056002</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1741-2560/10/5/056002</pub-id>, PMID: <pub-id pub-id-type="pmid">23893789</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>M. J.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Bhutta</surname> <given-names>M. R.</given-names></name> <name><surname>Naseer</surname> <given-names>N.</given-names></name></person-group> (<year>2014</year>). "fNIRS based dual movement control command generation using prefrontal brain activity", in: 2014 international conference on robotics and emerging allied Technologies in Engineering (iCREATE): IEEE, 244&#x2013;248.</citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kleinmuntz</surname> <given-names>B.</given-names></name> <name><surname>Szucko</surname> <given-names>J. J.</given-names></name></person-group> (<year>1984</year>). <article-title>Lie detection in ancient and modern times: a call for contemporary scientific study</article-title>. <source>Am. Psychol.</source> <volume>39</volume>, <fpage>766</fpage>&#x2013;<lpage>776</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0003-066X.39.7.766</pub-id>, PMID: <pub-id pub-id-type="pmid">6465664</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kong</surname> <given-names>F.</given-names></name> <name><surname>Zhao</surname> <given-names>J.</given-names></name> <name><surname>You</surname> <given-names>X.</given-names></name></person-group> (<year>2012</year>). <article-title>Emotional intelligence and life satisfaction in Chinese university students: the mediating role of self-esteem and social support</article-title>. <source>Personal. Individ. Differ.</source> <volume>53</volume>, <fpage>1039</fpage>&#x2013;<lpage>1043</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.paid.2012.07.032</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Krizhevsky</surname> <given-names>A.</given-names></name> <name><surname>Sutskever</surname> <given-names>I.</given-names></name> <name><surname>Hinton</surname> <given-names>G.E.</given-names></name></person-group> (<year>2012</year>). Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25.</citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Liu</surname> <given-names>X.</given-names></name> <name><surname>Yin</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>G.</given-names></name> <name><surname>Niu</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Transcranial direct current stimulation altered voluntary cooperative norms compliance under equal decision-making power</article-title>. <source>Front. Hum. Neurosci.</source> <volume>12</volume>:<fpage>265</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2018.00265</pub-id>, PMID: <pub-id pub-id-type="pmid">30018541</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lotte</surname> <given-names>F.</given-names></name> <name><surname>Congedo</surname> <given-names>M.</given-names></name> <name><surname>L&#x00E9;cuyer</surname> <given-names>A.</given-names></name> <name><surname>Lamarche</surname> <given-names>F.</given-names></name> <name><surname>Arnaldi</surname> <given-names>B.</given-names></name></person-group> (<year>2007</year>). <article-title>A review of classification algorithms for EEG-based brain&#x2013;computer interfaces</article-title>. <source>J. Neural Eng.</source> <volume>4</volume>, <fpage>R1</fpage>&#x2013;<lpage>R13</lpage>. doi: <pub-id pub-id-type="doi">10.1088/1741-2560/4/2/R01</pub-id>, PMID: <pub-id pub-id-type="pmid">17409472</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Meijer</surname> <given-names>E. H.</given-names></name> <name><surname>Ben-Shakhar</surname> <given-names>G.</given-names></name> <name><surname>Verschuere</surname> <given-names>B.</given-names></name> <name><surname>Donchin</surname> <given-names>E.</given-names></name></person-group> (<year>2013</year>). <article-title>A comment on Farwell (2012): brain fingerprinting: a comprehensive tutorial review of detection of concealed information with event-related brain potentials</article-title>. <source>Cogn. Neurodyn.</source> <volume>7</volume>, <fpage>155</fpage>&#x2013;<lpage>158</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11571-012-9217-x</pub-id>, PMID: <pub-id pub-id-type="pmid">23493984</pub-id></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Muehlemann</surname> <given-names>T.</given-names></name> <name><surname>Haensse</surname> <given-names>D.</given-names></name> <name><surname>Wolf</surname> <given-names>M.</given-names></name></person-group> (<year>2008</year>). <article-title>Wireless miniaturized in-vivo near infrared imaging</article-title>. <source>Opt. Express</source> <volume>16</volume>, <fpage>10323</fpage>&#x2013;<lpage>10330</lpage>. doi: <pub-id pub-id-type="doi">10.1364/OE.16.010323</pub-id>, PMID: <pub-id pub-id-type="pmid">18607442</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Naseer</surname> <given-names>N.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name></person-group> (<year>2013a</year>). <article-title>Classification of functional near-infrared spectroscopy signals corresponding to the right-and left-wrist motor imagery for development of a brain&#x2013;computer interface</article-title>. <source>Neurosci. Lett.</source> <volume>553</volume>, <fpage>84</fpage>&#x2013;<lpage>89</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neulet.2013.08.021</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Naseer</surname> <given-names>N.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name></person-group> (<year>2013b</year>). "Discrimination of right-and left-wrist motor imagery using fNIRS: towards control of a ball-on-a-beam system", in: Proceedings of the 6th international IEEE engineering in medicine and biology society (IEEE EMBS) conference on neural engineering), 703&#x2013;706.</citation></ref>
<ref id="ref44"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Naseer</surname> <given-names>N.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Bhutta</surname> <given-names>M. R.</given-names></name> <name><surname>Khan</surname> <given-names>M. J.</given-names></name></person-group> (<year>2014</year>). "Improving classification accuracy of covert yes/no response decoding using support vector machines: an fNIRS study", in: 2014 international conference on robotics and emerging allied Technologies in Engineering (iCREATE): IEEE, 6&#x2013;9.</citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Obrig</surname> <given-names>H.</given-names></name> <name><surname>Villringer</surname> <given-names>A.</given-names></name></person-group> (<year>2003</year>). <article-title>Beyond the visible&#x2014;imaging the human brain with light</article-title>. <source>J. Cereb. Blood Flow Metab.</source> <volume>23</volume>, <fpage>1</fpage>&#x2013;<lpage>18</lpage>. doi: <pub-id pub-id-type="doi">10.1097/01.WCB.0000043472.45775.29</pub-id>, PMID: <pub-id pub-id-type="pmid">12500086</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peterman</surname> <given-names>R. M.</given-names></name> <name><surname>Anderson</surname> <given-names>J. L.</given-names></name></person-group> (<year>1999</year>). <article-title>Decision analysis: a method for taking uncertainties into account in risk-based decision making</article-title>. <source>Hum. Ecol. Risk Assess. Int. J.</source> <volume>5</volume>, <fpage>231</fpage>&#x2013;<lpage>244</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10807039991289383</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Quaresima</surname> <given-names>V.</given-names></name> <name><surname>Ferrari</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Functional near-infrared spectroscopy (fNIRS) for assessing cerebral cortex function during human behavior in natural/social situations: a concise review</article-title>. <source>Organ. Res. Methods</source> <volume>22</volume>, <fpage>46</fpage>&#x2013;<lpage>68</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1094428116658959</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ronneberger</surname> <given-names>O.</given-names></name> <name><surname>Fischer</surname> <given-names>P.</given-names></name> <name><surname>Brox</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). "U-net: convolutional networks for biomedical image segmentation", in: Medical image computing and computer-assisted intervention&#x2013;MICCAI 2015: 18th international conference, Munich, Germany, October 5&#x2013;9, 2015, proceedings, part III 18: Springer, 234&#x2013;241.</citation></ref>
<ref id="ref49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ruotsalo</surname> <given-names>T.</given-names></name> <name><surname>M&#x00E4;kel&#x00E4;</surname> <given-names>K.</given-names></name> <name><surname>Spap&#x00E9;</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Crowdsourcing affective annotations via fNIRS-BCI</article-title>. <source>IEEE Trans. Affect. Comput.</source>, <fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TAFFC.2023.3273916</pub-id></citation></ref>
<ref id="ref50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Santosa</surname> <given-names>H.</given-names></name> <name><surname>Jiyoun Hong</surname> <given-names>M.</given-names></name> <name><surname>Kim</surname> <given-names>S.-P.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name></person-group> (<year>2013</year>). <article-title>Noise reduction in functional near-infrared spectroscopy signals by independent component analysis</article-title>. <source>Rev. Sci. Instrum.</source> <volume>84</volume>:<fpage>073106</fpage>. doi: <pub-id pub-id-type="doi">10.1063/1.4812785</pub-id>, PMID: <pub-id pub-id-type="pmid">23902043</pub-id></citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schmidhuber</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>Deep learning</article-title>. <source>Scholarpedia</source> <volume>10</volume>:<fpage>32832</fpage>. doi: <pub-id pub-id-type="doi">10.4249/scholarpedia.32832</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Simonyan</surname> <given-names>K.</given-names></name> <name><surname>Zisserman</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.</citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spence</surname> <given-names>S. A.</given-names></name> <name><surname>Hunter</surname> <given-names>M. D.</given-names></name> <name><surname>Farrow</surname> <given-names>T. F.</given-names></name> <name><surname>Green</surname> <given-names>R. D.</given-names></name> <name><surname>Leung</surname> <given-names>D. H.</given-names></name> <name><surname>Hughes</surname> <given-names>C. J.</given-names></name> <etal/></person-group>. (<year>2004</year>). <article-title>A cognitive neurobiological account of deception: evidence from functional neuroimaging</article-title>. <source>Philos. Trans. R Soc. Lond. B Biol. Sci.</source> <volume>359</volume>, <fpage>1755</fpage>&#x2013;<lpage>1762</lpage>. doi: <pub-id pub-id-type="doi">10.1098/rstb.2004.1555</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>F.</given-names></name> <name><surname>Sharma</surname> <given-names>V.</given-names></name> <name><surname>Kozel</surname> <given-names>F. A.</given-names></name> <name><surname>Liu</surname> <given-names>H.</given-names></name></person-group> (<year>2009</year>). <article-title>Functional near-infrared spectroscopy to investigate hemodynamic responses to deception in the prefrontal cortex</article-title>. <source>Brain Res.</source> <volume>1303</volume>, <fpage>120</fpage>&#x2013;<lpage>130</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.brainres.2009.09.085</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Toglia</surname> <given-names>M. P.</given-names></name> <name><surname>Schmuller</surname> <given-names>J.</given-names></name> <name><surname>Surprenant</surname> <given-names>B. G.</given-names></name> <name><surname>Hooper</surname> <given-names>K. C.</given-names></name> <name><surname>Demeo</surname> <given-names>N. N.</given-names></name> <name><surname>Wallace</surname> <given-names>B. L.</given-names></name></person-group> (<year>2022</year>). <article-title>Novel approaches and cognitive neuroscience perspectives on false memory and deception</article-title>. <source>Front. Psychol.</source> <volume>13</volume>:<fpage>721961</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2022.721961</pub-id>, PMID: <pub-id pub-id-type="pmid">35386904</pub-id></citation></ref>
<ref id="ref56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Trovillo</surname> <given-names>P. V.</given-names></name></person-group> (<year>1938</year>). <article-title>History of lie detection</article-title>. <source>Am. Inst. Crim. L. Criminol.</source> <volume>29</volume>:<fpage>848</fpage>.</citation></ref>
<ref id="ref57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Turnip</surname> <given-names>A.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Jeong</surname> <given-names>M.-Y.</given-names></name></person-group> (<year>2011</year>). <article-title>Real-time feature extraction of P300 component using adaptive nonlinear principal component analysis</article-title>. <source>Biomed. Eng. Online</source> <volume>10</volume>, <fpage>1</fpage>&#x2013;<lpage>20</lpage>. doi: <pub-id pub-id-type="doi">10.1186/1475-925X-10-83</pub-id></citation></ref>
<ref id="ref58"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ullah</surname> <given-names>A.</given-names></name> <name><surname>Muhammad</surname> <given-names>K.</given-names></name> <name><surname>Haydarov</surname> <given-names>K.</given-names></name> <name><surname>Haq</surname> <given-names>I. U.</given-names></name> <name><surname>Lee</surname> <given-names>M.</given-names></name> <name><surname>Baik</surname> <given-names>S. W.</given-names></name></person-group> (<year>2020</year>). "One-shot learning for surveillance anomaly recognition using siamese 3d cnn", in: 2020 international joint conference on neural networks (IJCNN): IEEE, 1&#x2013;8.</citation></ref>
<ref id="ref59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Varisai Mohamed</surname> <given-names>S.</given-names></name> <name><surname>Sung</surname> <given-names>J.-M.</given-names></name> <name><surname>Jeng</surname> <given-names>T.-L.</given-names></name> <name><surname>Wang</surname> <given-names>C.-S.</given-names></name></person-group> (<year>2006</year>). <article-title>Organogenesis of <italic>Phaseolus angularis</italic> L.: high efficiency of adventitious shoot regeneration from etiolated seedlings in the presence of N6-benzylaminopurine and thidiazuron</article-title>. <source>Plant Cell Tissue Organ Cult.</source> <volume>86</volume>, <fpage>187</fpage>&#x2013;<lpage>199</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11240-006-9107-1</pub-id></citation></ref>
<ref id="ref60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wolf</surname> <given-names>M.</given-names></name> <name><surname>Ferrari</surname> <given-names>M.</given-names></name> <name><surname>Quaresima</surname> <given-names>V.</given-names></name></person-group> (<year>2007</year>). <article-title>Progress of near-infrared spectroscopy and topography for brain and muscle clinical applications</article-title>. <source>J. Biomed. Opt.</source> <volume>12</volume>:<fpage>062104</fpage>. doi: <pub-id pub-id-type="doi">10.1117/1.2804899</pub-id></citation></ref>
<ref id="ref61"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yi</surname> <given-names>W.</given-names></name> <name><surname>Qiu</surname> <given-names>S.</given-names></name> <name><surname>Qi</surname> <given-names>H.</given-names></name> <name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Wan</surname> <given-names>B.</given-names></name> <name><surname>Ming</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>EEG feature comparison and classification of simple and compound limb motor imagery</article-title>. <source>J. Neuroeng. Rehabil.</source> <volume>10</volume>, <fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1186/1743-0003-10-106</pub-id></citation></ref>
<ref id="ref62"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Zuckerman</surname> <given-names>M.</given-names></name> <name><surname>Depaulo</surname> <given-names>B. M.</given-names></name> <name><surname>Rosenthal</surname> <given-names>R.</given-names></name></person-group> (<year>1981a</year>). &#x201C;<article-title>Verbal and nonverbal communication of deception</article-title>&#x201D; in <source>Advances in experimental social psychology</source>. ed. <person-group person-group-type="editor"><name><surname>Berkowitz</surname> <given-names>L.</given-names></name></person-group> (<publisher-loc>Amsterdam</publisher-loc>: <publisher-name>Elsevier</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>59</lpage>.</citation></ref>
<ref id="ref63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zuckerman</surname> <given-names>M.</given-names></name> <name><surname>Koestner</surname> <given-names>R.</given-names></name> <name><surname>Driver</surname> <given-names>R.</given-names></name></person-group> (<year>1981b</year>). <article-title>Beliefs about cues associated with deception</article-title>. <source>J. Nonverbal Behav.</source> <volume>6</volume>, <fpage>105</fpage>&#x2013;<lpage>114</lpage>. doi: <pub-id pub-id-type="doi">10.1007/BF00987286</pub-id></citation></ref>
</ref-list>
</back>
</article>