@ARTICLE{10.3389/fnins.2021.619591,
  AUTHOR={Ovchinnikova, Anastasia O. and Vasilyev, Anatoly N. and Zubarev, Ivan P. and Kozyrskiy, Bogdan L. and Shishkin, Sergei L.},
  TITLE={MEG-Based Detection of Voluntary Eye Fixations Used to Control a Computer},
  JOURNAL={Frontiers in Neuroscience},
  VOLUME={15},
  YEAR={2021},
  URL={https://www.frontiersin.org/articles/10.3389/fnins.2021.619591},
  DOI={10.3389/fnins.2021.619591},
  ISSN={1662-453X},
  ABSTRACT={Gaze-based input is an efficient way of hands-free human-computer interaction. However, it suffers from the inability of gaze-based interfaces to discriminate voluntary and spontaneous gaze behaviors, which are overtly similar. Here, we demonstrate that voluntary eye fixations can be discriminated from spontaneous ones using short segments of magnetoencephalography (MEG) data measured immediately after the fixation onset. Recently proposed convolutional neural networks (CNNs), linear finite impulse response filters CNN (LF-CNN) and vector autoregressive CNN (VAR-CNN), were applied for binary classification of the MEG signals related to spontaneous and voluntary eye fixations collected in healthy participants (n = 25) who performed a game-like task by fixating on targets voluntarily for 500 ms or longer. Voluntary fixations were identified as those followed by a fixation in a special confirmatory area. Spontaneous vs. voluntary fixation-related single-trial 700 ms MEG segments were non-randomly classified in the majority of participants, with the group average cross-validated ROC AUC of 0.66 ± 0.07 for LF-CNN and 0.67 ± 0.07 for VAR-CNN (M ± SD). When the time interval, from which the MEG data were taken, was extended beyond the onset of the visual feedback, the group average classification performance increased up to 0.91. Analysis of spatial patterns contributing to classification did not reveal signs of significant eye movement impact on the classification results. We conclude that the classification of MEG signals has a certain potential to support gaze-based interfaces by avoiding false responses to spontaneous eye fixations on a single-trial basis. Current results for intention detection prior to gaze-based interface's feedback, however, are not sufficient for online single-trial eye fixation classification using MEG data alone, and further work is needed to find out if it could be used in practical applications.}
}