@ARTICLE{10.3389/fnhum.2021.621493,
  AUTHOR   = {Li, Ruixin and Liang, Yan and Liu, Xiaojian and Wang, Bingbing and Huang, Wenxin and Cai, Zhaoxin and Ye, Yaoguang and Qiu, Lina and Pan, Jiahui},
  TITLE    = {MindLink-Eumpy: An Open-Source Python Toolbox for Multimodal Emotion Recognition},
  JOURNAL  = {Frontiers in Human Neuroscience},
  VOLUME   = {15},
  YEAR     = {2021},
  URL      = {https://www.frontiersin.org/articles/10.3389/fnhum.2021.621493},
  DOI      = {10.3389/fnhum.2021.621493},
  ISSN     = {1662-5161},
  ABSTRACT = {Emotion recognition plays an important role in intelligent human–computer interaction, but related research still faces the problems of low accuracy and subject dependence. In this paper, an open-source software toolbox called MindLink-Eumpy is developed to recognize emotions by integrating electroencephalogram (EEG) and facial expression information. MindLink-Eumpy first applies a series of tools to automatically obtain physiological data from subjects, then analyzes the obtained facial expression data and EEG data separately, and finally fuses the two signals at the decision level. For facial expression detection, MindLink-Eumpy uses a multitask convolutional neural network (CNN) based on transfer learning. For EEG detection, MindLink-Eumpy provides two algorithms: a subject-dependent model based on a support vector machine (SVM) and a subject-independent model based on a long short-term memory (LSTM) network. In the decision-level fusion, a weight enumeration method and the AdaBoost technique are applied to combine the predictions of the SVM and the CNN. We conducted two offline experiments, on the Database for Emotion Analysis Using Physiological Signals (DEAP) dataset and the Multimodal Database for Affect Recognition and Implicit Tagging (MAHNOB-HCI) dataset, respectively, and an online experiment on 15 healthy subjects. The results show that multimodal methods outperform single-modal methods in both the offline and online experiments. In the subject-dependent condition, the multimodal method achieved an accuracy of 71.00% in the valence dimension and 72.14% in the arousal dimension. In the subject-independent condition, the LSTM-based method achieved an accuracy of 78.56% in the valence dimension and 77.22% in the arousal dimension. The feasibility and efficiency of MindLink-Eumpy for emotion recognition are thus demonstrated.}
}
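
The abstract above describes decision-level fusion in which a weight enumeration step combines the EEG (SVM) and facial-expression (CNN) predictions. The following is a minimal Python sketch of that idea only; the function name, grid step, and toy data below are illustrative assumptions and do not reflect MindLink-Eumpy's actual API or implementation.

# Illustrative sketch: enumerate a fusion weight w in [0, 1] that combines
# per-trial positive-class probabilities from two modalities and keep the
# weight with the best validation accuracy. All names here are hypothetical.
import numpy as np

def enumerate_fusion_weight(p_eeg, p_face, labels, step=0.01):
    """Search w so that w * p_eeg + (1 - w) * p_face best matches binary labels."""
    best_w, best_acc = 0.0, -1.0
    for w in np.arange(0.0, 1.0 + step, step):
        fused = w * p_eeg + (1.0 - w) * p_face          # weighted decision-level fusion
        acc = np.mean((fused >= 0.5).astype(int) == labels)
        if acc > best_acc:
            best_w, best_acc = w, acc
    return best_w, best_acc

# Toy usage with random stand-in predictions (not real EEG or facial data).
rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=100)
p_eeg  = np.clip(labels + rng.normal(0, 0.4, 100), 0, 1)   # moderately informative modality
p_face = np.clip(labels + rng.normal(0, 0.6, 100), 0, 1)   # noisier modality
w, acc = enumerate_fusion_weight(p_eeg, p_face, labels)
print(f"best EEG weight: {w:.2f}, fused accuracy: {acc:.2%}")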