{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,15]],"date-time":"2025-12-15T19:10:40Z","timestamp":1765825840291,"version":"3.40.3"},"publisher-location":"Cham","reference-count":33,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030805678"},{"type":"electronic","value":"9783030805685"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-80568-5_35","type":"book-chapter","created":{"date-parts":[[2021,6,23]],"date-time":"2021-06-23T17:04:53Z","timestamp":1624467893000},"page":"423-435","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Real-Time Multimodal Emotion Classification System in E-Learning Context"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4238-5183","authenticated-orcid":false,"given":"Arijit","family":"Nandi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6569-5497","authenticated-orcid":false,"given":"Fatos","family":"Xhafa","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8646-5463","authenticated-orcid":false,"given":"Laia","family":"Subirats","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2189-6830","authenticated-orcid":false,"given":"Santi","family":"Fort","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,7,1]]},"reference":[{"key":"35_CR1","doi-asserted-by":"crossref","unstructured":"Ayata, D., Yaslan, Y., Kama\u015fak, M.: Emotion recognition via random forest and galvanic skin response: comparison of time based feature sets, window sizes and wavelet approaches. In: Medical Technologies National Congress, pp. 1\u20134 (2016)","DOI":"10.1109\/TIPTEKNO.2016.7863130"},{"key":"35_CR2","doi-asserted-by":"publisher","first-page":"149","DOI":"10.1007\/s40846-019-00505-7","volume":"40","author":"D Ayata","year":"2020","unstructured":"Ayata, D., Yaslan, Y., Kamasak, M.F.: Emotion recognition from multimodal physiological signals for emotion aware healthcare systems. J. Med. Biol. Eng. 40, 149\u2013157 (2020)","journal-title":"J. Med. Biol. Eng."},{"issue":"3","key":"35_CR3","doi-asserted-by":"publisher","first-page":"590","DOI":"10.1080\/10494820.2014.908927","volume":"24","author":"K Bahreini","year":"2016","unstructured":"Bahreini, K., Nadolski, R., Westera, W.: Towards multimodal emotion recognition in e-learning environments. Interact. Learn. Environ. 24(3), 590\u2013605 (2016)","journal-title":"Interact. Learn. Environ."},{"issue":"2","key":"35_CR4","doi-asserted-by":"publisher","first-page":"423","DOI":"10.1109\/TPAMI.2018.2798607","volume":"41","author":"T Baltru\u0161aitis","year":"2019","unstructured":"Baltru\u0161aitis, T., Ahuja, C., Morency, L.: Multimodal machine learning: a survey and taxonomy. IEEE TPAMI 41(2), 423\u2013443 (2019)","journal-title":"IEEE TPAMI"},{"key":"35_CR5","first-page":"3","volume":"1\u201338","author":"DP Bertsekas","year":"2010","unstructured":"Bertsekas, D.P.: Incremental gradient, subgradient, and proximal methods for convex optimization: a survey. Optim. Mach. Learn. 1\u201338, 3 (2010)","journal-title":"Optim. Mach. Learn."},{"key":"35_CR6","first-page":"1601","volume":"11","author":"A Bifet","year":"2010","unstructured":"Bifet, A., Holmes, G., Kirkby, R., Pfahringer, B.: Moa: massive online analysis. J. Mach. Learn. Res. 11, 1601\u20131604 (2010)","journal-title":"J. Mach. Learn. Res."},{"key":"35_CR7","doi-asserted-by":"publisher","first-page":"220","DOI":"10.18608\/jla.2016.32.11","volume":"3","author":"P Blikstein","year":"2016","unstructured":"Blikstein, P., Worsley, M.: Multimodal learning analytics and education data mining: using computational technologies to measure complex learning tasks. J. Learn. Anal. 3, 220\u2013238 (2016)","journal-title":"J. Learn. Anal."},{"issue":"17","key":"35_CR8","doi-asserted-by":"publisher","first-page":"4723","DOI":"10.3390\/s20174723","volume":"20","author":"P Bota","year":"2020","unstructured":"Bota, P., Wang, C., Fred, A., Silva, H.: Emotion assessment using feature fusion and decision fusion classification based on physiological data: are we there yet? Sensors 20(17), 4723 (2020)","journal-title":"Sensors"},{"key":"35_CR9","doi-asserted-by":"crossref","unstructured":"Candra, H., et al.: Investigation of window size in classification of eeg-emotion signal with wavelet entropy and support vector machine. In: 37th Annual Int\u2019l Conference of the IEEE Engineering in Medicine and Biology Society, pp. 7250\u20137253 (2015)","DOI":"10.1109\/EMBC.2015.7320065"},{"key":"35_CR10","doi-asserted-by":"crossref","unstructured":"Di Mitri, D., Scheffel, M., Drachsler, H., B\u00f6rner, D., Ternier, S., Specht, M.: Learning pulse: a machine learning approach for predicting performance in self-regulated learning using multimodal data, pp. 188\u2013197. ACM (2017)","DOI":"10.1145\/3027385.3027447"},{"issue":"3\u20134","key":"35_CR11","doi-asserted-by":"publisher","first-page":"169","DOI":"10.1080\/02699939208411068","volume":"6","author":"P Ekman","year":"1992","unstructured":"Ekman, P.: An argument for basic emotions. Cogn. Emot. 6(3\u20134), 169\u2013200 (1992)","journal-title":"Cogn. Emot."},{"issue":"6","key":"35_CR12","doi-asserted-by":"publisher","first-page":"824","DOI":"10.1016\/j.tele.2016.08.007","volume":"34","author":"AR Faria","year":"2017","unstructured":"Faria, A.R., Almeida, A., Martins, C., Gon\u00e7alves, R., Martins, J., Branco, F.: A global perspective on an emotional learning model proposal. Telem. Inf. 34(6), 824\u2013837 (2017)","journal-title":"Telem. Inf."},{"issue":"1","key":"35_CR13","doi-asserted-by":"publisher","first-page":"23","DOI":"10.1016\/j.ijme.2014.12.001","volume":"13","author":"D Finch","year":"2015","unstructured":"Finch, D., Peacock, M., Lazdowski, D., Hwang, M.: Managing emotions: a case study exploring the relationship between experiential learning, emotions, and student performance. Int. J. Manag. Educ. 13(1), 23\u201336 (2015)","journal-title":"Int. J. Manag. Educ."},{"issue":"2","key":"35_CR14","doi-asserted-by":"publisher","first-page":"90","DOI":"10.1109\/MSP.2006.1621452","volume":"23","author":"A Hanjalic","year":"2006","unstructured":"Hanjalic, A.: Extracting moods from pictures and sounds: towards truly personalized tv. IEEE Signal Process. Mag. 23(2), 90\u2013100 (2006)","journal-title":"IEEE Signal Process. Mag."},{"key":"35_CR15","doi-asserted-by":"crossref","unstructured":"Hayes, T.L., Kanan, C.: Lifelong machine learning with deep streaming linear discriminant analysis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2020)","DOI":"10.1109\/CVPRW50498.2020.00118"},{"key":"35_CR16","doi-asserted-by":"publisher","first-page":"3265","DOI":"10.1109\/ACCESS.2019.2962085","volume":"8","author":"H Huang","year":"2020","unstructured":"Huang, H., Hu, Z., Wang, W., Wu, M.: Multimodal emotion recognition based on ensemble convolutional neural network. IEEE Access 8, 3265\u20133271 (2020)","journal-title":"IEEE Access"},{"key":"35_CR17","doi-asserted-by":"crossref","unstructured":"Islam, M.R., Ahmad, M.: Wavelet analysis based classification of emotion from eeg signal. In: International Conference on Electrical, Computer and Communication Engineering, pp. 1\u20136 (2019)","DOI":"10.1109\/ECACE.2019.8679156"},{"issue":"6","key":"35_CR18","doi-asserted-by":"publisher","first-page":"618","DOI":"10.1111\/jcal.12158","volume":"32","author":"L Kn\u00f6rzer","year":"2016","unstructured":"Kn\u00f6rzer, L., Br\u00fcnken, R., Park, B.: Emotions and multimedia learning: the moderating role of learner characteristics. J. Comp. Assist. Learn. 32(6), 618\u2013631 (2016)","journal-title":"J. Comp. Assist. Learn."},{"issue":"1","key":"35_CR19","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/T-AFFC.2011.15","volume":"3","author":"S Koelstra","year":"2012","unstructured":"Koelstra, S., et al.: Deap: a database for emotion analysis;using physiological signals. IEEE Trans. Affect. Comput. 3(1), 18\u201331 (2012)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"4","key":"35_CR20","doi-asserted-by":"publisher","first-page":"494","DOI":"10.1177\/0956797616687364","volume":"28","author":"DH Lee","year":"2017","unstructured":"Lee, D.H., Anderson, A.K.: Reading what the mind thinks from how the eye sees. Psychol. Sci. 28(4), 494\u2013503 (2017)","journal-title":"Psychol. Sci."},{"key":"35_CR21","doi-asserted-by":"crossref","unstructured":"Liu, W., Zheng, W., Lu, B.: Multimodal emotion recognition using multimodal deep learning. CoRR abs\/1602.08225 (2016)","DOI":"10.1007\/978-3-319-46672-9_58"},{"key":"35_CR22","unstructured":"Mitri, D.D., Schneider, J., Specht, M., Drachsler, H.: The big five: addressing recurrent multimodal learning data challenges, vol. 2163. CrossMML (2018)"},{"key":"35_CR23","doi-asserted-by":"crossref","unstructured":"Nandi, A., Xhafa, F., Subirats, L., Fort, S.: A survey on multimodal data stream mining for e-learner\u2019s emotion recognition. In: 2020 International Conference on Omni-layer Intelligent Systems (COINS), pp. 1\u20136 (2020)","DOI":"10.1109\/COINS49042.2020.9191370"},{"issue":"5","key":"35_CR24","doi-asserted-by":"publisher","first-page":"1589","DOI":"10.3390\/s21051589","volume":"21","author":"A Nandi","year":"2021","unstructured":"Nandi, A., Xhafa, F., Subirats, L., Fort, S.: Real-time emotion classification using eeg data stream in e-learning contexts. Sensors 21(5), 1589 (2021)","journal-title":"Sensors"},{"issue":"2","key":"35_CR25","doi-asserted-by":"publisher","first-page":"193","DOI":"10.1111\/jcal.12232","volume":"34","author":"L Prieto","year":"2018","unstructured":"Prieto, L., Sharma, K., Kidzinski, L., Rodr\u00edguez-Triana, M., Dillenbourg, P.: Multimodal teaching analytics: automated extraction of orchestration graphs from wearable sensor data. J. Comput. Assist. Learn. 34(2), 193\u2013203 (2018)","journal-title":"J. Comput. Assist. Learn."},{"issue":"2","key":"35_CR26","doi-asserted-by":"publisher","first-page":"373","DOI":"10.1109\/TNN.2006.885439","volume":"18","author":"A Savran","year":"2007","unstructured":"Savran, A.: Multifeedback-layer neural network. IEEE Trans. Neural Netw. 18(2), 373\u2013384 (2007)","journal-title":"IEEE Trans. Neural Netw."},{"issue":"2","key":"35_CR27","doi-asserted-by":"publisher","first-page":"81","DOI":"10.1037\/h0054570","volume":"61","author":"H Schlosberg","year":"1954","unstructured":"Schlosberg, H.: Three dimensions of emotion. Psych. Rev. 61(2), 81\u201388 (1954)","journal-title":"Psych. Rev."},{"key":"35_CR28","doi-asserted-by":"crossref","unstructured":"Smith, L.N.: Cyclical learning rates for training neural networks. In: IEEE Winter Conference on Applications of Computer Vision, pp. 464\u2013472 (2017)","DOI":"10.1109\/WACV.2017.58"},{"issue":"4","key":"35_CR29","doi-asserted-by":"publisher","first-page":"1084","DOI":"10.1016\/j.eswa.2006.02.005","volume":"32","author":"A Subasi","year":"2007","unstructured":"Subasi, A.: Eeg signal classification using wavelet feature extraction and a mixture of expert model. Expert Syst. Appl. 32(4), 1084\u20131093 (2007)","journal-title":"Expert Syst. Appl."},{"key":"35_CR30","doi-asserted-by":"publisher","first-page":"93","DOI":"10.1016\/j.cmpb.2016.12.005","volume":"140","author":"Z Yin","year":"2017","unstructured":"Yin, Z., Zhao, M., Wang, Y., Yang, J., Zhang, J.: Recognition of emotions using multimodal physiological signals and an ensemble deep learning model. Comput. Methods Prog. Biomed. 140, 93\u2013110 (2017)","journal-title":"Comput. Methods Prog. Biomed."},{"key":"35_CR31","doi-asserted-by":"publisher","first-page":"103","DOI":"10.1016\/j.inffus.2020.01.011","volume":"59","author":"J Zhang","year":"2020","unstructured":"Zhang, J., Yin, Z., Chen, P., Nichele, S.: Emotion recognition using multi-modal data and machine learning techniques: a tutorial and review. Inf. Fus. 59, 103\u2013126 (2020)","journal-title":"Inf. Fus."},{"key":"35_CR32","doi-asserted-by":"publisher","first-page":"7943","DOI":"10.1109\/ACCESS.2021.3049516","volume":"9","author":"Y Zhang","year":"2021","unstructured":"Zhang, Y., Cheng, C., Zhang, Y.: Multimodal emotion recognition using a hierarchical fusion convolutional neural network. IEEE Access 9, 7943\u20137951 (2021)","journal-title":"IEEE Access"},{"issue":"3","key":"35_CR33","doi-asserted-by":"publisher","first-page":"1110","DOI":"10.1109\/TCYB.2018.2797176","volume":"49","author":"W Zheng","year":"2019","unstructured":"Zheng, W., Liu, W., Lu, Y., Lu, B., Cichocki, A.: Emotionmeter: a multimodal framework for recognizing human emotions. IEEE Trans. Cybern. 49(3), 1110\u20131122 (2019)","journal-title":"IEEE Trans. Cybern."}],"container-title":["Proceedings of the International Neural Networks Society","Proceedings of the 22nd Engineering Applications of Neural Networks Conference"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-80568-5_35","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T07:54:07Z","timestamp":1656402847000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-80568-5_35"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030805678","9783030805685"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-80568-5_35","relation":{},"ISSN":["2661-8141","2661-815X"],"issn-type":[{"type":"print","value":"2661-8141"},{"type":"electronic","value":"2661-815X"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"1 July 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"EANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Engineering Applications of Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Crete","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 June 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 June 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eann2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.eann2021.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}