{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,2]],"date-time":"2026-05-02T15:02:26Z","timestamp":1777734146934,"version":"3.51.4"},"publisher-location":"Cham","reference-count":63,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031880445","type":"print"},{"value":"9783031880452","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-88045-2_5","type":"book-chapter","created":{"date-parts":[[2025,4,5]],"date-time":"2025-04-05T15:14:32Z","timestamp":1743866072000},"page":"66-83","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Can Machine Learning Models Recognise Emotions, Particularly Neutral, Better Than Humans?"],"prefix":"10.1007","author":[{"given":"Jeffin","family":"Siby","sequence":"first","affiliation":[]},{"given":"Effie Lai-Chong","family":"Law","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,4,4]]},"reference":[{"key":"5_CR1","doi-asserted-by":"crossref","unstructured":"Ahuja, K.: Emotion AI in healthcare: application, challenges, and future directions. In: Emotional AI and Human-AI Interactions in Social Networking, pp. 131\u2013146. Academic Press, Cambridge (2024)","DOI":"10.1016\/B978-0-443-19096-4.00011-0"},{"key":"5_CR2","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1016\/j.specom.2019.12.001","volume":"116","author":"MB Ak\u00e7ay","year":"2020","unstructured":"Ak\u00e7ay, M.B., \u014eguz, K.: Speech emotion recognition: emotional models, databases, features, preprocessing methods, supporting modalities, and classifiers. Speech Commun. 116, 56\u201376 (2020)","journal-title":"Speech Commun."},{"issue":"16","key":"5_CR3","doi-asserted-by":"publisher","first-page":"5941","DOI":"10.3390\/s22165941","volume":"22","author":"BT Atmaja","year":"2022","unstructured":"Atmaja, B.T., Sasou, A.: Effects of data augmentations on speech emotion recognition. Sensors 22(16), 5941 (2022)","journal-title":"Sensors"},{"key":"5_CR4","first-page":"20284","volume":"10","author":"B Azari","year":"2020","unstructured":"Azari, B., Westlin, C., Satpute, A.B.: Comparing supervised and unsupervised approaches to emotion categorization in the human brain, body, and subjective experience. Sci. Rep. Nat. 10, 20284 (2020)","journal-title":"Sci. Rep. Nat."},{"key":"5_CR5","doi-asserted-by":"crossref","unstructured":"Baltrusaitis, T., Zadeh, A., Lim, Y.C., Morency, L.-P.: Openface 2.0: facial behavior analysis toolkit. In: 2018 13th IEEE international Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 59\u201366. 
IEEE (2018)","DOI":"10.1109\/FG.2018.00019"},{"issue":"5","key":"5_CR6","doi-asserted-by":"publisher","first-page":"1161","DOI":"10.1037\/a0025827","volume":"12","author":"T B\u00e4nziger","year":"2012","unstructured":"B\u00e4nziger, T., Mortillaro, M., Scherer, K.R.: Introducing the Geneva multimodal expression corpus for experimental research on emotion perception. Emotion 12(5), 1161\u20131179 (2012)","journal-title":"Emotion"},{"key":"5_CR7","doi-asserted-by":"crossref","unstructured":"B\u00e4nziger, T., Grandjean, D., Scherer, K.R.: Emotion recognition from expressions in face, voice, and body: the multimodal emotion recognition test (MERT). Emotion 9, 691\u2013704 (2009)","DOI":"10.1037\/a0017088"},{"key":"5_CR8","unstructured":"Binns, R.: Fairness in machine learning: lessons from political philosophy. In: Conference on Fairness, Accountability and Transparency, pp. 149\u2013159. PMLR, January 2018"},{"key":"5_CR9","doi-asserted-by":"crossref","unstructured":"Busso, C., et al.: IEMOCAP: interactive emotional dyadic motion capture database. Lang. Resour. Eval. 42, 335\u2013359 (2008)","DOI":"10.1007\/s10579-008-9076-6"},{"issue":"4","key":"5_CR10","doi-asserted-by":"publisher","first-page":"377","DOI":"10.1109\/TAFFC.2014.2336244","volume":"5","author":"H Cao","year":"2014","unstructured":"Cao, H., Cooper, D.G., Keutmann, M.K., Gur, R.C., Nenkova, A., Verma, R.: CREMA-D: crowd-sourced emotional multimodal actors dataset. IEEE Trans. Affect. Comput. 5(4), 377\u2013390 (2014)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"5_CR11","doi-asserted-by":"publisher","unstructured":"Carbonell, F.M., Boman, M., Laukka P.: Comparing supervised and unsupervised approaches to multimodal emotion recognition. PeerJ Comput. Sci. 7, e804 (2021). https:\/\/doi.org\/10.7717\/peerj-cs.804","DOI":"10.7717\/peerj-cs.804"},{"key":"5_CR12","doi-asserted-by":"publisher","first-page":"321","DOI":"10.1613\/jair.953","volume":"16","author":"NV Chawla","year":"2002","unstructured":"Chawla, N.V., Bowyer, K.W., Hall, L.O., Kegelmeyer, W.P.: SMOTE: synthetic minority over-sampling technique. J. Artif. Intell. Res. 16, 321\u2013357 (2002)","journal-title":"J. Artif. Intell. Res."},{"key":"5_CR13","doi-asserted-by":"publisher","unstructured":"Douglas-Cowie, E., et al.: The HUMAINE database. In: Cowie, R., Pelachaud, C., Petta, P. (eds.) Emotion-Oriented Systems. Cognitive Technologies, pp. 243\u2013284. Springer, Heidelberg (2011). https:\/\/doi.org\/10.1007\/978-3-642-15184-2_14","DOI":"10.1007\/978-3-642-15184-2_14"},{"key":"5_CR14","unstructured":"Ekman, P.: Universals and cultural differences in facial expressions of emotion. In: Nebraska Symposium on Motivation. University of Nebraska Press (1971)"},{"key":"5_CR15","doi-asserted-by":"crossref","unstructured":"Eyben, F., W\u00f6llmer, M., Schuller, B.: Opensmile: the Munich versatile and fast open-source audio feature extractor. In: Proceedings of the 18th ACM International Conference on Multimedia, pp. 1459\u20131462 (2010)","DOI":"10.1145\/1873951.1874246"},{"key":"5_CR16","doi-asserted-by":"crossref","unstructured":"Eyben, F., et al.: The Geneva minimalistic acoustic parameter set (GeMAPS) for voice research and affective computing. IEEE Trans. Affect. Comput. 
7(2), 190\u2013202 (2015)","DOI":"10.1109\/TAFFC.2015.2457417"},{"key":"5_CR17","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101847","volume":"99","author":"K Ezzameli","year":"2023","unstructured":"Ezzameli, K., Mahersia, H.: Emotion recognition from unimodal to multimodal analysis: a review. Inf. Fus. 99, 101847 (2023)","journal-title":"Inf. Fus."},{"issue":"1","key":"5_CR18","doi-asserted-by":"publisher","first-page":"103","DOI":"10.1007\/s11031-020-09861-3","volume":"45","author":"K Gasper","year":"2021","unstructured":"Gasper, K., Danube, C.I., Hu, D.: Making room for neutral affect: evidence indicating that neutral affect is independent of and co-occurs with eight affective states. Motiv. Emot. 45(1), 103\u2013121 (2021)","journal-title":"Motiv. Emot."},{"issue":"3","key":"5_CR19","doi-asserted-by":"publisher","first-page":"458","DOI":"10.1007\/s11031-014-9457-7","volume":"39","author":"K Gasper","year":"2015","unstructured":"Gasper, K., Hackenbracht, J.: Too busy to feel neutral: reducing cognitive resources attenuates neutral affective states. Motiv. Emot. 39(3), 458\u2013466 (2015)","journal-title":"Motiv. Emot."},{"issue":"3","key":"5_CR20","doi-asserted-by":"publisher","first-page":"385","DOI":"10.1177\/0146167216629131","volume":"42","author":"K Gasper","year":"2016","unstructured":"Gasper, K., Danube, C.I.: The scope of our affective influences: when and how naturally occurring positive, negative, and neutral affects alter judgment. Pers. Soc. Psychol. Bull. 42(3), 385\u2013399 (2016)","journal-title":"Pers. Soc. Psychol. Bull."},{"key":"5_CR21","doi-asserted-by":"publisher","first-page":"2476","DOI":"10.3389\/fpsyg.2019.02476","volume":"10","author":"K Gasper","year":"2019","unstructured":"Gasper, K., Spencer, I.A., Hu, D.: Does neutral affect exist? How challenging three beliefs about neutral affect can advance affective research. Front. Psychol. 10, 2476 (2019)","journal-title":"Front. Psychol."},{"key":"5_CR22","doi-asserted-by":"publisher","first-page":"458","DOI":"10.1007\/s42761-023-00214-0","volume":"4","author":"K Gasper","year":"2023","unstructured":"Gasper, K.: A case for neutrality: Why neutral affect is critical for advancing affetive science. Affect. Sci. 4, 458\u2013462 (2023)","journal-title":"Affect. Sci."},{"key":"5_CR23","doi-asserted-by":"crossref","unstructured":"Gladys, A.A., Vetriselvi, V.: Survey on multimodal approaches to emotion recognition. Neurocomputing, 126693 (2023)","DOI":"10.1016\/j.neucom.2023.126693"},{"key":"5_CR24","doi-asserted-by":"publisher","first-page":"1197","DOI":"10.1007\/s11192-020-03614-2","volume":"125","author":"YC Goh","year":"2020","unstructured":"Goh, Y.C., Cai, X.Q., Theseira, W., Ko, G., Khor, K.A.: Evaluating human versus machine learning performance in classifying research abstracts. Scientometrics 125, 1197\u20131212 (2020)","journal-title":"Scientometrics"},{"key":"5_CR25","unstructured":"Graves, A.: Generating sequences with recurrent neural networks. CoRR, vol. abs\/1308.0850, http:\/\/arxiv.org\/abs\/1308.0850 (2013)"},{"key":"5_CR26","doi-asserted-by":"crossref","unstructured":"Grimm, M., Kroschel, K., Narayanan, S.: The Vera Am Mittag German audio-visual emotional speech database. In: IEEE International Conference on Multimedia and Expo, pp. 
865\u2013868 (2008)","DOI":"10.1109\/ICME.2008.4607572"},{"issue":"1","key":"5_CR27","first-page":"11","volume":"7","author":"ASA Hans","year":"2021","unstructured":"Hans, A.S.A., Rao, S.: A CNN-LSTM based deep neural networks for facial emotion detection in videos. Int. J. Adv. Signal Image Sci. 7(1), 11\u201320 (2021)","journal-title":"Int. J. Adv. Signal Image Sci."},{"key":"5_CR28","doi-asserted-by":"crossref","unstructured":"Kalateh, S., Estrada-Jimenez, L.A., Hojjati, S.N., Barata, J.: A systematic review on multimodal emotion recognition: building blocks, current state, applications, and challenges. IEEE Access (2024)","DOI":"10.1109\/ACCESS.2024.3430850"},{"key":"5_CR29","unstructured":"Khaireddin, Y., Chen, Z.: Facial emotion recognition: state of the art performance on FER2013. arXiv preprint arXiv:2105.03588 (2021)"},{"key":"5_CR30","doi-asserted-by":"publisher","unstructured":"Ko, B.: A brief review of facial emotion recognition based on visual information. Sensors 18(2), 401 (2018). https:\/\/doi.org\/10.3390\/s18020401","DOI":"10.3390\/s18020401"},{"key":"5_CR31","unstructured":"Kollias, D., Zafeiriou, S.: Aff-wild2: Extending the Aff-Wild database for affect recognition. CoRR, vol. abs\/1811.07770. http:\/\/arxiv.org\/abs\/1811.07770 (2018)"},{"key":"5_CR32","doi-asserted-by":"crossref","unstructured":"Kossaifi, J., et al.: SEWA DB: a rich database for audio-visual emotion and sentiment research in the wild. IEEE Trans. Pattern Anal. Mach. Intell. 43(3), 1022\u20131040 (2019)","DOI":"10.1109\/TPAMI.2019.2944808"},{"issue":"11","key":"5_CR33","doi-asserted-by":"publisher","first-page":"1072","DOI":"10.1080\/0144929X.2020.1741684","volume":"40","author":"EL-C Law","year":"2021","unstructured":"Law, E.L.-C., Soleimani, S., Watkins, D., Barwick, J.: Automatic voice emotion recognition of child-parent conversations in natural settings. Behav. Inf. Technol. 40(11), 1072\u20131089 (2021)","journal-title":"Behav. Inf. Technol."},{"issue":"3","key":"5_CR34","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1016\/0098-3004(93)90090-R","volume":"19","author":"A Ma\u0107kiewicz","year":"1993","unstructured":"Ma\u0107kiewicz, A., Ratajczak, W.: Principal components analysis (PCA). Comput. Geosci. 19(3), 303\u2013342 (1993)","journal-title":"Comput. Geosci."},{"issue":"3","key":"5_CR35","doi-asserted-by":"publisher","first-page":"579","DOI":"10.1109\/TAFFC.2019.2955949","volume":"12","author":"DC Ong","year":"2019","unstructured":"Ong, D.C., et al.: Modeling emotion in complex stories: the stanford emotional narratives dataset. IEEE Trans. Affect. Comput. 12(3), 579\u2013594 (2019)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"5_CR36","unstructured":"\u00d6zseven: The acoustic cue of fear: investigation of acoustic parameters of speech containing fear. Arch. Acoust. 43(2), 245\u2013251 (2018)"},{"key":"5_CR37","doi-asserted-by":"crossref","unstructured":"\u00d6zseven, T.: A novel feature selection method for speech emotion recognition. Appl. Acoust. 146, 320\u2013326 (2019). https:\/\/www.sciencedirect.com\/science\/article\/pii\/S0003682X18309915","DOI":"10.1016\/j.apacoust.2018.11.028"},{"key":"5_CR38","unstructured":"Paszke, A., et al.: Pytorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"5_CR39","unstructured":"Pedregosa, F., et al.: Scikit-learn: machine learning in Python. J. Mach. Learn. Res. 
12, 2825\u20132830 (2011)"},{"key":"5_CR40","doi-asserted-by":"crossref","unstructured":"Perepelkina, O., Kazimirova, E., Konstantinova, M.: RAMAS: Russian multimodal corpus of dyadic Interaction for studying emotion recognition. PeerJ Prepr. 6, e26688v1 (2018)","DOI":"10.7287\/peerj.preprints.26688v1"},{"key":"5_CR41","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1016\/j.inffus.2017.02.003","volume":"37","author":"S Poria","year":"2017","unstructured":"Poria, S., Cambria, E., Bajpai, R., Hussain, A.: A review of affective computing: from unimodal analysis to multimodal fusion. Inf. Fus. 37, 98\u2013125 (2017)","journal-title":"Inf. Fus."},{"key":"5_CR42","doi-asserted-by":"crossref","unstructured":"Poria, S.,  et al.: Recognizing emotion cause in conversations. Cognit. Comput.  13 , 1317\u20131332 (2021)","DOI":"10.1007\/s12559-021-09925-7"},{"key":"5_CR43","doi-asserted-by":"crossref","unstructured":"Priyasad, D., Fernando, T., Sridharan, S., Denman, S., Fookes, C.: Dual memory fusion for multimodal speech emotion recognition. In: Proceedings of the INTERSPEECH, vol. 2023, pp. 4543\u20134547 (2023)","DOI":"10.21437\/Interspeech.2023-1090"},{"key":"5_CR44","doi-asserted-by":"crossref","unstructured":"Ringeval, F., Sonderegger, A., Sauer, J., Lalanne, D.: Introducing the RECOLA multimodal corpus of remote collaborative and affective interactions. In: 2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), pp. 1\u20138. IEEE, April 2013","DOI":"10.1109\/FG.2013.6553805"},{"key":"5_CR45","doi-asserted-by":"publisher","first-page":"435","DOI":"10.1016\/j.neucom.2022.10.013","volume":"514","author":"E Ryumina","year":"2022","unstructured":"Ryumina, E., Dresvyanskiy, D., Karpov, A.: In search of a robust facial expressions recognition model: a large-scale visual cross-corpus study. Neurocomputing 514, 435\u2013450 (2022)","journal-title":"Neurocomputing"},{"key":"5_CR46","doi-asserted-by":"publisher","first-page":"305","DOI":"10.1007\/s10919-011-0115-4","volume":"35","author":"KR Scherer","year":"2011","unstructured":"Scherer, K.R., Scherer, U.: Assessing the ability to recognize facial and vocal expressions of emotion: construction and validation of the emotion recognition index. J. Nonverbal Behav. 35, 305\u2013326 (2011)","journal-title":"J. Nonverbal Behav."},{"key":"5_CR47","unstructured":"Scherer, K.R.: On the nature and function of emotion: a component process approach. In: Scherer, K.R., Ekman. P. (eds.) Approaches to Emotion, pp. 293\u2013317. Erlbaum, Hillsdale (1984)"},{"key":"5_CR48","doi-asserted-by":"publisher","first-page":"1307","DOI":"10.1080\/02699930902928969","volume":"23","author":"KR Scherer","year":"2009","unstructured":"Scherer, K.R.: The dynamic architecture of emotion: evidence for the component process model. Cogn. Emot. 23, 1307\u20131351 (2009)","journal-title":"Cogn. Emot."},{"key":"5_CR49","doi-asserted-by":"crossref","unstructured":"Scherer, K.R.: Component models of emotion can inform the quest for emotional competence. In: Matthews, G., Zeidner, M., Roberts, R.D. (eds.) The Science of Emotional Intelligence: Knowns and Unknowns, pp. 101\u2013126. Oxford University Press, New York (2007)","DOI":"10.1093\/acprof:oso\/9780195181890.003.0004"},{"key":"5_CR50","doi-asserted-by":"crossref","unstructured":"Schuller, B., et al.: Being bored? Recognising natural interest by extensive audiovisual integration for real-life application. Image Vis. Comput. 
27(12), 1760-1774 (2009)","DOI":"10.1016\/j.imavis.2009.02.013"},{"key":"5_CR51","doi-asserted-by":"crossref","unstructured":"Schuller, B., et al.: The interspeech 2016 computational paralinguistics challenge: deception, sincerity and native language (2016)","DOI":"10.21437\/Interspeech.2016-129"},{"key":"5_CR52","doi-asserted-by":"crossref","unstructured":"Shah, M., Chakrabarti, C., Spanias, A.: A multi-modal approach to emotion recognition using undirected topic models. In: 2014 IEEE International Symposium on Circuits and Systems (ISCAS), pp. 754\u2013757. IEEE, June 2014","DOI":"10.1109\/ISCAS.2014.6865245"},{"key":"5_CR53","doi-asserted-by":"crossref","unstructured":"Shi, T., Huang, S.L.: MultiEMO: an attention-based correlation-aware multimodal fusion framework for emotion recognition in conversations. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 14752\u201314766, July 2023","DOI":"10.18653\/v1\/2023.acl-long.824"},{"key":"5_CR54","doi-asserted-by":"publisher","unstructured":"Singh, R., Puri, H., Aggarwal, N., Gupta, V.: An efficient language-independent acoustic emotion classification system. Arab. J. Sci. Eng. 45(4), 3111\u20133121 (2020). https:\/\/doi.org\/10.1007\/s13369-019-04293-9","DOI":"10.1007\/s13369-019-04293-9"},{"key":"5_CR55","unstructured":"Steidl, S.: Automatic Classification of Emotion Related User States in Spontaneous Children\u2019s Speech. University of Erlangen-Nuremberg, Erlangen, Germany (2009)"},{"issue":"1","key":"5_CR56","doi-asserted-by":"publisher","first-page":"93","DOI":"10.1007\/s10772-018-9491-z","volume":"21","author":"M Swain","year":"2018","unstructured":"Swain, M., Routray, A., Kabisatpathy, P.: Databases, features and classifiers for speech emotion recognition: a review. Int. J. Speech Technol. 21(1), 93\u2013120 (2018)","journal-title":"Int. J. Speech Technol."},{"key":"5_CR57","doi-asserted-by":"crossref","unstructured":"Toma\u0161ev, N., et al.: AI for social good: unlocking the opportunity for positive impact. Nat. Commun. 11(1), 2468 (2020)","DOI":"10.1038\/s41467-020-15871-z"},{"key":"5_CR58","doi-asserted-by":"publisher","unstructured":"Valles, D., Matin, R.: An audio processing approach using ensemble learning for speech-emotion recognition for children with ASD. In: IEEE World AI IoT Congress (AIIoT). IEEE (2021). https:\/\/doi.org\/10.1109\/aiiot52608.2021.9454174","DOI":"10.1109\/aiiot52608.2021.9454174"},{"issue":"10","key":"5_CR59","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0185513","volume":"12","author":"M Virtanen","year":"2017","unstructured":"Virtanen, M., et al.: The level of cognitive function and recognition of emotions in older adults. PLoS ONE 12(10), e0185513 (2017)","journal-title":"PLoS ONE"},{"issue":"3","key":"5_CR60","doi-asserted-by":"publisher","first-page":"761","DOI":"10.1177\/10731911211068084","volume":"30","author":"TF Williams","year":"2023","unstructured":"Williams, T.F., Vehabovic, N., Simms, L.J.: Developing and validating a facial emotion recognition task with graded intensity. Assessment 30(3), 761\u2013781 (2023)","journal-title":"Assessment"},{"key":"5_CR61","doi-asserted-by":"crossref","unstructured":"Winata, G.I., Kampman, O.P., Fung, P.: Attention-based LSTM for psychological stress detection from spoken language using distant supervision. In: 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6204\u20136208. 
IEEE, April 2018","DOI":"10.1109\/ICASSP.2018.8461990"},{"key":"5_CR62","doi-asserted-by":"crossref","unstructured":"Zadeh, A., Liang, P.P., Poria, S., Vij, P., Cambria, E., Morency, L.P.: Multi-attention recurrent network for human communication comprehension. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32, no. 1, April 2018","DOI":"10.1609\/aaai.v32i1.12024"},{"key":"5_CR63","unstructured":"Zadeh, A., Zellers, R., Pincus, E., Morency, L.P.: MOSI: multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos. arXiv preprint arXiv:1606.06259 (2016)"}],"container-title":["Lecture Notes in Computer Science","Chatbots and Human-Centered AI"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-88045-2_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,8]],"date-time":"2025-04-08T16:37:28Z","timestamp":1744130248000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-88045-2_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031880445","9783031880452"],"references-count":63,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-88045-2_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"4 April 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CONVERSATIONS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Symposium on Chatbots and Human-Centered AI","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Thessaloniki","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"conversations2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/2024.conversations.ws\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
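
The record above is the standard payload returned by Crossref's public works route (https://api.crossref.org/works/{DOI}), so it can be re-fetched and queried programmatically. Below is a minimal Python sketch under that assumption; the requests library and the mailto contact address are assumptions (any address works, and the parameter is optional but recommended for Crossref's polite pool), not part of the record itself.

import requests

# Re-fetch this work record from the Crossref REST API and pull out a few
# fields that appear in the JSON above (title, DOI, pages, reference list).
DOI = "10.1007/978-3-031-88045-2_5"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    params={"mailto": "you@example.org"},  # placeholder contact address
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]  # same structure as the "message" object above

print(work["title"][0])            # chapter title
print(work["DOI"], work["page"])   # DOI and page range "66-83"
print(work["references-count"])    # 63 cited works
# Print the first few references that carry a DOI; entries without one
# only have the "unstructured" citation string.
for ref in work.get("reference", [])[:5]:
    print(ref["key"], ref.get("DOI", "(no DOI)"))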