{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T01:59:36Z","timestamp":1774663176426,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":23,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819698936","type":"print"},{"value":"9789819698943","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-9894-3_15","type":"book-chapter","created":{"date-parts":[[2025,7,25]],"date-time":"2025-07-25T19:56:38Z","timestamp":1753473398000},"page":"171-182","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["EmoTune: Enhancing Speech Emotion Recognition with Emotion-Specific Token Learning and Contrastive Representation Optimization"],"prefix":"10.1007","author":[{"given":"Jiaqian","family":"Ren","sequence":"first","affiliation":[]},{"given":"Xupu","family":"Cai","sequence":"additional","affiliation":[]},{"given":"Meng","family":"Ye","sequence":"additional","affiliation":[]},{"given":"Hong","family":"Luo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,26]]},"reference":[{"key":"15_CR1","doi-asserted-by":"crossref","unstructured":"Badshah, A.M., Ahmad, J., Rahim, N., Baik, S.W.: Speech emotion recognition from spectrograms with deep convolutional 
neural network. In: 2017 International Conference on Platform Technology and Service (PlatCon), pp. 1\u20135. IEEE (2017)","DOI":"10.1109\/PlatCon.2017.7883728"},{"key":"15_CR2","unstructured":"Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: a framework for self-supervised learning of speech representations. In: Advances in Neural Information Processing Systems, vol. 33, pp. 12449\u201312460 (2020)"},{"key":"15_CR3","doi-asserted-by":"publisher","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","volume":"42","author":"C Busso","year":"2008","unstructured":"Busso, C., et al.: Iemocap: interactive emotional dyadic motion capture database. Lang. Resour. Eval. 42, 335\u2013359 (2008)","journal-title":"Lang. Resour. Eval."},{"key":"15_CR4","doi-asserted-by":"crossref","unstructured":"Cai, X., Yuan, J., Zheng, R., Huang, L., Church, K.: Speech emotion recognition with multi-task learning. In: Interspeech, Brno, vol. 2021, pp. 4508\u20134512 (2021)","DOI":"10.21437\/Interspeech.2021-1852"},{"key":"15_CR5","doi-asserted-by":"crossref","unstructured":"Chen, L.W., Rudnicky, A.: Exploring wav2vec 2.0 fine tuning for improved speech emotion recognition. In: ICASSP 2023\u20132023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1\u20135. IEEE (2023)","DOI":"10.1109\/ICASSP49357.2023.10095036"},{"issue":"6","key":"15_CR6","doi-asserted-by":"publisher","first-page":"1505","DOI":"10.1109\/JSTSP.2022.3188113","volume":"16","author":"S Chen","year":"2022","unstructured":"Chen, S., et al.: Wavlm: large-scale self-supervised pre-training for full stack speech processing. IEEE J. Sel. Top. Signal Process. 16(6), 1505\u20131518 (2022)","journal-title":"IEEE J. Sel. Top. Signal Process."},{"key":"15_CR7","doi-asserted-by":"crossref","unstructured":"Fang, Y., Xing, X., Xu, X., Zhang, W.: Exploring downstream transfer of self-supervised features for speech emotion recognition. In: Proceedings of INTERSPEECH, vol. 2023, pp. 
3627\u20133631 (2023)","DOI":"10.21437\/Interspeech.2023-653"},{"key":"15_CR8","doi-asserted-by":"crossref","unstructured":"Gao, Y., Chu, C., Kawahara, T.: Two-stage finetuning of wav2vec 2.0 for speech emotion recognition with ASR and gender pretraining. In: Proceedings of Interspeech, pp. 3637\u20133641 (2023)","DOI":"10.21437\/Interspeech.2023-756"},{"key":"15_CR9","doi-asserted-by":"crossref","unstructured":"Gao, Y., Shi, H., Chu, C., Kawahara, T.: Enhancing two-stage finetuning for speech emotion recognition using adapters. In: ICASSP 2024\u20132024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 11316\u201311320. IEEE (2024)","DOI":"10.1109\/ICASSP48485.2024.10446645"},{"key":"15_CR10","doi-asserted-by":"publisher","first-page":"3451","DOI":"10.1109\/TASLP.2021.3122291","volume":"29","author":"WN Hsu","year":"2021","unstructured":"Hsu, W.N., Bolte, B., Tsai, Y.H.H., Lakhotia, K., Salakhutdinov, R., Mohamed, A.: Hubert: self-supervised speech representation learning by masked prediction of hidden units. IEEE\/ACM Trans. Audio Speech Lang. Process. 29, 3451\u20133460 (2021)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"15_CR11","doi-asserted-by":"crossref","unstructured":"Huang, Z., Dong, M., Mao, Q., Zhan, Y.: Speech emotion recognition using CNN. In: Proceedings of the 22nd ACM International Conference on Multimedia, pp. 801\u2013804 (2014)","DOI":"10.1145\/2647868.2654984"},{"key":"15_CR12","doi-asserted-by":"crossref","unstructured":"Li, Q., et al.: Frame-level emotional state alignment method for speech emotion recognition. In: ICASSP 2024\u20132024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 11486\u201311490. IEEE (2024)","DOI":"10.1109\/ICASSP48485.2024.10446812"},{"key":"15_CR13","doi-asserted-by":"crossref","unstructured":"Pandey, S.K., Shekhawat, H.S., Prasanna, S.M.: Deep learning techniques for speech emotion recognition: a review. 
In: 2019 29th International Conference Radioelektronika (RADIOELEKTRONIKA), pp. 1\u20136. IEEE (2019)","DOI":"10.1109\/RADIOELEK.2019.8733432"},{"key":"15_CR14","doi-asserted-by":"crossref","unstructured":"Pepino, L., Riera, P., Ferrer, L.: Emotion recognition from speech using wav2vec 2.0 embeddings. In: Proceedings of Interspeech 2021, pp. 3400\u20133404 (2021)","DOI":"10.21437\/Interspeech.2021-703"},{"key":"15_CR15","doi-asserted-by":"crossref","unstructured":"Satt, A., Rozenberg, S., Hoory, R., et al.: Efficient emotion recognition from speech using deep learning on spectrograms. In: Interspeech, pp. 1089\u20131093 (2017)","DOI":"10.21437\/Interspeech.2017-200"},{"key":"15_CR16","doi-asserted-by":"crossref","unstructured":"Shen, S., Gao, Y., Liu, F., Wang, H., Zhou, A.: Emotion neural transducer for fine-grained speech emotion recognition. In: ICASSP 2024\u20132024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 10111\u201310115. IEEE (2024)","DOI":"10.1109\/ICASSP48485.2024.10446974"},{"key":"15_CR17","doi-asserted-by":"crossref","unstructured":"Shen, S., Liu, F., Zhou, A.: Mingling or misalignment? Temporal shift for speech emotion recognition with pre-trained representations. In: ICASSP 2023\u20132023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1\u20135. IEEE (2023)","DOI":"10.1109\/ICASSP49357.2023.10095193"},{"key":"15_CR18","doi-asserted-by":"crossref","unstructured":"Sun, D., He, Y., Han, J.: Using auxiliary tasks in multimodal fusion of wav2vec 2.0 and bert for multimodal emotion recognition. In: ICASSP 2023\u20132023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1\u20135. IEEE (2023)","DOI":"10.1109\/ICASSP49357.2023.10096586"},{"key":"15_CR19","doi-asserted-by":"crossref","unstructured":"Tang, D., Zeng, J., Li, M.: An end-to-end deep learning framework for speech emotion recognition of atypical individuals. In: Interspeech, vol. 
2018, pp. 162\u2013166 (2018)","DOI":"10.21437\/Interspeech.2018-2581"},{"key":"15_CR20","doi-asserted-by":"crossref","unstructured":"Tzirakis, P., Zhang, J., Schuller, B.W.: End-to-end speech emotion recognition using deep neural networks. In: 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5089\u20135093. IEEE (2018)","DOI":"10.1109\/ICASSP.2018.8462677"},{"key":"15_CR21","doi-asserted-by":"crossref","unstructured":"Wang, P., Han, K., Wei, X.S., Zhang, L., Wang, L.: Contrastive learning based hybrid networks for long-tailed image classification. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 943\u2013952 (2021)","DOI":"10.1109\/CVPR46437.2021.00100"},{"key":"15_CR22","doi-asserted-by":"publisher","first-page":"47795","DOI":"10.1109\/ACCESS.2021.3068045","volume":"9","author":"TM Wani","year":"2021","unstructured":"Wani, T.M., Gunawan, T.S., Qadri, S.A.A., Kartiwi, M., Ambikairajah, E.: A comprehensive review of speech emotion recognition systems. IEEE Access 9, 47795\u201347814 (2021)","journal-title":"IEEE Access"},{"key":"15_CR23","doi-asserted-by":"crossref","unstructured":"Zou, H., Si, Y., Chen, C., Rajan, D., Chng, E.S.: Speech emotion recognition with co-attention based multi-level acoustic information. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7367\u20137371. 
IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9747095"}],"container-title":["Lecture Notes in Computer Science","Advanced Intelligent Computing Technology and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-9894-3_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T01:38:17Z","timestamp":1774661897000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-9894-3_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819698936","9789819698943"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-9894-3_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"26 July 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Ningbo","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 July 2025","order":7,"name":"conference_start_date","label":"Conference 
Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 July 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icic2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ic-icc.cn\/icg\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}