{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T00:45:03Z","timestamp":1776041103050,"version":"3.50.1"},"reference-count":39,"publisher":"Elsevier BV","issue":"5","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2024,10,15]],"date-time":"2024-10-15T00:00:00Z","timestamp":1728950400000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100007957","name":"Chongqing Municipal Education Commission","doi-asserted-by":"publisher","award":["CYB21203"],"award-info":[{"award-number":["CYB21203"]}],"id":[{"id":"10.13039\/501100007957","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004502","name":"Chongqing University of Posts and Telecommunications","doi-asserted-by":"publisher","award":["BYJS20 2106"],"award-info":[{"award-number":["BYJS20 2106"]}],"id":[{"id":"10.13039\/501100004502","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Digital Communications and Networks"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1016\/j.dcan.2024.10.007","type":"journal-article","created":{"date-parts":[[2024,10,18]],"date-time":"2024-10-18T10:29:09Z","timestamp":1729247349000},"page":"1567-1577","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":4,"title":["Cross-feature fusion speech emotion recognition based on attention mask residual network and Wav2vec 2.0"],"prefix":"10.1016","volume":"11","author":[{"given":"Xiaoke","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5315-2065","authenticated-orcid":false,"given":"Zufan","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.dcan.2024.10.007_br0010","series-title":"The Nature of Emotion: Fundamental Questions","year":"1994"},{"issue":"4","key":"10.1016\/j.dcan.2024.10.007_br0020","first-page":"2958","article-title":"Dynamic trust relationships aware data privacy protection in mobile crowd-sensing","volume":"5","author":"Wu","year":"2018","journal-title":"IEEE Int. Things J."},{"issue":"2","key":"10.1016\/j.dcan.2024.10.007_br0030","first-page":"1928","article-title":"A feature-based learning system for Internet of Things applications","volume":"6","author":"Wu","year":"2019","journal-title":"IEEE Int. 
Things J."},{"key":"10.1016\/j.dcan.2024.10.007_br0040","doi-asserted-by":"crossref","first-page":"19143","DOI":"10.1109\/ACCESS.2019.2896880","article-title":"Speech recognition using deep neural networks: a systematic review","volume":"7","author":"Nassif","year":"2019","journal-title":"IEEE Access"},{"issue":"6","key":"10.1016\/j.dcan.2024.10.007_br0050","doi-asserted-by":"crossref","first-page":"1576","DOI":"10.1109\/TMM.2017.2766843","article-title":"Speech emotion recognition using deep convolutional neural network and discriminant temporal pyramid matching","volume":"20","author":"Zhang","year":"2018","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.dcan.2024.10.007_br0060","doi-asserted-by":"crossref","first-page":"56","DOI":"10.1016\/j.specom.2019.12.001","article-title":"Speech emotion recognition: emotional models, databases, features, preprocessing methods, supporting modalities, and classifiers","volume":"116","author":"Ak\u00e7ay","year":"2020","journal-title":"Speech Commun."},{"key":"10.1016\/j.dcan.2024.10.007_br0070","series-title":"Proceedings of the 2016 International Conference on Communication and Electronics Systems (ICCES)","first-page":"1","article-title":"A comparative study of silence and non silence regions of speech signal using prosody features","author":"Rakesh","year":"2016"},{"key":"10.1016\/j.dcan.2024.10.007_br0080","doi-asserted-by":"crossref","first-page":"184","DOI":"10.1016\/j.inffus.2018.06.003","article-title":"Audio-visual emotion fusion (avef): a deep efficient weighted approach","volume":"46","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.dcan.2024.10.007_br0090","series-title":"Proceedings of Interspeech 2017","first-page":"1089","article-title":"Efficient emotion recognition from speech using deep learning on spectrograms","author":"Satt","year":"2017"},{"key":"10.1016\/j.dcan.2024.10.007_br0100","series-title":"Proceedings of Interspeech 2023","first-page":"3637","article-title":"Two-stage finetuning of wav2vec 2.0 for speech emotion recognition with asr and gender pretraining","author":"Gao","year":"2023"},{"key":"10.1016\/j.dcan.2024.10.007_br0110","series-title":"Proceedings of the 2017 4th International Conference on Signal Processing and Integrated Networks (SPIN)","first-page":"137","article-title":"Speech emotion recognition with deep learning","author":"Harar","year":"2017"},{"key":"10.1016\/j.dcan.2024.10.007_br0120","series-title":"Proceedings of the 2017 Information Theory and Applications Workshop (ITA)","first-page":"1","article-title":"Speech emotion recognition based on Gaussian mixture models and deep neural networks","author":"Tashev","year":"2017"},{"key":"10.1016\/j.dcan.2024.10.007_br0130","series-title":"Proceedings of the 2018 26th European Signal Processing Conference (EUSIPCO)","first-page":"2055","article-title":"An unsupervised frame selection technique for robust emotion recognition in noisy speech","author":"Pandharipande","year":"2018"},{"key":"10.1016\/j.dcan.2024.10.007_br0140","series-title":"Proceedings of Interdisciplinary Workshop on Laughter and Other Non-Verbal Vocalisations in Speech 2015","first-page":"39","article-title":"Recognizing emotions in dialogues with disfluencies and non-verbal vocalisations","author":"Tian","year":"2015"},{"key":"10.1016\/j.dcan.2024.10.007_br0150","series-title":"Proceedings of the 2022 IEEE Spoken Language Technology Workshop (SLT)","first-page":"868","article-title":"Exploration of a self-supervised speech model: a study on 
emotional corpora","author":"Li","year":"2023"},{"key":"10.1016\/j.dcan.2024.10.007_br0160","series-title":"Proceedings of the 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"1","article-title":"Exploring wav2vec 2.0 fine tuning for improved speech emotion recognition","author":"Chen","year":"2023"},{"key":"10.1016\/j.dcan.2024.10.007_br0170","series-title":"Proceedings of Interspeech 2019","first-page":"3465","article-title":"Wav2vec: unsupervised pre-training for speech recognition","author":"Schneider","year":"2019"},{"key":"10.1016\/j.dcan.2024.10.007_br0180","series-title":"Proceedings of the 33st International Conference on Neural Information Processing Systems (NeurIPS)","first-page":"12449","article-title":"Wav2vec 2.0: a framework for self-supervised learning of speech representations","author":"Baevski","year":"2020"},{"key":"10.1016\/j.dcan.2024.10.007_br0190","doi-asserted-by":"crossref","first-page":"3451","DOI":"10.1109\/TASLP.2021.3122291","article-title":"Hubert: self-supervised speech representation learning by masked prediction of hidden units","volume":"29","author":"Hsu","year":"2021","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"issue":"6","key":"10.1016\/j.dcan.2024.10.007_br0200","doi-asserted-by":"crossref","first-page":"1505","DOI":"10.1109\/JSTSP.2022.3188113","article-title":"Wavlm: large-scale self-supervised pre-training for full stack speech processing","volume":"16","author":"Chen","year":"2022","journal-title":"IEEE J. Sel. Top. Signal Process."},{"key":"10.1016\/j.dcan.2024.10.007_br0210","series-title":"Proceedings of Interspeech 2021","first-page":"3400","article-title":"Emotion recognition from speech using wav2vec 2.0 embeddings","author":"Pepino","year":"2021"},{"key":"10.1016\/j.dcan.2024.10.007_br0220","series-title":"Proceedings of Interspeech 2021","first-page":"3370","article-title":"Temporal context in speech emotion recognition","author":"Xia","year":"2021"},{"key":"10.1016\/j.dcan.2024.10.007_br0230","series-title":"Proceedings of the 2022 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)","first-page":"1232","article-title":"Multi-task learning for speech emotion and emotion intensity recognition","author":"Yue","year":"2022"},{"key":"10.1016\/j.dcan.2024.10.007_br0240","series-title":"Proceedings of 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"7362","article-title":"Fusing asr outputs in joint training for speech emotion recognition","author":"Li","year":"2022"},{"key":"10.1016\/j.dcan.2024.10.007_br0250","series-title":"Proceedings of the 2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops","first-page":"1","article-title":"Interpreting ambiguous emotional expressions","author":"Mower","year":"2009"},{"key":"10.1016\/j.dcan.2024.10.007_br0260","series-title":"Proceedings of Workshop on Speech, Music and Mind (SMM 2018)","first-page":"21","article-title":"Cnn+lstm architecture for speech emotion recognition with data augmentation","author":"Etienne","year":"2018"},{"key":"10.1016\/j.dcan.2024.10.007_br0270","series-title":"Proceedings of the 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"4964","article-title":"Soft-target training with ambiguous emotional utterances for dnn-based speech emotion 
classification","author":"Ando","year":"2018"},{"issue":"8","key":"10.1016\/j.dcan.2024.10.007_br0280","doi-asserted-by":"crossref","first-page":"2203","DOI":"10.1109\/TMM.2014.2360798","article-title":"Learning salient features for speech emotion recognition using convolutional neural networks","volume":"16","author":"Mao","year":"2014","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.dcan.2024.10.007_br0290","series-title":"Proceedings of the 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"770","article-title":"Deep residual learning for image recognition","author":"He","year":"2016"},{"key":"10.1016\/j.dcan.2024.10.007_br0300","series-title":"Proceedings of European Conference on Computer Vision (ECCV)","first-page":"630","article-title":"Identity mappings in deep residual networks","author":"He","year":"2016"},{"key":"10.1016\/j.dcan.2024.10.007_br0310","first-page":"448","article-title":"Batch normalization: accelerating deep network training by reducing internal covariate shift","author":"Ioffe","year":"2015"},{"key":"10.1016\/j.dcan.2024.10.007_br0320","series-title":"Proceedings of the 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"7132","article-title":"Squeeze-and-excitation networks","author":"Hu","year":"2018"},{"key":"10.1016\/j.dcan.2024.10.007_br0330","series-title":"Proceedings of the 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"4275","article-title":"Multimodal cross- and self-attention network for speech emotion recognition","author":"Sun","year":"2021"},{"key":"10.1016\/j.dcan.2024.10.007_br0340","series-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems (NeurIPS)","first-page":"5998","article-title":"Attention is all you need","author":"Vaswani","year":"2017"},{"issue":"4","key":"10.1016\/j.dcan.2024.10.007_br0350","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","article-title":"Iemocap: interactive emotional dyadic motion capture database","volume":"42","author":"Busso","year":"2008","journal-title":"Lang. Resour. Eval."},{"issue":"10","key":"10.1016\/j.dcan.2024.10.007_br0360","doi-asserted-by":"crossref","first-page":"1440","DOI":"10.1109\/LSP.2018.2860246","article-title":"3-d convolutional recurrent neural networks with attention model for speech emotion recognition","volume":"25","author":"Chen","year":"2018","journal-title":"IEEE Signal Process. Lett."},{"key":"10.1016\/j.dcan.2024.10.007_br0370","series-title":"Proceedings of the 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"5886","article-title":"Every rating matters: joint learning of subjective labels and individual annotators for speech emotion classification","author":"Chou","year":"2019"},{"issue":"16","key":"10.1016\/j.dcan.2024.10.007_br0380","doi-asserted-by":"crossref","first-page":"9062","DOI":"10.3390\/app13169062","article-title":"Cross-corpus training strategy for speech emotion recognition using self-supervised representations","volume":"13","author":"Pastor","year":"2023","journal-title":"Appl. Sci."},{"key":"10.1016\/j.dcan.2024.10.007_br0390","first-page":"1","article-title":"Learning with rater-expanded label space to improve speech emotion recognition","author":"Upadhyay","year":"2024","journal-title":"IEEE Trans. Affect. 
Comput."}],"container-title":["Digital Communications and Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S2352864824001299?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S2352864824001299?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,12,19]],"date-time":"2025-12-19T18:14:07Z","timestamp":1766168047000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S2352864824001299"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10]]},"references-count":39,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2025,10]]}},"alternative-id":["S2352864824001299"],"URL":"https:\/\/doi.org\/10.1016\/j.dcan.2024.10.007","relation":{},"ISSN":["2352-8648"],"issn-type":[{"value":"2352-8648","type":"print"}],"subject":[],"published":{"date-parts":[[2025,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Cross-feature fusion speech emotion recognition based on attention mask residual network and Wav2vec 2.0","name":"articletitle","label":"Article Title"},{"value":"Digital Communications and Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.dcan.2024.10.007","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2024 Chongqing University of Posts and Telecommunications. Production and hosting by Elsevier B.V. on behalf of KeAi Communications Co. Ltd.","name":"copyright","label":"Copyright"}]}}