{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T18:00:07Z","timestamp":1770832807257,"version":"3.50.1"},"publisher-location":"Cham","reference-count":24,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031204999","type":"print"},{"value":"9783031205002","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20500-2_26","type":"book-chapter","created":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T05:12:32Z","timestamp":1672549952000},"page":"315-326","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["Audio-Visual Fusion Network Based on\u00a0Conformer for\u00a0Multimodal Emotion Recognition"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6654-1550","authenticated-orcid":false,"given":"Peini","family":"Guo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5044-2400","authenticated-orcid":false,"given":"Zhengyan","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5236-7010","authenticated-orcid":false,"given":"Yidi","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7498-6541","authenticated-orcid":false,"given":"Hong","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,1,1]]},"reference":[{"key":"26_CR1","doi-asserted-by":"crossref","unstructured":"Praveen, R.G., et al.: A Joint Cross-Attention Model for Audio-Visual Fusion in Dimensional Emotion Recognition. arXiv preprint arXiv:2203.14779 (2022)","DOI":"10.1109\/CVPRW56347.2022.00278"},{"key":"26_CR2","doi-asserted-by":"crossref","unstructured":"Praveen, R.G., Granger, E., Cardinal, P.: Cross attentional audio-visual fusion for dimensional emotion recognition. In: IEEE International Conference on Automatic Face and Gesture Recognition, pp. 1\u20138 (2021)","DOI":"10.1109\/FG52635.2021.9667055"},{"key":"26_CR3","doi-asserted-by":"crossref","unstructured":"Wu, C.H., Lin, J.C., Wei, W.L.: Survey on audio-visual emotion recognition: databases, features, and data fusion strategies. APSIPA Trans. Signal Inf. Process. 3(1) (2014)","DOI":"10.1017\/ATSIP.2014.11"},{"key":"26_CR4","unstructured":"Deng, D., Zhou, Y., Pi, J., Shi, B.E.: Multimodal utterance-level affect analysis using visual, audio and text features. arXiv preprint arXiv:1805.00625 (2018)"},{"key":"26_CR5","doi-asserted-by":"crossref","unstructured":"Kumar, A., Vepa, J.: Gated mechanism for attention based multimodal sentiment analysis. In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 
4477\u20134481 (2020)","DOI":"10.1109\/ICASSP40776.2020.9053012"},{"issue":"2","key":"26_CR6","doi-asserted-by":"publisher","first-page":"41","DOI":"10.3390\/a9020041","volume":"9","author":"Y Yu","year":"2016","unstructured":"Yu, Y., Lin, H., Meng, J., Zhao, Z.: Visual and textual sentiment analysis of a microblog using deep convolutional neural networks. Algorithms 9(2), 41 (2016)","journal-title":"Algorithms"},{"key":"26_CR7","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.knosys.2019.01.019","volume":"167","author":"F Huang","year":"2019","unstructured":"Huang, F., Zhang, X., Zhao, Z., Xu, J., Li, Z.: Image-text sentiment analysis via deep multimodal attentive fusion. Knowl.-Based Syst. 167, 26\u201337 (2019)","journal-title":"Knowl.-Based Syst."},{"key":"26_CR8","doi-asserted-by":"crossref","unstructured":"Petridis, S., Stafylakis, T., Ma, P., Cai, F.: End-to-end audiovisual speech recognition. In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 6548\u20136552 (2018)","DOI":"10.1109\/ICASSP.2018.8461326"},{"key":"26_CR9","doi-asserted-by":"crossref","unstructured":"Liu, H., Chen, Z., Yang, B.: Lip graph assisted audio-visual speech recognition using bidirectional synchronous fusion. In: Conference of the International Speech Communication Association, pp. 3520\u20133524 (2020)","DOI":"10.21437\/Interspeech.2020-3146"},{"key":"26_CR10","doi-asserted-by":"crossref","unstructured":"Liu, H., Xu, W., Yang, B.: Audio-visual speech recognition using a two-step feature fusion strategy. In: International Conference on Pattern Recognition, pp. 1896\u20131903 (2021)","DOI":"10.1109\/ICPR48806.2021.9412454"},{"key":"26_CR11","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Annual Conference on Neural Information Processing Systems, vol. 30, pp. 6000\u20136010 (2017)"},{"key":"26_CR12","unstructured":"Fu, Z., et al.: A Cross-Modal Fusion Network Based on Self-attention and Residual Structure for Multimodal Emotion Recognition. arXiv preprint arXiv:2111.02172 (2021)"},{"key":"26_CR13","doi-asserted-by":"crossref","unstructured":"Zhang, S., Ding, Y., Wei, Z., Guan, C.: Continuous emotion recognition with audio-visual leader-follower attentive fusion. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3567\u20133574 (2021)","DOI":"10.1109\/ICCVW54120.2021.00397"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"Li, Y., Liu, H., Tang, H.: Multi-modal perception attention network with self-supervised learning for audio-visual speaker tracking. In: Proceedings of the AAAI Conference on Artificial Intelligence, pp. 1456\u20131463 (2022)","DOI":"10.1609\/aaai.v36i2.20035"},{"key":"26_CR15","doi-asserted-by":"crossref","unstructured":"Serdyuk, D., Braga, O., Siohan, O.: Audio-visual speech recognition is worth $$32\\times 32\\times 8$$ voxels. In: IEEE Automatic Speech Recognition and Understanding Workshop, pp. 796\u2013802 (2021)","DOI":"10.1109\/ASRU51503.2021.9688191"},{"key":"26_CR16","doi-asserted-by":"crossref","unstructured":"Tran, M., Soleymani, M.: A pre-trained audio-visual transformer for emotion recognition. In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 4698\u20134702 (2022)","DOI":"10.1109\/ICASSP43922.2022.9747278"},{"key":"26_CR17","doi-asserted-by":"crossref","unstructured":"Chang, F.J., Radfar, M., Mouchtaris, A., King, B., Kunzmann, S.: End-to-end multi-channel transformer for speech recognition. 
In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 5884\u20135888 (2021)","DOI":"10.1109\/ICASSP39728.2021.9414123"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"Gulati, A., et al.: Conformer: convolution-augmented transformer for speech recognition. In: Conference of the International Speech Communication Association, pp. 5036\u20135040 (2020)","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Livingstone, S.R., Russo, F.A.: The Ryerson audio-visual database of emotional speech and song (RAVDESS): a dynamic, multimodal set of facial and vocal expressions in North American English. PLoS ONE 13(5), e0196391 (2018)","DOI":"10.1371\/journal.pone.0196391"},{"key":"26_CR20","unstructured":"Lu, Y., et al.: Understanding and improving transformer from a multi-particle dynamic system point of view. In: Workshop on Integration of Deep Neural Models and Differential Equations (2020)"},{"key":"26_CR21","doi-asserted-by":"crossref","unstructured":"Baltrusaitis, T., Zadeh, A., Lim, Y.C., Morency, L.P.: Openface 2.0: facial behavior analysis toolkit. In: IEEE International Conference on Automatic Face and Gesture Recognition, pp. 59\u201366 (2018)","DOI":"10.1109\/FG.2018.00019"},{"key":"26_CR22","unstructured":"Joze, H.R.V., Shaban, A., Iuzzolino, M.L., Koishida, K.: MMTM: multimodal transfer module for CNN fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13289\u201313299 (2020)"},{"key":"26_CR23","unstructured":"Su, L., Hu, C., Li, G., Cao, D.: MSAF: Multimodal Split Attention Fusion. arXiv preprint arXiv:2012.07175 (2020)"},{"key":"26_CR24","doi-asserted-by":"crossref","unstructured":"Verbitskiy, S., Berikov, V., Vyshegorodtsev, V.: ERANNs: Efficient Residual Audio Neural Networks for Audio Pattern Recognition. 
arXiv preprint arXiv:2106.01621 (2021)","DOI":"10.1016\/j.patrec.2022.07.012"}],"container-title":["Lecture Notes in Computer Science","Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20500-2_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T05:38:52Z","timestamp":1672551532000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20500-2_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031204999","9783031205002"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20500-2_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"1 January 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CICAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"CAAI International Conference on Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Beijing","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 August 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"cicai2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/cicai.caai.cn\/#\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"472","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"164","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information 
(provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"35% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.1","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
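
The record above follows the Crossref REST API "work" schema: a status/message-type envelope with the chapter metadata under "message". As a minimal sketch of how such a record can be fetched and its key fields read, assuming only the public endpoint https://api.crossref.org/works/{doi} and the Python standard library (the field names mirror the keys visible in this record; no other API behaviour is assumed):

import json
import urllib.request

# DOI taken from the record above; api.crossref.org is the public Crossref REST API.
doi = "10.1007/978-3-031-20500-2_26"
url = f"https://api.crossref.org/works/{doi}"

with urllib.request.urlopen(url) as resp:
    record = json.load(resp)

work = record["message"]  # the work metadata sits under the "message" key

# Title, author list, and citation count, read from the keys shown in the record.
print(work["title"][0])
print(", ".join(f"{a.get('given', '')} {a['family']}".strip() for a in work.get("author", [])))
print("cited by:", work.get("is-referenced-by-count", 0))

# Each reference entry carries a "key" plus an "unstructured" string and/or a "DOI".
for ref in work.get("reference", []):
    print(ref["key"], ref.get("DOI") or ref.get("unstructured", ""))

The same fields can of course be read straight from the JSON above without a network call; the sketch only indicates where the title, authors, citation count, and reference entries live in the structure.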