{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:09:59Z","timestamp":1775578199811,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":35,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,10,26]]},"DOI":"10.1145\/3581783.3612872","type":"proceedings-article","created":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T07:27:30Z","timestamp":1698391650000},"page":"9596-9600","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["Building Robust Multimodal Sentiment Recognition via a Simple yet Effective Multimodal Transformer"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-8109-2943","authenticated-orcid":false,"given":"Daoming","family":"Zong","sequence":"first","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0161-4838","authenticated-orcid":false,"given":"Chaoyue","family":"Ding","sequence":"additional","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-4490-2157","authenticated-orcid":false,"given":"Baoxiang","family":"Li","sequence":"additional","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8519-4630","authenticated-orcid":false,"given":"Dinghao","family":"Zhou","sequence":"additional","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-2492-3528","authenticated-orcid":false,"given":"Jiakui","family":"Li","sequence":"additional","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-5856-9969","authenticated-orcid":false,"given":"Ken","family":"Zheng","sequence":"additional","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-0746-9249","authenticated-orcid":false,"given":"Qunyan","family":"Zhou","sequence":"additional","affiliation":[{"name":"SenseTime Group Limited, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2023,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Alexei Baevski Yuhao Zhou Abdelrahman Mohamed and Michael Auli. 2020. wav2vec 2.0: A framework for self-supervised learning of speech representations. In NeurIPS. 12449--12460."},{"key":"e_1_3_2_1_2_1","first-page":"423","article-title":"Multimodal machine learning: A survey and taxonomy","volume":"41","author":"Tadas Baltruvs","year":"2018","unstructured":"Tadas Baltruvs aitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE Transactions on Pattern Analysis and Machine Intelligence, Vol. 41 (2018), 423--443.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"e_1_3_2_1_3_1","volume-title":"Crossvit: Cross-attention multi-scale vision transformer for image classification. In ICCV. 357--366.","author":"Richard Chen Chun-Fu","year":"2021","unstructured":"Chun-Fu Richard Chen, Quanfu Fan, and Rameswar Panda. 
2021. Crossvit: Cross-attention multi-scale vision transformer for image classification. In ICCV. 357--366."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"crossref","unstructured":"Junyan Cheng Iordanis Fostiropoulos Barry Boehm and Mohammad Soleymani. 2021. Multimodal phased transformer for sentiment analysis. In EMNLP. 2447--2458.","DOI":"10.18653\/v1\/2021.emnlp-main.189"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"crossref","unstructured":"Lukas Christ Shahin Amiriparian Alice Baird Alexander Kathan Niklas M\u00fcller Steffen Klug Chris Gagne Panagiotis Tzirakis Eva-Maria Me\u00dfner Andreas K\u00f6nig et al. 2023. The MuSe 2023 Multimodal Sentiment Analysis Challenge: Mimicked Emotions Cross-Cultural Humour and Personalisation. arXiv preprint arXiv:2305.03369 (2023).","DOI":"10.1145\/3606039.3613114"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"crossref","unstructured":"Chaoyue Ding Jiakui Li Daoming Zong Baoxiang Li TianHao Zhang and Qunyan Zhou. 2023 b. Stable Speech Emotion Recognition with Head-k-Pooling Loss. In INTERSPEECH.","DOI":"10.21437\/Interspeech.2023-80"},{"key":"e_1_3_2_1_7_1","volume-title":"2023 a. Speed-Robust Keyword Spotting Via Soft Self-Attention on Multi-Scale Features","author":"Ding Chaoyue","unstructured":"Chaoyue Ding, Jiakui Li, Martin Zong, and Baoxiang Li. 2023 a. Speed-Robust Keyword Spotting Via Soft Self-Attention on Multi-Scale Features. In SLT. IEEE, 1104--1111."},{"key":"e_1_3_2_1_8_1","volume-title":"LETR: A lightweight and efficient transformer for keyword spotting","author":"Ding Kevin","year":"2022","unstructured":"Kevin Ding, Martin Zong, Jiakui Li, and Baoxiang Li. 2022. LETR: A lightweight and efficient transformer for keyword spotting. In ICASSP. IEEE, 7987--7991."},{"key":"e_1_3_2_1_9_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_1_10_1","volume-title":"Aaron Courville, Mehdi Mirza, Ben Hamner, Will Cukierski, Yichuan Tang, David Thaler, Dong-Hyun Lee, et al.","author":"Goodfellow Ian J","year":"2013","unstructured":"Ian J Goodfellow, Dumitru Erhan, Pierre Luc Carrier, Aaron Courville, Mehdi Mirza, Ben Hamner, Will Cukierski, Yichuan Tang, David Thaler, Dong-Hyun Lee, et al. 2013. Challenges in representation learning: A report on three machine learning contests. In ICONIP. 117--124."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"crossref","unstructured":"Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun. 2016. Deep residual learning for image recognition. In CVPR. 770--778.","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3551876.3554811"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"e_1_3_2_1_14_1","first-page":"10944","article-title":"What makes multi-modal learning better than single (provably)","volume":"34","author":"Huang Yu","year":"2021","unstructured":"Yu Huang, Chenzhuang Du, Zihui Xue, Xuanyao Chen, Hang Zhao, and Longbo Huang. 2021. What makes multi-modal learning better than single (provably). NeurIPS, Vol. 34 (2021), 10944--10956.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_15_1","unstructured":"Yu Huang Junyang Lin Chang Zhou Hongxia Yang and Longbo Huang. 2022. 
Modality competition: What makes joint training of multi-modal network fail in deep learning?(provably). In ICML. PMLR 9226--9259."},{"key":"e_1_3_2_1_16_1","volume-title":"Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980","author":"Kingma Diederik P","year":"2014","unstructured":"Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3551876.3554809"},{"key":"e_1_3_2_1_18_1","unstructured":"Shan Li Weihong Deng and JunPing Du. 2017. Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In CVPR. 2852--2861."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3049898"},{"key":"e_1_3_2_1_20_1","volume-title":"MER 2023: Multi-label Learning, Modality Robustness, and Semi-Supervised Learning. arXiv preprint arXiv:2304","author":"Lian Zheng","year":"2023","unstructured":"Zheng Lian, Haiyang Sun, Licai Sun, Jinming Zhao, Ye Liu, Bin Liu, Jiangyan Yi, Meng Wang, Erik Cambria, Guoying Zhao, et al. 2023. MER 2023: Multi-label Learning, Modality Robustness, and Semi-Supervised Learning. arXiv preprint arXiv:2304.08981 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Umt: Unified multi-modal transformers for joint video moment retrieval and highlight detection. In CVPR. 3042--3051.","author":"Liu Ye","year":"2022","unstructured":"Ye Liu, Siyuan Li, Yang Wu, Chang-Wen Chen, Ying Shan, and Xiaohu Qie. 2022. Umt: Unified multi-modal transformers for joint video moment retrieval and highlight detection. In CVPR. 3042--3051."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"crossref","unstructured":"Fengmao Lv Xiang Chen Yanyong Huang Lixin Duan and Guosheng Lin. 2021. Progressive modality reinforcement for human multimodal emotion recognition from unaligned multimodal sequences. In CVPR. 2554--2562.","DOI":"10.1109\/CVPR46437.2021.00258"},{"key":"e_1_3_2_1_23_1","volume-title":"Attention bottlenecks for multimodal fusion. NeurIPS","author":"Nagrani Arsha","year":"2021","unstructured":"Arsha Nagrani, Shan Yang, Anurag Arnab, Aren Jansen, Cordelia Schmid, and Chen Sun. 2021. Attention bottlenecks for multimodal fusion. NeurIPS (2021), 14200--14213."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"crossref","unstructured":"Xiaokang Peng Yake Wei Andong Deng Dong Wang and Di Hu. 2022. Balanced multimodal learning via on-the-fly gradient modulation. In CVPR. 8238--8247.","DOI":"10.1109\/CVPR52688.2022.00806"},{"key":"e_1_3_2_1_25_1","volume-title":"Musan: A music, speech, and noise corpus. arXiv preprint arXiv:1510.08484","author":"Snyder David","year":"2015","unstructured":"David Snyder, Guoguo Chen, and Daniel Povey. 2015. Musan: A music, speech, and noise corpus. arXiv preprint arXiv:1510.08484 (2015)."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.5555\/2627435.2670313"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2023.3274829"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3423327.3423672"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3475957.3484456"},{"key":"e_1_3_2_1_30_1","volume-title":"J. Zico Kolter, Louis-Philippe Morency, and Ruslan Salakhutdinov.","author":"Hubert Tsai Yao-Hung","year":"2019","unstructured":"Yao-Hung Hubert Tsai, Shaojie Bai, Paul Pu Liang, J. Zico Kolter, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2019. 
Multimodal Transformer for Unaligned Multimodal Language Sequences. CoRR, Vol. abs\/1906.00295 (2019). arxiv: 1906.00295 http:\/\/arxiv.org\/abs\/1906.00295"},{"key":"e_1_3_2_1_31_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N Gomez \u0141ukasz Kaiser and Illia Polosukhin. 2017. Attention is all you need. In NeurIPS. 5998--6008."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"crossref","unstructured":"Weiyao Wang Du Tran and Matt Feiszli. 2020. What makes training multi-modal classification networks hard?. In CVPR. 12695--12705.","DOI":"10.1109\/CVPR42600.2020.01271"},{"key":"e_1_3_2_1_33_1","volume-title":"Wenetspeech: A 10000 hours multi-domain mandarin corpus for speech recognition. In ICASSP. 6182--6186.","author":"Zhang Binbin","year":"2022","unstructured":"Binbin Zhang, Hang Lv, Pengcheng Guo, Qijie Shao, Chao Yang, Lei Xie, Xin Xu, Hui Bu, Xiaoyu Chen, Chenchen Zeng, et al. 2022. Wenetspeech: A 10000 hours multi-domain mandarin corpus for speech recognition. In ICASSP. 6182--6186."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2016.2603342"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3093397"}],"event":{"name":"MM '23: The 31st ACM International Conference on Multimedia","location":"Ottawa ON Canada","acronym":"MM '23","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 31st ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612872","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3612872","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:01:01Z","timestamp":1755820861000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612872"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":35,"alternative-id":["10.1145\/3581783.3612872","10.1145\/3581783"],"URL":"https:\/\/doi.org\/10.1145\/3581783.3612872","relation":{},"subject":[],"published":{"date-parts":[[2023,10,26]]},"assertion":[{"value":"2023-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
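
The record above is a Crossref REST API work object (message-type "work") for the MM '23 paper. Below is a minimal sketch, not part of the record itself, of how such a record could be fetched and read in Python, assuming the public api.crossref.org works endpoint and the third-party requests package; the User-Agent contact address is a placeholder, and all field names used are taken directly from the record above.

# Minimal sketch: fetch and read a Crossref work record like the one above.
# Assumes `pip install requests`; the mailto address is a placeholder only.
import requests

DOI = "10.1145/3581783.3612872"  # DOI from the record above

# Crossref asks polite clients to identify themselves, e.g. with a mailto
# contact in the User-Agent header (placeholder address below).
headers = {"User-Agent": "metadata-sketch/0.1 (mailto:you@example.org)"}

resp = requests.get(f"https://api.crossref.org/works/{DOI}",
                    headers=headers, timeout=10)
resp.raise_for_status()
work = resp.json()["message"]  # same shape as the "message" object above

print(work["title"][0])                # paper title (a one-element list)
print(work["container-title"][0])      # proceedings title
print(work["is-referenced-by-count"])  # citation count at indexing time

# "author" entries are structured objects with given/family names and
# affiliation records (every author in this particular record has one).
for author in work["author"]:
    print(author["given"], author["family"], "-",
          author["affiliation"][0]["name"])

# "reference" entries are either DOI-matched or plain unstructured strings.
for ref in work.get("reference", [])[:5]:
    print(ref.get("DOI") or ref.get("unstructured"))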