{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T23:54:06Z","timestamp":1775001246166,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":41,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"the Science and Technology Innovation Committee of Shenzhen Municipality Foundation","award":["No.JCYJ20210324132203007"],"award-info":[{"award-number":["No.JCYJ20210324132203007"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,10,26]]},"DOI":"10.1145\/3581783.3612295","type":"proceedings-article","created":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T07:27:30Z","timestamp":1698391650000},"page":"426-434","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":17,"title":["Cross-modality Representation Interactive Learning for Multimodal Sentiment Analysis"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-9429-4221","authenticated-orcid":false,"given":"Jian","family":"Huang","sequence":"first","affiliation":[{"name":"University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9122-6141","authenticated-orcid":false,"given":"Yanli","family":"Ji","sequence":"additional","affiliation":[{"name":"Shenzhen Institute for Advanced Study &amp; UESTC, Chengdu, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5070-4511","authenticated-orcid":false,"given":"Yang","family":"Yang","sequence":"additional","affiliation":[{"name":"UESTC, Chengdu, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2999-2088","authenticated-orcid":false,"given":"Heng Tao","family":"Shen","sequence":"additional","affiliation":[{"name":"UESTC &amp; Peng Cheng Laboratory, Chengdu, China"}]}],"member":"320","published-online":{"date-parts":[[2023,10,27]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2711011"},{"key":"e_1_3_2_2_2_1","volume-title":"Tadas Baltruvsaitis, Amir Zadeh, and Louis-Philippe Morency.","author":"Chen Minghai","year":"2017","unstructured":"Minghai Chen, Sen Wang, Paul Pu Liang, Tadas Baltruvsaitis, Amir Zadeh, and Louis-Philippe Morency. 2017. Multimodal sentiment analysis with word-level fusion and reinforcement learning. In ICMI."},{"key":"e_1_3_2_2_3_1","volume-title":"Multimodal Affective Computing with Dense Fusion Transformer for Inter- and Intra-modality Interactions","author":"Deng Huan","unstructured":"Huan Deng, Zhenguo Yang, Tianyong Hao, Qing Li, and Wenyin Liu. 2022. Multimodal Affective Computing with Dense Fusion Transformer for Inter- and Intra-modality Interactions. IEEE Transactions on Multimedia, Vol. Early Access (2022)."},{"key":"e_1_3_2_2_4_1","volume-title":"Leung","author":"Deng James J.","year":"2021","unstructured":"James J. Deng and Clement H. C. Leung. 2021. Towards Learning a Joint Representation from Transformer in Multimodal Emotion Recognition. In BI."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/2682899"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Wei Han Hui Chen Alexander F. Gelbukh Amir Zadeh Louis-Philippe Morency and Soujanya Poria. 2021b. Bi-bimodal modality fusion for correlation-controlled multimodal sentiment analysis. In ICMI.","DOI":"10.1145\/3462244.3479919"},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"crossref","unstructured":"Wei Han Hui Chen and Soujanya Poria. 2021a. 
Improving Multimodal Fusion with Hierarchical Mutual Information Maximization for Multimodal Sentiment Analysis. In EMNLP.","DOI":"10.18653\/v1\/2021.emnlp-main.723"},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"crossref","unstructured":"M. Hasan Wasifur Rahman Amir Zadeh Jianyuan Zhong Md. Iftekhar Tanveer Louis-Philippe Morency and Ehsan Hoque. 2019. UR-FUNNY: A Multimodal Language Dataset for Understanding Humor. In EMNLP.","DOI":"10.18653\/v1\/D19-1211"},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413678"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475583"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"crossref","unstructured":"Yan Ling Jianfei Yu and Rui Xia. 2022. Vision-Language Pre-Training for Multimodal Aspect-Based Sentiment Analysis. In ACL.","DOI":"10.18653\/v1\/2022.acl-long.152"},{"key":"e_1_3_2_2_12_1","volume-title":"Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency.","author":"Liu Zhun","year":"2018","unstructured":"Zhun Liu, Ying Shen, Varun Bharadhwaj Lakshminarasimhan, Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency. 2018a. Efficient Low-rank Multimodal Fusion With Modality-Specific Factors. In ACL."},{"key":"e_1_3_2_2_13_1","volume-title":"Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency.","author":"Liu Zhun","year":"2018","unstructured":"Zhun Liu, Ying Shen, Varun Bharadhwaj Lakshminarasimhan, Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency. 2018b. Efficient Low-rank Multimodal Fusion With Modality-Specific Factors. In ACL."},{"key":"e_1_3_2_2_14_1","unstructured":"Fengmao Lv Xiang Chen Yanyong Huang Lixin Duan and Guosheng Lin. 2021. Progressive Modality Reinforcement for Human Multimodal Emotion Recognition from Unaligned Multimodal Sequences. In CVPR."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"crossref","unstructured":"Sijie Mai Haifeng Hu and Songlong Xing. 2020. 
Modality to modality translation: An adversarial representation learning and graph fusion network for multimodal fusion. In AAAI.","DOI":"10.1609\/aaai.v34i01.5347"},{"key":"e_1_3_2_2_16_1","unstructured":"Huisheng Mao Ziqi Yuan Hua Xu Wenmeng Yu Yihe Liu and Kai Gao. 2022. M-SENA: An Integrated Platform for Multimodal Sentiment Analysis. In ACL."},{"key":"e_1_3_2_2_17_1","first-page":"1","article-title":"CM-GANs","volume":"15","author":"Peng Yuxin","year":"2017","unstructured":"Yuxin Peng, Jinwei Qi, and Yuxin Yuan. 2017. CM-GANs. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM), Vol. 15 (2017), 1--24.","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"e_1_3_2_2_18_1","volume-title":"Adriano M. Pereira, Fabr\u00edcio Benevenuto, and Daniel Hasan Dalip.","author":"Pereira Mois\u00e9s H. R.","year":"2016","unstructured":"Mois\u00e9s H. R. Pereira, Fl\u00e1vio Luis Cardeal P\u00e1dua, Adriano M. Pereira, Fabr\u00edcio Benevenuto, and Daniel Hasan Dalip. 2016. Fusing Audio, Textual, and Visual Features for Sentiment Analysis of News Videos. In ICWSM."},{"key":"e_1_3_2_2_19_1","volume-title":"Thomas Manzini, Louis-Philippe Morency, and Barnab\u00e1s P\u00f3czos.","author":"Pham Hai","year":"2019","unstructured":"Hai Pham, Paul Pu Liang, Thomas Manzini, Louis-Philippe Morency, and Barnab\u00e1s P\u00f3czos. 2019. Found in translation: Learning robust joint representations by cyclic translations between modalities. In AAAI."},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"crossref","unstructured":"Soujanya Poria E. Cambria and Alexander Gelbukh. 2015. Deep Convolutional Neural Network Textual Features and Multiple Kernel Learning for Utterance-level Multimodal Sentiment Analysis. In EMNLP.","DOI":"10.18653\/v1\/D15-1303"},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"crossref","unstructured":"Soujanya Poria Iti Chaturvedi E. Cambria and Amir Hussain. 2016. 
Convolutional MKL Based Multimodal Emotion Recognition and Sentiment Analysis. In ICDM.","DOI":"10.1109\/ICDM.2016.0055"},{"key":"e_1_3_2_2_22_1","volume-title":"Ensemble of svm trees for multimodal emotion recognition","author":"Rozgi\u0107 Viktor","unstructured":"Viktor Rozgi\u0107, Sankaranarayanan Ananthakrishnan, Shirin Saleem, Rohit Kumar, and Rohit Prasad. 2012. Ensemble of svm trees for multimodal emotion recognition. In APSIPA. IEEE."},{"key":"e_1_3_2_2_23_1","doi-asserted-by":"crossref","unstructured":"Ekaterina Shutova Douwe Kiela and Jean Maillard. 2016. Black Holes and White Rabbits: Metaphor Identification with Visual Features. In NAACL.","DOI":"10.18653\/v1\/N16-1020"},{"key":"e_1_3_2_2_24_1","volume-title":"NFCMF: Noise Filtering and CrossModal Fusion for Multimodal Sentiment Analysis. 2021 International Conference on Asian Language Processing (IALP), 316--321","author":"Su Guixin","year":"2021","unstructured":"Guixin Su, Junyi He, Xia Li, Meixiu Lu, and Hanqun Yang. 2021. NFCMF: Noise Filtering and CrossModal Fusion for Multimodal Sentiment Analysis. 2021 International Conference on Asian Language Processing (IALP), 316--321."},{"key":"e_1_3_2_2_25_1","volume-title":"William A. Sethares, and Yingyu Liang.","author":"Sun Zhongkai","year":"2019","unstructured":"Zhongkai Sun, Prathusha Kameswara Sarma, William A. Sethares, and Yingyu Liang. 2019. Learning Relationships between Text, Audio, and Video via Deep Canonical Correlation for Multimodal Language Analysis. In AAAI."},{"key":"e_1_3_2_2_26_1","volume-title":"CTFN: Hierarchical Learning for Multimodal Sentiment Analysis Using Coupled-Translation Fusion Network. In ACL.","author":"Tang Jiajia","year":"2021","unstructured":"Jiajia Tang, Kang Li, Xuanyu Jin, Andrzej Cichocki, Qibin Zhao, and Wanzeng Kong. 2021. CTFN: Hierarchical Learning for Multimodal Sentiment Analysis Using Coupled-Translation Fusion Network. In ACL."},{"key":"e_1_3_2_2_27_1","volume-title":"J. 
Zico Kolter, Louis-Philippe Morency, and Ruslan Salakhutdinov.","author":"Hubert Tsai Yao-Hung","year":"2019","unstructured":"Yao-Hung Hubert Tsai, Shaojie Bai, Paul Pu Liang, J. Zico Kolter, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2019. Multimodal Transformer for Unaligned Multimodal Language Sequences. ACL."},{"key":"e_1_3_2_2_28_1","article-title":"Visualizing data using t-SNE","volume":"9","author":"der Maaten Laurens Van","year":"2008","unstructured":"Laurens Van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-SNE. Journal of machine learning research, Vol. 9, 11 (2008).","journal-title":"Journal of machine learning research"},{"key":"e_1_3_2_2_29_1","volume-title":"Attention is all you need. Advances in neural information processing systems","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, Vol. 30 (2017)."},{"key":"e_1_3_2_2_30_1","volume-title":"Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In WWW.","author":"Wang Zilong","year":"2020","unstructured":"Zilong Wang, Zhaohong Wan, and Xiaojun Wan. 2020. Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In WWW."},{"key":"e_1_3_2_2_31_1","unstructured":"Yang Wu Zijie Lin Yanyan Zhao Bing Qin and Li-Nan Zhu. 2021. A text-centered shared-private framework via cross-modal prediction for multimodal sentiment analysis. In ACL."},{"key":"e_1_3_2_2_32_1","unstructured":"Yang Wu Yanyan Zhao Hao Yang Songmin Chen Bing Qin Xiaohuan Cao and Wenting Zhao. 2022. Sentiment Word Aware Multimodal Refinement for Multimodal Sentiment Analysis with ASR Errors. 
In ACL."},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2021.09.041"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"crossref","unstructured":"Xiaocui Yang Shi Feng Yifei Zhang and Daling Wang. 2021. Multimodal Sentiment Detection Based on Multi-channel Graph Neural Networks. In ACL.","DOI":"10.18653\/v1\/2021.acl-long.28"},{"key":"e_1_3_2_2_35_1","unstructured":"Jianfei Yu Luis Marujo Jing Jiang Pradeep Karuturi and William Brendel. 2018. Improving multi-label emotion classification via sentiment classification with dual attention transfer network. In ACL."},{"key":"e_1_3_2_2_36_1","unstructured":"Wenmeng Yu Hua Xu Ziqi Yuan and Jiele Wu. 2021. Learning modality-specific representations with self-supervised multi-task learning for multimodal sentiment analysis. In AAAI."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"crossref","unstructured":"Amir Zadeh Minghai Chen Soujanya Poria E. Cambria and Louis-Philippe Morency. 2017. Tensor Fusion Network for Multimodal Sentiment Analysis. In EMNLP.","DOI":"10.18653\/v1\/D17-1115"},{"key":"e_1_3_2_2_38_1","volume-title":"Soujanya Poria, E. Cambria, and Louis-Philippe Morency.","author":"Zadeh Amir","year":"2018","unstructured":"Amir Zadeh, Paul Pu Liang, Soujanya Poria, E. Cambria, and Louis-Philippe Morency. 2018. Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. In ACL."},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/MIS.2016.94"},{"key":"e_1_3_2_2_40_1","volume-title":"Thomas Natschl\u00e4ger, and Susanne Saminger-Platz.","author":"Zellinger Werner","year":"2017","unstructured":"Werner Zellinger, Thomas Grubinger, Edwin David Lughofer, Thomas Natschl\u00e4ger, and Susanne Saminger-Platz. 2017. Central Moment Discrepancy (CMD) for Domain-Invariant Representation Learning. 
In ICLR."},{"key":"e_1_3_2_2_41_1","volume-title":"Multimodal Emotion Classification with Multi-level Semantic Reasoning Network","author":"Zhu Tong","unstructured":"Tong Zhu, Leida Li, Jufeng Yang, Sicheng Zhao, and Xiao Xiao. 2022. Multimodal Emotion Classification with Multi-level Semantic Reasoning Network. IEEE Transactions on Multimedia, Vol. Early Access (2022)."}],"event":{"name":"MM '23: The 31st ACM International Conference on Multimedia","location":"Ottawa ON Canada","acronym":"MM '23","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 31st ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612295","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3612295","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:02:15Z","timestamp":1755820935000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612295"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":41,"alternative-id":["10.1145\/3581783.3612295","10.1145\/3581783"],"URL":"https:\/\/doi.org\/10.1145\/3581783.3612295","relation":{},"subject":[],"published":{"date-parts":[[2023,10,26]]},"assertion":[{"value":"2023-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}