{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T10:56:55Z","timestamp":1730199415041,"version":"3.28.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,16]],"date-time":"2023-12-16T00:00:00Z","timestamp":1702684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,16]],"date-time":"2023-12-16T00:00:00Z","timestamp":1702684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,16]]},"DOI":"10.1109\/asru57964.2023.10389694","type":"proceedings-article","created":{"date-parts":[[2024,1,19]],"date-time":"2024-01-19T18:38:40Z","timestamp":1705689520000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Boosting Modality Representation With Pre-Trained Models and Multi-Task Training for Multimodal Sentiment Analysis"],"prefix":"10.1109","author":[{"given":"Jiarui","family":"Hai","sequence":"first","affiliation":[{"name":"Johns Hopkins University,Laboratory for Computational Auditory Perception,Baltimore,USA"}]},{"given":"Yu-Jeh","family":"Liu","sequence":"additional","affiliation":[{"name":"Johns Hopkins University,Laboratory for Computational Auditory Perception,Baltimore,USA"}]},{"given":"Mounya","family":"Elhilali","sequence":"additional","affiliation":[{"name":"Johns Hopkins University,Laboratory for Computational Auditory Perception,Baltimore,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/2070481.2070509"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/T-AFFC.2011.40"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/MIS.2013.9"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3536221.3556630"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.343"},{"key":"ref6","article-title":"Mosi: Multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos","volume":"abs\/1606.06259","author":"Zadeh","year":"2016","journal-title":"ArXiv"},{"key":"ref7","article-title":"Meld: A multimodal multi-party dataset for emotion recognition in conversations","volume":"abs\/1810.02508","author":"Poria","year":"2018","journal-title":"ArXiv"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1115"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2012.03.001"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2018.07.041"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12024"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3462244.3479919"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1002\/widm.1328"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2019.12.696"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413678"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-63031-7_26"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746536"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3276075"},{"key":"ref19","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint arXiv:1810.04805"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.25080\/Majora-7b98e3ed-003"},{"key":"ref21","first-page":"1459","article-title":"opensmile-the munich versatile and fast open-source audio feature extractor","volume-title":"Proc. ACM Multimedia (MM)","author":"Eyben"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2016.7477553"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2018.00019"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10354"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1212"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3276075"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33017216"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1656"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref30","article-title":"A fine-tuned wav2vec 2.0\/hubert benchmark for speech emotion recognition, speaker verification and spoken language understanding","author":"Wang","year":"2021","journal-title":"arXiv preprint arXiv:2111.02735"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475587"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref33","article-title":"The kinetics human action video dataset","author":"Kay","year":"2017","journal-title":"arXiv preprint arXiv:1705.06950"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-demo.20"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.214"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4899-7687-1_79"}],"event":{"name":"2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","start":{"date-parts":[[2023,12,16]]},"location":"Taipei, Taiwan","end":{"date-parts":[[2023,12,20]]}},"container-title":["2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10388490\/10389614\/10389694.pdf?arnumber=10389694","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,23]],"date-time":"2024-01-23T16:36:05Z","timestamp":1706027765000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10389694\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,16]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/asru57964.2023.10389694","relation":{},"subject":[],"published":{"date-parts":[[2023,12,16]]}}}