{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T17:08:49Z","timestamp":1773248929568,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100016311","name":"Arm","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100016311","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747278","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"4698-4702","source":"Crossref","is-referenced-by-count":38,"title":["A Pre-Trained Audio-Visual Transformer for Emotion Recognition"],"prefix":"10.1109","author":[{"given":"Minh","family":"Tran","sequence":"first","affiliation":[{"name":"Institute for Creative Technologies University of Southern California,Los Angeles,USA"}]},{"given":"Mohammad","family":"Soleymani","sequence":"additional","affiliation":[{"name":"Institute for Creative Technologies University of Southern California,Los Angeles,USA"}]}],"member":"263","reference":[{"key":"ref10","article-title":"A short note on the kinetics-700 human action dataset","author":"carreira","year":"2019","journal-title":"arXiv preprint arXiv 1907 09509"},{"key":"ref11","first-page":"776","article-title":"Audio set: An ontology and human-labeled dataset for audio events","author":"gemmeke","year":"2017","journal-title":"ICASSP"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2014.2336244"},{"key":"ref13","article-title":"Msp-improv: An acted corpus of dyadic interactions to study emotion perception","author":"busso","year":"2016","journal-title":"IEEE Trans Affective Computing"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1656"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3095662"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383575"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2018.00019"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP40778.2020.9191019"},{"key":"ref4","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"NeurIPS"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TSP49548.2020.9163474"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054458"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052916"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.7005"},{"key":"ref8","first-page":"13","article-title":"Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume":"32","author":"lu","year":"2019","journal-title":"NeurIPS"},{"key":"ref7","article-title":"Lxmert: Learning crossmodality encoder representations from transformers","author":"tan","year":"2019","journal-title":"arXiv preprint arXiv 1908 07463"},{"key":"ref2","first-page":"4171","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"NAACL"},{"key":"ref9","article-title":"Parameter efficient multimodal transformers for video representation learning","author":"lee","year":"2020","journal-title":"ICLRE"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref20","article-title":"Towards learning a universal nonsemantic representation of speech","author":"shor","year":"2020","journal-title":"arXiv preprint arXiv 2002 12271"},{"key":"ref22","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1115"},{"key":"ref24","first-page":"325","article-title":"Deep multilayer perceptrons for dimensional speech emotion recognition","author":"atmaja","year":"2020","journal-title":"APSIPA ASC"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2037"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/MMUL.2019.2960219"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"1103","DOI":"10.21437\/Interspeech.2017-1494","article-title":"Jointly predicting arousal, valence and dominance with multi-task learning","volume":"2017","author":"parthasarathy","year":"2017","journal-title":"InterSpeech"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747278.pdf?arnumber=9747278","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:07:09Z","timestamp":1660594029000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747278\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747278","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}