{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,26]],"date-time":"2026-01-26T02:27:43Z","timestamp":1769394463708,"version":"3.49.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6,6]]},"DOI":"10.1109\/icassp39728.2021.9413910","type":"proceedings-article","created":{"date-parts":[[2021,5,13]],"date-time":"2021-05-13T19:53:45Z","timestamp":1620935625000},"page":"6329-6333","source":"Crossref","is-referenced-by-count":38,"title":["Contrastive Unsupervised Learning for Speech Emotion Recognition"],"prefix":"10.1109","author":[{"given":"Mao","family":"Li","sequence":"first","affiliation":[]},{"given":"Bo","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Joshua","family":"Levy","sequence":"additional","affiliation":[]},{"given":"Andreas","family":"Stolcke","sequence":"additional","affiliation":[]},{"given":"Viktor","family":"Rozgic","sequence":"additional","affiliation":[]},{"given":"Spyros","family":"Matsoukas","sequence":"additional","affiliation":[]},{"given":"Constantinos","family":"Papayiannis","sequence":"additional","affiliation":[]},{"given":"Daniel","family":"Bone","sequence":"additional","affiliation":[]},{"given":"Chao","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref11","article-title":"Representation learning with contrastive predictive coding","volume":"abs 1807 3748","author":"van den oord","year":"2018","journal-title":"ArXiv"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462685"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854517"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1582"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-6393(02)00071-7"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2018.2879512"},{"key":"ref17","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"},{"key":"ref18","article-title":"On variational bounds of mutual information","author":"poole","year":"2019","journal-title":"ICML"},{"key":"ref19","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2017.2736999"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","article-title":"IEMOCAP: interactive emotional dyadic motion capture database","volume":"42","author":"busso","year":"2008","journal-title":"Language Resources and Evaluation"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2006.1621452"},{"key":"ref5","author":"yu","year":"2016","journal-title":"Automatic Speech Recognition"},{"key":"ref8","article-title":"Language models are few-shot learners","volume":"abs 2005 14165","author":"brown","year":"2020","journal-title":"ArXiv"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ACII.2009.5349500"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3129340"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472669"},{"key":"ref9","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"NAACL-HLT"},{"key":"ref20","first-page":"2196","article-title":"Discriminatively trained recurrent neural networks for continuous dimensional emotion recognition from audio","volume":"2016","author":"weninger","year":"2016","journal-title":"IJCAI"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.2307\/2532051"}],"event":{"name":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Toronto, ON, Canada","start":{"date-parts":[[2021,6,6]]},"end":{"date-parts":[[2021,6,11]]}},"container-title":["ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9413349\/9413350\/09413910.pdf?arnumber=9413910","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:40:48Z","timestamp":1652197248000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9413910\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,6]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/icassp39728.2021.9413910","relation":{},"subject":[],"published":{"date-parts":[[2021,6,6]]}}}