{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:18:43Z","timestamp":1772554723996,"version":"3.50.1"},"reference-count":25,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,4]]},"DOI":"10.1109\/icassp.2018.8462011","type":"proceedings-article","created":{"date-parts":[[2018,9,21]],"date-time":"2018-09-21T22:24:48Z","timestamp":1537568688000},"page":"5104-5108","source":"Crossref","is-referenced-by-count":20,"title":["Human-Like Emotion Recognition: Multi-Label Learning from Noisy Labeled Audio-Visual Expressive Speech"],"prefix":"10.1109","author":[{"given":"Yelin","family":"Kim","sequence":"first","affiliation":[]},{"given":"Jeesun","family":"Kim","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2010.09.020"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2011.5771357"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/T-AFFC.2012.16"},{"key":"ref13","first-page":"46","article-title":"Visual vs. auditory emotion information: how language and culture affect our bias towards the different modalities","author":"chong","year":"2015","journal-title":"AVSPN"},{"key":"ref14","article-title":"Auditory, visual, and auditory-visual spoken emotion recognition in young and old adults","author":"simonetti","year":"2015","journal-title":"Proceedings of the 18th International Congress of Phonetic Sciences (ICPhS 2015) 10-14 August 2015"},{"key":"ref15","article-title":"Exploring acoustic differences between Cantonese (tonal) and English (non-tonal) spoken expressions of emotions","author":"chong","year":"2015","journal-title":"Sixteenth Annual Conference of the International Speech Communication Association"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2005.03.007"},{"key":"ref17","first-page":"37","article-title":"The sound of disgust: How facial expression may influence speech production","author":"chong","year":"2016","journal-title":"INTERSPEECH"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ACII.2009.5349500"},{"key":"ref19","article-title":"Selecting training data for cross-corpus speech emotion recognition: Prototypicality vs. generalization","author":"schuller","year":"2011","journal-title":"Proc 2011 Afeka-AVIOS Speech Processing Conference Tel Aviv Israel Citeseer"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"1057","DOI":"10.1109\/TASL.2010.2076804","article-title":"A framework for automatic human emotion classification using emotion profiles","volume":"19","author":"emily","year":"2011","journal-title":"IEEE Transactions on Audio Speech and Language Processing"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS.2014.6865245"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2016.7727250"},{"key":"ref5","first-page":"415","article-title":"Formulating emotion perception as a probabilistic model with application to categorical emotion classification","author":"lotfian","year":"2017","journal-title":"Affective Computing and Intelligent Interaction (ACII)"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2012.02.005"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2014.11.007"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2012.2236291"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/2661806.2661807"},{"key":"ref20","first-page":"3687","article-title":"Deep learning for robust feature generation in audio-visual emotion recognition","author":"kim","year":"2013","journal-title":"IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2010.5494890"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123383"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2011.2168604"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2014.10.001"},{"key":"ref25","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-94","article-title":"Discretized continuous speech emotion recognition with multi-task deep recurrent neural network","author":"le","year":"2017","journal-title":"Interspeech 2017"}],"event":{"name":"ICASSP 2018 - 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Calgary, AB","start":{"date-parts":[[2018,4,15]]},"end":{"date-parts":[[2018,4,20]]}},"container-title":["2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8450881\/8461260\/08462011.pdf?arnumber=8462011","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,8,24]],"date-time":"2020-08-24T04:27:19Z","timestamp":1598243239000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8462011\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,4]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp.2018.8462011","relation":{},"subject":[],"published":{"date-parts":[[2018,4]]}}}