{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T01:07:47Z","timestamp":1773277667321,"version":"3.50.1"},"reference-count":30,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2016,3]]},"DOI":"10.1109\/icassp.2016.7472790","type":"proceedings-article","created":{"date-parts":[[2016,6,24]],"date-time":"2016-06-24T01:58:30Z","timestamp":1466733510000},"page":"5805-5809","source":"Crossref","is-referenced-by-count":45,"title":["Cross-corpus acoustic emotion recognition from singing and speaking: A multi-task learning approach"],"prefix":"10.1109","author":[{"given":"Biqiao","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Emily Mower","family":"Provost","sequence":"additional","affiliation":[]},{"given":"Georg","family":"Essl","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2014.141"},{"key":"ref10","first-page":"341","article-title":"Automatic speech classification to five emotional states based on gender information","author":"ververidis","year":"2004","journal-title":"Proc Conf Signal Process"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2004.838534"},{"key":"ref12","article-title":"Improving automatic emotion recognition from speech via gender differentiation","author":"vogt","year":"2006","journal-title":"Proceedings of the Language Resources and Evaluation Conference"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/T-AFFC.2010.8"},{"key":"ref14","first-page":"353","article-title":"Emotion recognition from speech by combining databases and fusion of classifiers","author":"iulia","year":"2010","journal-title":"Text Speech and Dialogue"},{"key":"ref15","first-page":"1553","article-title":"Using multiple databases for training in emotion recognition: To unite or to vote?","author":"schuller","year":"2011","journal-title":"Proceedings of INTERSPEECH"},{"key":"ref16","article-title":"Selecting training data for cross-corpus speech emotion recognition: Prototypicality vs. generalization","author":"schuller","year":"2011","journal-title":"Proceedings of the Afeka-AVIOS Speech Processing Conference"},{"key":"ref17","first-page":"2530","article-title":"Speech emotion recognition using transfer learning","volume":"97","author":"peng","year":"2014","journal-title":"IEICE Transactions on Information and Systems"},{"key":"ref18","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v29i1.9334","article-title":"Predicting emotion perception across domains: A study of singing and speaking","author":"zhang","year":"2015","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"ref19","article-title":"Ravdess: The ryerson audio-visual database of emotional speech and song","author":"steven","year":"2012","journal-title":"Annual Meeting of the Canadian Society for Brain Behaviour and Cognitive Science"},{"key":"ref28","first-page":"601","article-title":"Correcting sample selection bias by unlabeled data","author":"jiayuan","year":"2006","journal-title":"Advances in neural information processing systems"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"1057","DOI":"10.1109\/TASL.2010.2076804","article-title":"A framework for automatic human emotion classification using emotion profiles","volume":"19","author":"emily","year":"2011","journal-title":"IEEE Transactions on Audio Speech and Language Processing"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICDMW.2007.109"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"227","DOI":"10.1016\/S0167-6393(02)00084-5","article-title":"Vocal communication of emotion: A review of research paradigms","volume":"40","author":"klaus","year":"2003","journal-title":"Speech Communication"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/2168752.2168754"},{"key":"ref29","doi-asserted-by":"crossref","first-page":"1068","DOI":"10.1109\/LSP.2014.2324759","article-title":"Autoencoder-based unsupervised domain adaptation for speech emotion recognition","volume":"21","author":"deng","year":"2014","journal-title":"Signal Processing Letters"},{"key":"ref5","first-page":"255","article-title":"Music emotion recognition: A state of the art review","author":"youngmoo","year":"2010","journal-title":"International Society for Music Information Retrieval"},{"key":"ref8","article-title":"Acoustic differences in the speaking and singing voice","volume":"19","author":"steven","year":"2013","journal-title":"Proceedings of Meetings on Acoustics"},{"key":"ref7","article-title":"Comparing the acoustic expression of emotion in the speaking and the singing voice","author":"scherer","year":"2013","journal-title":"Computer Speech & Language"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/79.911197"},{"key":"ref9","article-title":"Recognizing emotion from singing and speaking using shared models","author":"zhang","year":"2015","journal-title":"Proceedings of Affective Computing and Intelligent Interaction"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-85099-1_7"},{"key":"ref20","article-title":"The interspeech 2013 computational paralinguistics challenge: Social signals, conflict, emotion, autism","author":"schuller","year":"2013","journal-title":"Proceedings of INTERSPEECH"},{"key":"ref22","article-title":"Multi-task feature learning","volume":"19","author":"argyriou","year":"2007","journal-title":"Advances in neural information processing systems"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/1873951.1874246"},{"key":"ref24","first-page":"521","article-title":"Learning with whom to share in multi-task feature learning","author":"kang","year":"2011","journal-title":"Proceedings of the International Conference on Machine Learning"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-007-5040-8"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2008.927665"},{"key":"ref25","first-page":"1871","article-title":"Liblinear: A library for large linear classification","volume":"9","author":"fan","year":"2008","journal-title":"The Journal of Machine Learning Research"}],"event":{"name":"2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Shanghai","start":{"date-parts":[[2016,3,20]]},"end":{"date-parts":[[2016,3,25]]}},"container-title":["2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7465907\/7471614\/07472790.pdf?arnumber=7472790","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,19]],"date-time":"2023-08-19T00:30:19Z","timestamp":1692405019000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7472790\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2016,3]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/icassp.2016.7472790","relation":{},"subject":[],"published":{"date-parts":[[2016,3]]}}}