{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T10:55:20Z","timestamp":1775040920163,"version":"3.50.1"},"reference-count":46,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,9]]},"DOI":"10.1109\/acii.2019.8925513","type":"proceedings-article","created":{"date-parts":[[2019,12,27]],"date-time":"2019-12-27T08:44:34Z","timestamp":1577436274000},"page":"732-737","source":"Crossref","is-referenced-by-count":48,"title":["Unsupervised Adversarial Domain Adaptation for Cross-Lingual Speech Emotion Recognition"],"prefix":"10.1109","author":[{"given":"Siddique","family":"Latif","sequence":"first","affiliation":[]},{"given":"Junaid","family":"Qadir","sequence":"additional","affiliation":[]},{"given":"Muhammad","family":"Bilal","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICET.2017.8281753"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ACII.2013.90"},{"key":"ref33","author":"gideon","year":"2019","journal-title":"Barking up the right tree Improving cross-corpus speech emotion recognition with adversarial discriminative domain generalization (addog)"},{"key":"ref32","author":"zhou","year":"2018","journal-title":"Transferable positive\/negative speech emotion recognition via class-wise adversarial domain adaptation"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.316"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.18"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/FIT.2018.00023"},{"key":"ref36","first-page":"3501","article-title":"Emovo corpus: an italian emotional speech database","author":"costantini","year":"2014","journal-title":"LREC"},{"key":"ref35","article-title":"Surrey audio-visual expressed emotion(savee)database","author":"jackson","year":"2014","journal-title":"University of Surrey"},{"key":"ref34","first-page":"1517","article-title":"A database of german emotional speech","volume":"5","author":"burkhardt","year":"2005","journal-title":"InterSpeech"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683299"},{"key":"ref40","article-title":"Darpa timit acoustic-phonetic continous speech corpus cd-rom. nist speech disc 1-1.1","volume":"93","author":"garofolo","year":"1993","journal-title":"NASA STI\/Recon Technical Report N"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472789"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2017.2672753"},{"key":"ref13","first-page":"2672","article-title":"Generative adversarial nets","volume":"2014","author":"goodfellow","year":"0","journal-title":"Advances in neural information processing systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461423"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-879"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2016.11.063"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461932"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2018.2867099"},{"key":"ref19","author":"tu","year":"2019","journal-title":"Towards adversarial learning of speaker-invariant representation for speech emotion recognition"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2014.2324759"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1568"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2013.2255278"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1111\/ecc.13033"},{"key":"ref6","author":"rana","year":"2019","journal-title":"Multi-task semi-supervised adversarial autoencoding for speech emotion"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2017.2672753"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3252"},{"key":"ref8","author":"latif","year":"2018","journal-title":"Cross corpus speech emotion classification-an effective transfer learning technique"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/T-AFFC.2010.8"},{"key":"ref2","doi-asserted-by":"crossref","DOI":"10.3390\/fi9040093","article-title":"How 5g wireless (and concomitant technologies)will revolutionize healthcare?","volume":"9","author":"latif","year":"2017","journal-title":"Future Internet"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1625"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2710800"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/T-AFFC.2010.8"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2015.2503757"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2012.03.001"},{"key":"ref22","first-page":"77","article-title":"Cross-corpus classification of realistic emotions-some pilot experiments","author":"eyben","year":"2010","journal-title":"Proc LREC Workshop Emotion Corpora"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2014.7051419"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/1873951.1874246"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462162"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2015.2457417"},{"key":"ref23","first-page":"58","article-title":"A cross-corpus experiment in speech emotion recognition","author":"parlak","year":"2014","journal-title":"SLAM INTERSPEECH"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638345"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2011.6163986"},{"key":"ref43","first-page":"26","article-title":"Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude","volume":"4","author":"tieleman","year":"2012","journal-title":"COURSERA Neural Networks for Machine Learning"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2019.04.004"}],"event":{"name":"2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)","location":"Cambridge, United Kingdom","start":{"date-parts":[[2019,9,3]]},"end":{"date-parts":[[2019,9,6]]}},"container-title":["2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8911251\/8925431\/08925513.pdf?arnumber=8925513","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,19]],"date-time":"2022-07-19T16:26:36Z","timestamp":1658247996000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8925513\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,9]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/acii.2019.8925513","relation":{},"subject":[],"published":{"date-parts":[[2019,9]]}}}