{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T05:27:24Z","timestamp":1772602044913,"version":"3.50.1"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1109\/icassp.2019.8683483","type":"proceedings-article","created":{"date-parts":[[2019,4,17]],"date-time":"2019-04-17T16:01:56Z","timestamp":1555516916000},"page":"2822-2826","source":"Crossref","is-referenced-by-count":87,"title":["Speech Emotion Recognition Using Multi-hop Attention Mechanism"],"prefix":"10.1109","author":[{"given":"Seunghyun","family":"Yoon","sequence":"first","affiliation":[{"name":"Department of Electrical and Computer Engineering, Seoul National University, Seoul, Korea"}]},{"given":"Seokhyun","family":"Byun","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Seoul National University, Seoul, Korea"}]},{"given":"Subhadeep","family":"Dey","sequence":"additional","affiliation":[{"name":"Idiap Research Institute, Martigny, Switzerland"}]},{"given":"Kyomin","family":"Jung","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Seoul National University, Seoul, Korea"}]}],"member":"263","reference":[{"key":"ref10","first-page":"i","article-title":"Hidden markov model-based speech emotion recognition","volume":"1","author":"schuller","year":"2003","journal-title":"Multimedia and Expo 2003 ICME&#x2019;03 Proceedings 2003 International Conference on IEEE"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2011.06.004"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953131"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/PlatCon.2017.7883728"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952655"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-200"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1242"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952552"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2466"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472176"},{"key":"ref4","article-title":"Speech emotion recognition using deep neural network and extreme learning machine","author":"han","year":"2014","journal-title":"Fifteenth Annual Conference of the International Speech Communication Association"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-08491-6_5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178872"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2004.1326051"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref7","article-title":"Multi-modal speech emotion recognition using audio and text","author":"yoon","year":"2018"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1093\/acprof:oso\/9780195387643.003.0008"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/KST.2013.6512793"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/S1071-5819(03)00052-1"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1093"},{"key":"ref22","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"Tech Rep"},{"key":"ref21","article-title":"Multi-modal emotion recognition on iemocap dataset using deep learning","author":"tripathi","year":"2018"},{"key":"ref24","article-title":"Cloud speech-to-text","year":"2018"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/2502081.2502224"},{"key":"ref26","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"1263","DOI":"10.21437\/Interspeech.2017-917","article-title":"Attentive convolutional neural network based speech emotion recognition: A study on the impact of input features, signal length, and acted speech","author":"neumann","year":"2017","journal-title":"Proc Interspeech 2017"}],"event":{"name":"ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Brighton, UK","start":{"date-parts":[[2019,5,12]]},"end":{"date-parts":[[2019,5,17]]}},"container-title":["ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8671773\/8682151\/08683483.pdf?arnumber=8683483","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T00:37:51Z","timestamp":1755909471000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8683483\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icassp.2019.8683483","relation":{},"subject":[],"published":{"date-parts":[[2019,5]]}}}