{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:04:16Z","timestamp":1772553856217,"version":"3.50.1"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9052937","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T16:21:13Z","timestamp":1586449273000},"page":"7149-7153","source":"Crossref","is-referenced-by-count":63,"title":["Speech Sentiment Analysis via Pre-Trained Features from End-to-End ASR Models"],"prefix":"10.1109","author":[{"given":"Zhiyun","family":"Lu","sequence":"first","affiliation":[]},{"given":"Liangliang","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Yu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Chung-Cheng","family":"Chiu","sequence":"additional","affiliation":[]},{"given":"James","family":"Fan","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1207"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462685"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682896"},{"key":"ref13","first-page":"193","article-title":"Exploring architectures, data and units for streaming end-to-end speech recognition with rnn-transducer","author":"rao","year":"2017","journal-title":"Proc of ASRU"},{"key":"ref14","first-page":"4774","article-title":"State-of-theart speech recognition with sequence-to-sequence models","author":"chiu","year":"2018","journal-title":"ICASSP"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682336"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref17","first-page":"927","article-title":"Switchboard-1 release 2","volume":"926","author":"godfrey","year":"1997","journal-title":"Linguistic Data Consortium Philadelphia"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref19","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2925934"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683163"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794468"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462677"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"939","DOI":"10.21437\/Interspeech.2017-233","article-title":"A comparison of sequence-to-sequence models for speech recognition","author":"prabhavalkar","year":"2017","journal-title":"InterSpeech"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683077"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1242"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2466"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682154"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638947"},{"key":"ref22","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref21","article-title":"A structured self-attentive sentence embedding","author":"lin","year":"2017"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"3707","DOI":"10.21437\/Interspeech.2017-1566","article-title":"Neural speech recognizer: Acoustic-to-word lstm model for large vocabulary speech recognition","author":"soltau","year":"2017","journal-title":"InterSpeech"},{"key":"ref23","article-title":"A large scale speech sentiment corpus","author":"chen","year":"2020","journal-title":"Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020)"},{"key":"ref25","article-title":"Lingvo: a modular and scalable framework for sequence-to-sequence modeling","author":"shen","year":"2019"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09052937.pdf?arnumber=9052937","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T20:11:44Z","timestamp":1656360704000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9052937\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9052937","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}