{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T13:09:54Z","timestamp":1725800994711},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9053163","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T16:21:13Z","timestamp":1586449273000},"page":"7999-8003","source":"Crossref","is-referenced-by-count":15,"title":["Large-Scale Unsupervised Pre-Training for End-to-End Spoken Language Understanding"],"prefix":"10.1109","author":[{"given":"Pengwei","family":"Wang","sequence":"first","affiliation":[]},{"given":"Liangchen","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Yong","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Jinghui","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Zaiqing","family":"Nie","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.1109\/SLT.2018.8639043","article-title":"From audio to semantics: Approaches to end-to-end spoken language understanding","author":"haghani","year":"2018"},{"key":"ref11","article-title":"Towards end-to-end spoken language understanding","author":"serdyuk","year":"2019","journal-title":"InterSpeech"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2396"},{"key":"ref13","article-title":"Exploring asr-free end-to-end modeling to improve spoken language understanding in a cloud-based dialog system","author":"qian","year":"2017","journal-title":"ASRU"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"ref15","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"NAACL-HLT"},{"article-title":"Xlnet: Generalized autoregressive pretraining for language understanding","year":"2019","author":"yang","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"article-title":"Ernie 2.0: A continual pre-training framework for language understanding","year":"2018","author":"sun","key":"ref18"},{"article-title":"Improving language understanding by generative pre-training","year":"2018","author":"radford","key":"ref19"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33014959"},{"key":"ref3","article-title":"Recurrent neural networks for language understanding","author":"yao","year":"2014","journal-title":"InterSpeech"},{"article-title":"Snips voice platform: an embedded spoken language understanding system for private-by-design voice 
interfaces","year":"2018","author":"coucke","key":"ref6"},{"key":"ref5","article-title":"Improving slot filling in spoken language understanding with joint pointer and attention","author":"zhao","year":"2019","journal-title":"ACL"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2383614"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-6393(97)00040-X"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6853573"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461718"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2012.6289054"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639585"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461404"},{"article-title":"Interpretable convolutional filters with sincnet","year":"2018","author":"ravanelli","key":"ref21"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref23","article-title":"A structured self-attentive sentence embedding","author":"lin","year":"2017","journal-title":"ICLRE"},{"key":"ref25","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-950","article-title":"Voxceleb: a large-scale speaker identification dataset","author":"nagrani","year":"2017"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2020,5,4]]},"location":"Barcelona, Spain","end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09053163.pdf?arnumber=9053163","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T20:21:36Z","timestamp":1656361296000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9053163\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9053163","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}