{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T05:04:50Z","timestamp":1774674290563,"version":"3.50.1"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T00:00:00Z","timestamp":1639353600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T00:00:00Z","timestamp":1639353600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,13]]},"DOI":"10.1109\/asru51503.2021.9687874","type":"proceedings-article","created":{"date-parts":[[2022,2,3]],"date-time":"2022-02-03T20:31:00Z","timestamp":1643920260000},"page":"8-15","source":"Crossref","is-referenced-by-count":68,"title":["Efficient Conformer: Progressive Downsampling and Grouped Attention for Automatic Speech Recognition"],"prefix":"10.1109","author":[{"given":"Maxime","family":"Burchi","sequence":"first","affiliation":[{"name":"Orange Labs,Cesson-S&#x00E9;vign&#x00E9;,France"}]},{"given":"Valentin","family":"Vielzeuf","sequence":"additional","affiliation":[{"name":"Orange Labs,Cesson-S&#x00E9;vign&#x00E9;,France"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-2012"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053205"},{"key":"ref33","first-page":"933","article-title":"Language modeling with gated convolutional networks","author":"dauphin","year":"0","journal-title":"ICML"},{"key":"ref32","first-page":"4055","article-title":"Image transformer","author":"parmar","year":"0","journal-title":"ICML"},{"key":"ref31","article-title":"Efficient conformer with prob-sparse attention mechanism for end-to-end speech recognition","author":"wang","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref30","first-page":"3531","article-title":"Efficient attention: Attention with linear complexities","author":"shen","year":"0","journal-title":"WACV"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"ref35","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012","journal-title":"ArXiv Preprint"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref10","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","author":"tan","year":"0","journal-title":"ICML"},{"key":"ref40","first-page":"8024","article-title":"Pytorch: An imperative style, high-performance deep learning library","author":"paszke","year":"0","journal-title":"NeurIPS"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638947"},{"key":"ref12","article-title":"Deep speech: Scaling up end-to-end speech recognition","author":"hannun","year":"2014","journal-title":"ArXiv Preprint"},{"key":"ref13","first-page":"4960","article-title":"Listen, attend and spell","author":"chan","year":"0","journal-title":"ICASSP"},{"key":"ref14","first-page":"193","article-title":"Exploring architectures, data and units for streaming end-to-end speech recognition with rnn-transducer","author":"rao","year":"0","journal-title":"ASRU"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682336"},{"key":"ref16","article-title":"Wav2letter: an end-to-end convnet-based speech recognition system","author":"collobert","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1819"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2059"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref4","article-title":"An overview of neural network compression","author":"o'neill","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01625"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-45105-6_93"},{"key":"ref29","article-title":"Efficient conformer-based speech recognition with linear attention","author":"li","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016292"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/1150402.1150464"},{"key":"ref7","article-title":"High-performance hardware for machine learning","volume":"2","author":"dally","year":"0","journal-title":"NeurIPS tutorial"},{"key":"ref2","article-title":"Citrinet: Closing the gap between non-autoregressive and autoregressive end-to-end models for automatic speech recognition","author":"majumdar","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2013-552"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053889"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414858"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053896"},{"key":"ref42","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"NeurIPS"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref41","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"0","journal-title":"ICLRE"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref44","first-page":"187","article-title":"Kenlm: Faster and smaller language model queries","author":"heafield","year":"0","journal-title":"Proceedings of the Sixth Workshop on Statistical Machine Translation"},{"key":"ref26","article-title":"Stand-alone self-attention in vision models","author":"ramachandran","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref43","doi-asserted-by":"crossref","first-page":"1424","DOI":"10.1109\/72.548170","article-title":"An analysis of noise in recurrent neural networks: convergence and generalization","volume":"7","author":"jim","year":"1996","journal-title":"IEEE Transactions on Neural Networks"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00338"}],"event":{"name":"2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Cartagena, Colombia","start":{"date-parts":[[2021,12,13]]},"end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9687821\/9687855\/09687874.pdf?arnumber=9687874","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T20:41:30Z","timestamp":1652733690000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9687874\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,13]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/asru51503.2021.9687874","relation":{},"subject":[],"published":{"date-parts":[[2021,12,13]]}}}