{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:25:20Z","timestamp":1766067920097},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10447827","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"11901-11905","source":"Crossref","is-referenced-by-count":5,"title":["Folding Attention: Memory and Power Optimization for On-Device Transformer-Based Streaming Speech Recognition"],"prefix":"10.1109","author":[{"given":"Yang","family":"Li","sequence":"first","affiliation":[{"name":"Meta AI"}]},{"given":"Liangzhen","family":"Lai","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Yuan","family":"Shangguan","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Forrest N.","family":"Iandola","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Zhaoheng","family":"Ni","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Ernie","family":"Chang","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Yangyang","family":"Shi","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Vikas","family":"Chandra","sequence":"additional","affiliation":[{"name":"Meta AI"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Attention Is All You Need","author":"Vaswani","year":"2017","journal-title":"NeurIPS"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1910"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1107"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1292"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414560"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1995"},{"article-title":"Transformer-Transducer: End-to-End Speech Recognition with Self-Attention","year":"2019","author":"Yeh","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682539"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462497"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054345"},{"article-title":"Generating Long Sequences with Sparse Transformers","year":"2019","author":"Child","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.232"},{"article-title":"Linformer: Self-Attention with Linear Complexity","year":"2020","author":"Wang","key":"ref16"},{"article-title":"Retentive Network: A Successor to Transformer for Large Language 
Models","year":"2023","author":"Sun","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682954"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054476"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3316781.3317874"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC.2018.8310262"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095845"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"article-title":"Fast Transformer Decoding: One Write-Head Is All You Need","year":"2019","author":"Shazeer","key":"ref24"},{"key":"ref25","article-title":"Boosting the Throughput and Accelerator Utilization of Specialized CNN Inference beyond Increasing Batch Size","author":"Kosaian","year":"2021","journal-title":"ICML"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.195"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-2012"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-711"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2024,4,14]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10447827.pdf?arnumber=10447827","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T06:14:34Z","timestamp":1722579274000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10447827\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10447827","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}