{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,1,18]],"date-time":"2025-01-18T05:07:41Z","timestamp":1737176861527,"version":"3.33.0"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,12,2]]},"DOI":"10.1109\/slt61566.2024.10832323","type":"proceedings-article","created":{"date-parts":[[2025,1,16]],"date-time":"2025-01-16T18:31:27Z","timestamp":1737052287000},"page":"169-176","source":"Crossref","is-referenced-by-count":0,"title":["An Analysis of Linear Complexity Attention Substitutes With Best-RQ"],"prefix":"10.1109","author":[{"given":"Ryan","family":"Whetten","sequence":"first","affiliation":[{"name":"Avignon Universit&#x00E9;,Laboratoire Informatique d&#x2019;Avignon,France"}]},{"given":"Titouan","family":"Parcollet","sequence":"additional","affiliation":[{"name":"Samsung AI Center Cambridge,United Kingdom"}]},{"given":"Adel","family":"Moumen","sequence":"additional","affiliation":[{"name":"Avignon Universit&#x00E9;,Laboratoire Informatique d&#x2019;Avignon,France"}]},{"given":"Marco","family":"Dinarelli","sequence":"additional","affiliation":[{"name":"Universit&#x00E9; Grenoble Alpes, Inria, CNRS, Grenoble INP, LIG,Grenoble,France,38000"}]},{"given":"Yannick","family":"Est\u00e8ve","sequence":"additional","affiliation":[{"name":"Avignon Universit&#x00E9;,Laboratoire Informatique d&#x2019;Avignon,France"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3207050"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1775"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1835"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-143"},{"key":"ref5","article-title":"Google usm: Scaling automatic speech recognition beyond 100 languages","author":"Zhang","year":"2023","journal-title":"arXiv preprint arXiv:2303.01037"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1176"},{"key":"ref7","first-page":"1416","article-title":"Efficient self-supervised learning with contextualized target representations for vision, speech and language","volume-title":"International Conference on Machine Learning","author":"Baevski"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1510"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref11","first-page":"3915","article-title":"Self-supervised learning with random-projection quantizer for speech recognition","volume-title":"International Conference on Machine Learning","author":"Chiu"},{"key":"ref12","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"Baevski","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSPW62465.2024.10626364"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1611"},{"key":"ref15","article-title":"Fastformer: Additive attention can be all you need","author":"Wu","year":"2021","journal-title":"arXiv preprint arXiv:2108.09084"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2024-40"},{"key":"ref17","article-title":"Mamba: Linear-time sequence modeling with selective state spaces","author":"Gu","year":"2023","journal-title":"arXiv preprint arXiv:2312.00752"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1087"},{"key":"ref19","article-title":"Speechbrain: A general-purpose speech toolkit","author":"Ravanelli","year":"2021","journal-title":"arXiv preprint arXiv:2106.04624"},{"key":"ref20","first-page":"17283","article-title":"Big bird: Transformers for longer sequences","volume":"33","author":"Zaheer","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref21","article-title":"Longformer: The long-document transformer","author":"Beltagy","year":"2020","journal-title":"arXiv:2004.05150"},{"key":"ref22","article-title":"Linformer: Self-attention with linear complexity","author":"Wang","year":"2020","journal-title":"arXiv preprint arXiv:2006.04768"},{"key":"ref23","first-page":"5156","article-title":"Transformers are RNNs: Fast autoregressive transformers with linear attention","volume-title":"Proceedings of the 37th International Conference on Machine Learning","volume":"119","author":"Katharopoulos"},{"key":"ref24","first-page":"17627","article-title":"Branchformer: Parallel mlp-attention architectures to capture local and global context for speech recognition and understanding","volume-title":"International Conference on Machine Learning","author":"Peng"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.871"},{"key":"ref26","first-page":"24261","article-title":"Mlp-mixer: An all-mlp architecture for vision","volume":"34","author":"Tolstikhin","year":"2021","journal-title":"Advances in neural information processing systems"},{"article-title":"Hypernet-works","volume-title":"International Conference on Learning Representations","author":"Ha","key":"ref27"},{"key":"ref28","article-title":"Mamba in speech: Towards an alternative to self-attention","author":"Zhang","year":"2024","journal-title":"arXiv preprint arXiv:2405.12609"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-950"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461375"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2650"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.588"},{"key":"ref34","first-page":"4218","article-title":"Common voice: A massively-multilingual speech corpus","volume-title":"Proceedings of the Twelfth Language Resources and Evaluation Conference","author":"Ardila"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2059"}],"event":{"name":"2024 IEEE Spoken Language Technology Workshop (SLT)","start":{"date-parts":[[2024,12,2]]},"location":"Macao","end":{"date-parts":[[2024,12,5]]}},"container-title":["2024 IEEE Spoken Language Technology Workshop (SLT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10830790\/10830793\/10832323.pdf?arnumber=10832323","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,17]],"date-time":"2025-01-17T07:50:35Z","timestamp":1737100235000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10832323\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/slt61566.2024.10832323","relation":{},"subject":[],"published":{"date-parts":[[2024,12,2]]}}}