{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T15:20:03Z","timestamp":1774365603871,"version":"3.50.1"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10446321","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"11161-11165","source":"Crossref","is-referenced-by-count":5,"title":["Residualtransformer: Residual Low-Rank Learning With Weight-Sharing For Transformer Layers"],"prefix":"10.1109","author":[{"given":"Yiming","family":"Wang","sequence":"first","affiliation":[{"name":"Microsoft Corporation,Redmond,WA,USA"}]},{"given":"Jinyu","family":"Li","sequence":"additional","affiliation":[{"name":"Microsoft Corporation,Redmond,WA,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref2","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. ICLR","author":"Hu"},{"key":"ref3","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. NAACL-HLT","author":"Devlin"},{"key":"ref4","article-title":"Language models are few-shot learners","volume-title":"NeurIPS","author":"Brown"},{"key":"ref5","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. ICML","author":"Radford"},{"key":"ref6","article-title":"Google USM: scaling automatic speech recognition beyond 100 languages","author":"Zhang","year":"2023"},{"key":"ref7","article-title":"EnergonAI: An inference system for 10-100 billion parameter transformer models","author":"Du","year":"2022"},{"key":"ref8","article-title":"Wake word detection and its applications","volume-title":"Ph.D. thesis","author":"Wang","year":"2021"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095006"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.5555\/3295222.3295349"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053896"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414777"},{"key":"ref15","article-title":"Distilling the knowledge in a neural network","volume-title":"Proc. NeurIPS Deep Learning Workshop","author":"Hinton"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1025"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854828"},{"key":"ref18","article-title":"Lightweight and efficient end-toend speech recognition using low-rank transformer","volume-title":"Proc. ICASSP","author":"Winata"},{"key":"ref19","article-title":"Deep compression: Compressing deep neural network with pruning, trained quantization and huffman coding","volume-title":"Proc. ICLR","author":"Han"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10809"},{"key":"ref21","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. NeurIPS","author":"Han"},{"key":"ref22","article-title":"To prune, or not to prune: Exploring the efficacy of pruning for model compression","volume-title":"Proc. ICLR Workshop Track","author":"Zhu"},{"key":"ref23","article-title":"Universal transformers","volume-title":"Proc. ICLR","author":"Dehghani"},{"key":"ref24","article-title":"ALBERT: A lite BERT for self-supervised learning of language representations","volume-title":"Proc. ICLR","author":"Lan"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-819"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.656"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953116"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-2012"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-2027"},{"key":"ref31","article-title":"Microsoft speech language translation (MSLT) corpus: The IWSLT 2016 release for English, French and German","volume-title":"Proc. IWSLT","author":"Federmann"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-24797-2"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413535"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10446321.pdf?arnumber=10446321","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T04:42:18Z","timestamp":1722573738000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10446321\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10446321","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}