{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T05:36:29Z","timestamp":1774935389358,"version":"3.50.1"},"reference-count":62,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62101351"],"award-info":[{"award-number":["62101351"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Tencent AI Lab Rhino Bird Funding","award":["RBFR2023014"],"award-info":[{"award-number":["RBFR2023014"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3363446","type":"journal-article","created":{"date-parts":[[2024,2,12]],"date-time":"2024-02-12T19:44:03Z","timestamp":1707767043000},"page":"1559-1572","source":"Crossref","is-referenced-by-count":9,"title":["Computation and Parameter Efficient Multi-Modal Fusion Transformer for Cued Speech Recognition"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8109-5248","authenticated-orcid":false,"given":"Lei","family":"Liu","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4497-0135","authenticated-orcid":false,"given":"Li","family":"Liu","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9158-9401","authenticated-orcid":false,"given":"Haizhou","family":"Li","sequence":"additional","affiliation":[{"name":"Shenzhen Research Institute of Big Data, School of Data Science, Chinese University of Hong Kong, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"3","article-title":"Cued speech","volume":"112","author":"Cornett","year":"1967","journal-title":"Amer. Ann. Deaf"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1353\/aad.2019.0031"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462090"},{"key":"ref4","first-page":"2090","article-title":"Continuous phoneme recognition in cued speech for French","volume-title":"Proc. IEEE Eur. Signal Process. Conf.","author":"Heracleous","year":"2012"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2434"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-440"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2023-663"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-432"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01095"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3226330"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095271"},{"key":"ref13","first-page":"5156","article-title":"Transformers are RNNs: Fast autoregressive transformers with linear attention","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Katharopoulos"},{"key":"ref14","first-page":"1","article-title":"DeLight: Deep and light-weight transformer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Mehta","year":"2020"},{"key":"ref15","first-page":"24226","article-title":"Flowformer: Linearizing transformers with conservation flows","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wu","year":"2022"},{"key":"ref16","first-page":"1","article-title":"Random feature attention","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Peng","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3530811"},{"key":"ref18","first-page":"1","article-title":"cosFormer: Rethinking softmax in attention","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Qin","year":"2021"},{"key":"ref19","first-page":"1","article-title":"Rethinking attention with performers","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Choromanski","year":"2020"},{"key":"ref20","article-title":"Linformer: Self-attention with linear complexity","author":"Wang","year":"2020"},{"key":"ref21","article-title":"Generating long sequences with sparse transformers","author":"Child","year":"2020"},{"key":"ref22","first-page":"1","article-title":"Reformer: The efficient transformer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kitaev","year":"2019"},{"key":"ref23","first-page":"17283","article-title":"Big bird: Transformers for longer sequences","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Zaheer","year":"2020"},{"key":"ref24","first-page":"1","article-title":"Pay less attention with lightweight and dynamic convolutions","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wu","year":"2019"},{"key":"ref25","first-page":"933","article-title":"Language modeling with gated convolutional networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Dauphin","year":"2017"},{"key":"ref26","first-page":"1","article-title":"Lite transformer with long-short range attention","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wu","year":"2020"},{"key":"ref27","first-page":"9099","article-title":"Transformer quality in linear time","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hua","year":"2022"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.23919\/Eusipco47968.2020.9287365"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1212"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054441"},{"key":"ref32","article-title":"Towards data distillation for end-to-end spoken conversational question answering","author":"You","year":"2020"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414999"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-naacl.91"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-110"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-120"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.3"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/549"},{"key":"ref39","first-page":"1","article-title":"Cued speech hand gestures recognition tool","volume-title":"Proc. IEEE 13th Eur. Signal Process. Conf.","author":"Burger","year":"2005"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/s00138-012-0445-1"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2010.03.001"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO.2019.8903053"},{"key":"ref43","first-page":"1755","article-title":"Dlib-ml: A machine learning toolkit","volume":"10","author":"King","year":"2009","journal-title":"J. Mach. Learn. Res."},{"key":"ref44","article-title":"MediaPipe: A framework for building perception pipelines","author":"Lugaresi","year":"2019"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3275156"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.232"},{"key":"ref47","first-page":"21665","article-title":"Fast transformers with clustered attention","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Vyas","year":"2020"},{"key":"ref48","first-page":"1177","article-title":"Random features for large-scale kernel machines","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Rahimi","year":"2007"},{"key":"ref49","first-page":"682","article-title":"Using the Nyström method to speed up kernel machines","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Williams","year":"2001"},{"key":"ref50","first-page":"21297","article-title":"Soft: Softmax-free transformer with linear complexity","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Lu","year":"2021"},{"key":"ref51","first-page":"12321","article-title":"You only sample (almost) once: Linear cost self-attention via bernoulli sampling","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zeng","year":"2021"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00041"},{"key":"ref53","article-title":"Searching for activation functions","author":"Le","year":"2017"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746976"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2889052"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054127"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414567"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2004.10.013"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/11678816_2"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.2976493"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/10304349\/10432942.pdf?arnumber=10432942","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,8]],"date-time":"2024-03-08T18:57:30Z","timestamp":1709924250000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10432942\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":62,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3363446","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}