{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:28:26Z","timestamp":1775230106085,"version":"3.50.1"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3352248","type":"journal-article","created":{"date-parts":[[2024,1,11]],"date-time":"2024-01-11T18:24:35Z","timestamp":1704997475000},"page":"1336-1351","source":"Crossref","is-referenced-by-count":23,"title":["Self-Supervised Audio Teacher-Student Transformer for Both Clip-Level and Frame-Level Tasks"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-5796-8317","authenticated-orcid":false,"given":"Xian","family":"Li","sequence":"first","affiliation":[{"name":"Institute of Advanced Technology, Westlake Institute for Advanced Study, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6260-3005","authenticated-orcid":false,"given":"Nian","family":"Shao","sequence":"additional","affiliation":[{"name":"Institute of Advanced Technology, Westlake Institute for Advanced Study, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0393-9905","authenticated-orcid":false,"given":"Xiaofei","family":"Li","sequence":"additional","affiliation":[{"name":"Institute of Advanced Technology, Westlake Institute for Advanced Study, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Representation learning with contrastive predictive coding","author":"Oord","year":"2018"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2020.2985586"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413528"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9415009"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534474"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21315"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10961"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746490"},{"key":"ref9","first-page":"28708","article-title":"Masked autoencoders that listen","volume":"35","author":"Huang","year":"2022","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"ref10","article-title":"BEATs: Audio pre-training with acoustic tokenizers","author":"Chen","year":"2022"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2022.100616"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3221007"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref15","first-page":"1","article-title":"Masked spectrogram modeling using masked autoencoders for learning general-purpose audio representation","volume-title":"Proc. Conf. Mach. Learn. Res.","author":"Niizumi","year":"2022"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10097236"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.5555\/3495724.3497510"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10126"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.5555\/3524938.3525087"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054458"},{"key":"ref23","first-page":"12449","article-title":"Wav2Vec 2.0: A framework for self-supervised learning of speech representations","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Baevski","year":"2020"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3095662"},{"key":"ref26","first-page":"1298","article-title":"Data2vec: A general framework for self-supervised learning in speech, vision and language","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Baevski","year":"2022"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-698"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095691"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref30","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1176"},{"key":"ref32","article-title":"Learning from between-class examples for deep sound recognition","volume-title":"Proc. 6th Int. Conf. Learn. Representations","author":"Tokozume","year":"2018"},{"key":"ref33","article-title":"Mixup: Beyond empirical risk minimization","volume-title":"Proc. 6th Int. Conf. Learn. Representations","author":"Zhang","year":"2018"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.4324\/9781410605337-29"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref36","article-title":"ASiT: Audio spectrogram vision transformer for general audio representation","author":"Atito","year":"2022"},{"key":"ref37","article-title":"Distilling the knowledge in a neural network","volume-title":"Proc. NIPS Deep Learn. Representation Learn. Workshop","author":"Hinton","year":"2015"},{"key":"ref38","article-title":"CMKD: CNN\/transformer-based cross-model knowledge distillation for audio classification","author":"Gong","year":"2022"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3120633"},{"key":"ref40","article-title":"Decoupled weight decay regularization","volume-title":"Proc. 
7th Int. Conf. Learn. Representations","author":"Loshchilov","year":"2019"},{"key":"ref41","article-title":"SGDR: Stochastic gradient descent with warm restarts","volume-title":"Proc. 5th Int. Conf. Learn. Representations","author":"Loshchilov","year":"2017"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2655045"},{"key":"ref43","article-title":"Speech commands: A dataset for limited-vocabulary speech recognition","author":"Warden","year":"2018"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2017-950"},{"key":"ref45","first-page":"1068","article-title":"Neural audio synthesis of musical notes with wavenet autoencoders","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Engel","year":"2017"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3133208"},{"key":"ref47","article-title":"BEiT: BERT pre-training of image transformers","volume-title":"Proc. 10th Int. Conf. Learn. Representations","author":"Bao","year":"2022"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.33682\/006b-jx26"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414579"},{"key":"ref50","article-title":"Mean teacher convolution system for DCASE 2018 task 4","author":"JiaKai","year":"2018"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052995"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952132"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.33682\/w13e-5v06"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1242"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746790"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030497"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2605"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746312"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-227"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref61","first-page":"125","article-title":"HEAR: Holistic evaluation of audio representations","volume-title":"Proc. NIPS Competitions Demonstrations Track","author":"Turian","year":"2022"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/10304349\/10397558.pdf?arnumber=10397558","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T10:42:20Z","timestamp":1707820940000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10397558\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":61,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3352248","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}
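A record in this shape can be retrieved programmatically. Below is a minimal Python sketch that fetches this work by its DOI and reads a few of the fields shown above (title, authors, venue, pages). The public endpoint https://api.crossref.org/works/{DOI} and the third-party requests package are assumptions made for illustration and are not part of the record itself; the field names follow the record, and error handling and polite-use headers are omitted for brevity.

# Minimal sketch (assumptions: public Crossref REST API endpoint, `requests` installed).
import requests

DOI = "10.1109/taslp.2024.3352248"

# Fetch the work record; the JSON body mirrors the record reproduced above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Read a few bibliographic fields using the same keys as in the record.
title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
venue = work["container-title"][0]
volume = work.get("volume", "")
pages = work.get("page", "")
year = work["issued"]["date-parts"][0][0]

print(f"{authors}, \"{title},\" {venue}, vol. {volume}, pp. {pages}, {year}.")

Run against the live API, this prints a one-line IEEE-style citation assembled from the same fields shown in the record.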