{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T19:45:39Z","timestamp":1730231139867,"version":"3.28.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10096970","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["SLICER: Learning Universal Audio Representations Using Low-Resource Self-Supervised Pre-Training"],"prefix":"10.1109","author":[{"given":"Ashish","family":"Seth","sequence":"first","affiliation":[{"name":"IIT Madras,Speech Lab,Department of Electrical Engineering,Chennai,India"}]},{"given":"Sreyan","family":"Ghosh","sequence":"additional","affiliation":[{"name":"University of Maryland,College Park,USA"}]},{"given":"S.","family":"Umesh","sequence":"additional","affiliation":[{"name":"IIT Madras,Speech Lab,Department of Electrical Engineering,Chennai,India"}]},{"given":"Dinesh","family":"Manocha","sequence":"additional","affiliation":[{"name":"University of Maryland,College Park,USA"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383605"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746790"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534474"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1111\/2041-210X.13103"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref14","first-page":"829","article-title":"Fsd50k: an open dataset of human-labeled sound events","volume":"30","author":"fonseca","year":"2021","journal-title":"IEEE\/ACM TASLP"},{"key":"ref20","first-page":"1068","article-title":"Neural audio synthesis of musical notes with wavenet autoencoders","author":"engel","year":"0","journal-title":"ICML 2017"},{"key":"ref11","first-page":"6419","article-title":"Mockingjay: Unsupervised speech representation learning with deep bidirectional trans-former encoders","author":"liu","year":"0","journal-title":"IEEE ICASSP 2020"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2655045"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413528"},{"article-title":"A multi-device dataset for urban acoustic scene classification","year":"2018","author":"mesaros","key":"ref21"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3202093"},{"journal-title":"mixup Beyond empirical risk minimization","year":"2017","author":"zhang","key":"ref1"},{"journal-title":"Speech commands A dataset for limited-vocabulary speech recognition","year":"2018","author":"warden","key":"ref17"},{"key":"ref16","article-title":"Voxceleb: A large-scale speaker identification dataset","author":"nagrani","year":"0","journal-title":"Interspeech 2017 ISCA"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"year":"0","key":"ref18","article-title":"Free speech... recognition (linux, windows and mac) - voxforge.org"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"journal-title":"Superb Speech processing universal performance benchmark","year":"2021","author":"yang","key":"ref7"},{"journal-title":"Deep clustering for general-purpose audio representations","year":"2021","author":"ghosh","key":"ref9"},{"key":"ref4","first-page":"21271","article-title":"Bootstrap your own latent-a new approach to self-supervised learning","volume":"33","author":"grill","year":"0","journal-title":"NeurIPS 2020"},{"key":"ref3","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"baevski","year":"0","journal-title":"NeurIPS 2020"},{"article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"devlin","key":"ref6"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2023,6,4]]},"location":"Rhodes Island, Greece","end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10096970.pdf?arnumber=10096970","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,20]],"date-time":"2023-11-20T19:02:00Z","timestamp":1700506920000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10096970\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10096970","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}