{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T19:14:56Z","timestamp":1774120496246,"version":"3.50.1"},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9746056","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"606-610","source":"Crossref","is-referenced-by-count":31,"title":["S3T: Self-Supervised Pre-Training with Swin Transformer For Music Classification"],"prefix":"10.1109","author":[{"given":"Hang","family":"Zhao","sequence":"first","affiliation":[{"name":"ByteDance AI Lab Speech & Audio Team,China"}]},{"given":"Chen","family":"Zhang","sequence":"additional","affiliation":[{"name":"Zhejiang University,China"}]},{"given":"Bilei","family":"Zhu","sequence":"additional","affiliation":[{"name":"ByteDance AI Lab Speech & Audio Team,China"}]},{"given":"Zejun","family":"Ma","sequence":"additional","affiliation":[{"name":"ByteDance AI Lab Speech & Audio Team,China"}]},{"given":"Kejun","family":"Zhang","sequence":"additional","affiliation":[{"name":"Zhejiang University,China"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Self-supervised learning with swin transformers","author":"xie","year":"2021"},{"key":"ref11","article-title":"Efficient self-supervised vision transformers for representation learning","author":"li","year":"2021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2002.800560"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2478068"},{"key":"ref14","article-title":"Codified audio language modeling learns useful representations for music information retrieval","author":"castellon","year":"2021","journal-title":"ISMIR"},{"key":"ref15","article-title":"Fma: A dataset for music analysis","author":"defferrard","year":"2017","journal-title":"18th International Society for Music Information Retrieval Conference"},{"key":"ref16","first-page":"387","article-title":"Evaluation of algorithms using games: The case of music tagging","author":"law","year":"2009","journal-title":"ISMIR"},{"key":"ref17","article-title":"Decoupled weight decay regularization","author":"loshchilov","year":"2017"},{"key":"ref18","article-title":"Attention-based neural bag-of-features learning for sequence data","author":"tran","year":"2020"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref4","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","author":"chen","year":"2020","journal-title":"International Conference on Machine Learning"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"150","DOI":"10.3390\/app8010150","article-title":"Samplecnn: End-to-end deep convolutional neural networks using very small filters for music classification","volume":"8","author":"lee","year":"2018","journal-title":"Applied Sciences"},{"key":"ref5","article-title":"Contrastive learning of musical representations","author":"spijkervet","year":"2021","journal-title":"ISMIR"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414405"},{"key":"ref1","article-title":"Representation learning with contrastive predictive coding","author":"van den oord","year":"2018"},{"key":"ref9","first-page":"2530","article-title":"Clar: Contrastive learning of auditory representations","author":"al-tahan","year":"2021","journal-title":"International Conference on Artificial Intelligence and Statistics"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00207"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09746056.pdf?arnumber=9746056","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:07:47Z","timestamp":1660594067000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9746056\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9746056","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}