{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T13:31:42Z","timestamp":1769693502229,"version":"3.49.0"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,10,17]],"date-time":"2021-10-17T00:00:00Z","timestamp":1634428800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,10,17]],"date-time":"2021-10-17T00:00:00Z","timestamp":1634428800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,10,17]]},"DOI":"10.1109\/waspaa52581.2021.9632721","type":"proceedings-article","created":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T21:12:28Z","timestamp":1639429948000},"page":"196-200","source":"Crossref","is-referenced-by-count":1,"title":["Cross-Domain Semi-Supervised Audio Event Classification Using Contrastive Regularization"],"prefix":"10.1109","author":[{"given":"Donmoon","family":"Lee","sequence":"first","affiliation":[{"name":"Music and Research Group, Seoul National University,Department of Intelligence and Information"}]},{"given":"Kyogu","family":"Lee","sequence":"additional","affiliation":[{"name":"Music and Research Group, Seoul National University,Department of Intelligence and Information"}]}],"member":"263","reference":[{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682345"},{"key":"ref10","article-title":"Mixmatch: A holistic approach to semi-supervised learning","author":"berthelot","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref11","article-title":"Fixmatch: Simplifying semi-supervised learning with consistency and confidence","author":"sohn","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref12","first-page":"694","article-title":"Perceptual losses for real-time style transfer and super-resolution","author":"johnson","year":"0","journal-title":"European Conference on Computer Vision"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1142\/S0218001493000339"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2006.100"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414337"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461684"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1121\/1.4799597"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9415116"},{"key":"ref28","author":"abadi","year":"2015","journal-title":"TensorFlow Large-Scale Machine Learning on Heterogeneous Systems"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2605"},{"key":"ref27","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"ArXiv Preprint"},{"key":"ref3","article-title":"Big self-supervised models are strong semi-supervised learners","author":"chen","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534474"},{"key":"ref29","doi-asserted-by":"crossref","first-page":"41","DOI":"10.1023\/A:1007379606734","article-title":"Multitask learning","volume":"28","author":"caruana","year":"1997","journal-title":"Machine Learning"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"ref8","article-title":"What should not be contrastive in contrastive learning","author":"xiao","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref7","article-title":"Multi-format contrastive learning of audio representations","author":"wang","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref2","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","author":"chen","year":"0","journal-title":"Int Conference on Machine Learning"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-12-396502-8.00022-X"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/2733373.2806390"},{"key":"ref22","article-title":"Rethinking cnn models for audio classification","author":"palanisamy","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2655045"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_38"},{"key":"ref23","article-title":"Esresnet: Environmental sound classification based on visual domain models","author":"guzhov","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref26","article-title":"Evaluation of cnn-based automatic music tagging models","author":"won","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref25","article-title":"Bootstrap your own latent: A new approach to self-supervised learning","author":"grill","year":"2020","journal-title":"ArXiv Preprint"}],"event":{"name":"2021 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)","location":"New Paltz, NY, USA","start":{"date-parts":[[2021,10,17]]},"end":{"date-parts":[[2021,10,20]]}},"container-title":["2021 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9632687\/9632666\/09632721.pdf?arnumber=9632721","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,2]],"date-time":"2022-08-02T23:57:41Z","timestamp":1659484661000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9632721\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10,17]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/waspaa52581.2021.9632721","relation":{},"subject":[],"published":{"date-parts":[[2021,10,17]]}}}