{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,30]],"date-time":"2025-05-30T06:08:04Z","timestamp":1748585284320,"version":"3.28.0"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9053415","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T16:21:13Z","timestamp":1586449273000},"page":"6299-6303","source":"Crossref","is-referenced-by-count":23,"title":["Visually Guided Self Supervised Learning of Speech Representations"],"prefix":"10.1109","author":[{"given":"Abhinav","family":"Shukla","sequence":"first","affiliation":[]},{"given":"Konstantinos","family":"Vougioukas","sequence":"additional","affiliation":[]},{"given":"Pingchuan","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Stavros","family":"Petridis","sequence":"additional","affiliation":[]},{"given":"Maja","family":"Pantic","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"45","article-title":"Prediction-based audiovisual fusion for classification of non-linguistic vocalisations","volume":"7","author":"petridis","year":"2015","journal-title":"IEEE Transactions on Affective Computing"},{"journal-title":"Representation learning with contrastive predictive coding","year":"2018","author":"oord","key":"ref11"},{"journal-title":"An unsupervised autoregressive model for speech representation learning","year":"2019","author":"chung","key":"ref12"},{"journal-title":"Learning speaker representations with mutual information","year":"2018","author":"ravanelli","key":"ref13"},{"journal-title":"wav2vec Unsupervised pretraining for speech recognition","year":"2019","author":"schneider","key":"ref14"},{"journal-title":"Self-supervised audio representation learning for mobile devices","year":"2019","author":"tagliasacchi","key":"ref15"},{"journal-title":"Learning problem-agnostic speech representations from multiple self-supervised tasks","year":"2019","author":"pascual","key":"ref16"},{"journal-title":"End-to-end speech-driven facial animation with temporal gans","year":"2018","author":"vougioukas","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2014.2336244"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0196391"},{"key":"ref4","first-page":"7763","article-title":"Cooperative learning of audio and video models from self-supervised synchronization","author":"korbar","year":"2018","journal-title":"NeurIPS"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.607"},{"journal-title":"Deep contextualized word representations","year":"2018","author":"peters","key":"ref6"},{"journal-title":"Audio-visual scene analysis with self-supervised multisensory features","year":"2018","author":"owens","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016892"},{"journal-title":"Bert Pretraining of deep bidirectional transformers for language understanding","year":"2018","author":"devlin","key":"ref7"},{"key":"ref2","first-page":"1422","article-title":"Unsupervised visual representation learning by context prediction","author":"doersch","year":"2015","journal-title":"ICCV"},{"journal-title":"Unsupervised representation learning by predicting image rotations","year":"2018","author":"gidaris","key":"ref1"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-018-1083-5"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1121\/1.2229005"},{"key":"ref22","first-page":"234","article-title":"U-net: Convolutional networks for biomedical image segmentation","author":"ronneberger","year":"2015","journal-title":"MICCAI"},{"journal-title":"Speech commands A dataset for limited-vocabulary speech recognition","year":"2018","author":"warden","key":"ref21"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2407694"},{"key":"ref23","first-page":"435","article-title":"Objects that sound","author":"arandjelovic","year":"2018","journal-title":"ECCV"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"ref25","article-title":"Lip reading in the wild","author":"chung","year":"2016","journal-title":"ACCV"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2020,5,4]]},"location":"Barcelona, Spain","end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09053415.pdf?arnumber=9053415","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T20:11:44Z","timestamp":1656360704000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9053415\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9053415","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}