{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T19:30:46Z","timestamp":1771961446360,"version":"3.50.1"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9746475","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"7677-7681","source":"Crossref","is-referenced-by-count":13,"title":["Tts4pretrain 2.0: Advancing the use of Text and Speech in ASR Pretraining with Consistency and Contrastive Losses"],"prefix":"10.1109","author":[{"given":"Zhehuai","family":"Chen","sequence":"first","affiliation":[{"name":"Google, Inc."}]},{"given":"Yu","family":"Zhang","sequence":"additional","affiliation":[{"name":"Google, Inc."}]},{"given":"Andrew","family":"Rosenberg","sequence":"additional","affiliation":[{"name":"Google, Inc."}]},{"given":"Bhuvana","family":"Ramabhadran","sequence":"additional","affiliation":[{"name":"Google, Inc."}]},{"given":"Pedro","family":"Moreno","sequence":"additional","affiliation":[{"name":"Google, Inc."}]},{"given":"Gary","family":"Wang","sequence":"additional","affiliation":[{"name":"Google, Inc."}]}],"member":"263","reference":[{"key":"ref33","article-title":"Hierarchical generative modeling for controllable speech synthesis","author":"hsu","year":"2018"},{"key":"ref32","article-title":"Pushing the Limits of Semi-Supervised Learning for Automatic Speech Recognition","author":"zhang","year":"2020"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-2012"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref37","first-page":"3","article-title":"Group normalization","author":"wu","year":"2018","journal-title":"Proceedings of the European Conference on Computer Vision (ECCV)"},{"key":"ref36","article-title":"Visualizing data using t-SNE","volume":"9","author":"van der maaten","year":"2008","journal-title":"Journal of Machine Learning Research"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-236"},{"key":"ref34","article-title":"Generating sequences with recurrent neural networks","author":"graves","year":"2013"},{"key":"ref10","article-title":"SpeechStew: Simply mix all available speech recognition data to train one large neural network","author":"chan","year":"2021"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1891"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414227"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054438"},{"key":"ref14","article-title":"SPLAT: Speech-Language Joint Pre-Training for Spoken Language Understanding","author":"chung","year":"2020"},{"key":"ref15","article-title":"Fused acoustic and text encoding for multimodal bilingual pretraining and speech translation","author":"zheng","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1470"},{"key":"ref17","article-title":"Large-scale ASR Domain Adaptation using Self- and Semi-supervised Learning","author":"hwang","year":"2021"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746038"},{"key":"ref19","article-title":"Unsupervised data augmentation for consistency training","author":"xie","year":"2019"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/CHiME.2020-1"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688253"},{"key":"ref27","doi-asserted-by":"crossref","DOI":"10.21437\/Eurospeech.2003-466","article-title":"From Switchboard to Fisher: Telephone collection protocols, their uses and yields","author":"cieri","year":"2003","journal-title":"Eurospeech"},{"key":"ref3","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","author":"baevski","year":"2020"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053600"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-677"},{"key":"ref8","article-title":"A Simple Framework for Contrastive Learning of Visual Representations","author":"chen","year":"2020"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053831"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688018"},{"key":"ref20","article-title":"Speech SIMCLR: Combining Contrastive and Reconstruction Objective for Self-supervised Speech Representation Learning","author":"jiang","year":"2020"},{"key":"ref22","first-page":"7669","article-title":"Librilight: A benchmark for asr with limited or no supervision","author":"kahn","year":"2020","journal-title":"IEEE ICASSP"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref24","article-title":"The AMI meeting corpus: A pre-announcement","author":"carletta","year":"2005","journal-title":"Int Workshop Mach Learn Multimodal Interact"},{"key":"ref23","article-title":"The Kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"2011 IEEE Workshop on Automatic Speech Recognition &amp; Understanding"},{"key":"ref26","article-title":"Common voice: A massively-multilingual speech corpus","author":"ardila","year":"2019"},{"key":"ref25","first-page":"125","article-title":"TED-LIUM: an Automatic Speech Recognition dedicated corpus","author":"rousseau","year":"2012","journal-title":"LREC"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09746475.pdf?arnumber=9746475","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,23]],"date-time":"2024-09-23T03:20:53Z","timestamp":1727061653000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9746475\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9746475","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}