{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T15:32:06Z","timestamp":1771515126582,"version":"3.50.1"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62076144"],"award-info":[{"award-number":["62076144"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010877","name":"Science, Technology and Innovation Commission of Shenzhen Municipality","doi-asserted-by":"publisher","award":["WDZC20220816140515001"],"award-info":[{"award-number":["WDZC20220816140515001"]}],"id":[{"id":"10.13039\/501100010877","id-type":"DOI","asserted-by":"publisher"}]},{"name":"AMiner. Shenzhen SciBrain fund and Shenzhen Key Laboratory of next-generation interactive media innovative technology","award":["ZDSYS20210623092001004"],"award-info":[{"award-number":["ZDSYS20210623092001004"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2023.3331813","type":"journal-article","created":{"date-parts":[[2023,11,10]],"date-time":"2023-11-10T19:08:12Z","timestamp":1699643292000},"page":"517-528","source":"Crossref","is-referenced-by-count":4,"title":["Joint Multiscale Cross-Lingual Speaking Style Transfer With Bidirectional Attention Mechanism for Automatic Dubbing"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6284-5979","authenticated-orcid":false,"given":"Jingbei","family":"Li","sequence":"first","affiliation":[{"name":"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems, Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3322-0547","authenticated-orcid":false,"given":"Sipan","family":"Li","sequence":"additional","affiliation":[{"name":"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems, Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-4445-4399","authenticated-orcid":false,"given":"Ping","family":"Chen","sequence":"additional","affiliation":[{"name":"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems, Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3576-6252","authenticated-orcid":false,"given":"Luwen","family":"Zhang","sequence":"additional","affiliation":[{"name":"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems, Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-8288-4226","authenticated-orcid":false,"given":"Yi","family":"Meng","sequence":"additional","affiliation":[{"name":"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems, Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8533-0524","authenticated-orcid":false,"given":"Zhiyong","family":"Wu","sequence":"additional","affiliation":[{"name":"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems, Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4427-3532","authenticated-orcid":false,"given":"Helen","family":"Meng","sequence":"additional","affiliation":[{"name":"Department of Systems Engineering and Engineering Management, The Chinese University of Hong Kong, Hong Kong, SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4078-1273","authenticated-orcid":false,"given":"Qiao","family":"Tian","sequence":"additional","affiliation":[{"name":"ByteDance, Shanghai, China"}]},{"given":"Yuping","family":"Wang","sequence":"additional","affiliation":[{"name":"ByteDance, Shanghai, China"}]},{"given":"Yuxuan","family":"Wang","sequence":"additional","affiliation":[{"name":"ByteDance, Shanghai, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511816338"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1452"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"ref5","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ren","year":"2020"},{"key":"ref6","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1118"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413907"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3145293"},{"key":"ref10","first-page":"5180","article-title":"Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang","year":"2018"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-947"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11223"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1043"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2730"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1632"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1621"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547831"},{"key":"ref18","first-page":"419","article-title":"Dubbing in practice: A large scale study of human localization with insights for automatic dubbing","volume-title":"Trans. Assoc. Comput. Linguistics","volume":"11","author":"Brannon","year":"2023"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747158"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2983"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11089"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-441"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414966"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2009.04.004"},{"key":"ref25","article-title":"Wavenet: A generative model for raw audio","author":"Oord","year":"2016"},{"key":"ref26","first-page":"17022","article-title":"Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kong","year":"2020"},{"key":"ref27","first-page":"1764","article-title":"Towards end-to-end speech recognition with recurrent neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Graves","year":"2014"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/d14-1179"},{"key":"ref29","article-title":"Neural machine translation by jointly learning to align and translate","volume-title":"Proc. 3rd Int. Conf. Learn. Representations","author":"Bahdanau","year":"2015"},{"key":"ref30","first-page":"577","article-title":"Attention-based models for speech recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chorowski","year":"2015"},{"issue":"1","key":"ref31","first-page":"2096","article-title":"Domain-adversarial training of neural networks","volume":"17","author":"Ganin","year":"2016","journal-title":"J. Mach. Learn. Res."},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747085"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1951"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.63"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Eurospeech.2003-150"},{"key":"ref36","doi-asserted-by":"crossref","first-page":"103","DOI":"10.1007\/978-3-030-42105-2_6","article-title":"Dubbing","volume-title":"The Palgrave Handbook of Audiovisual Translation and Media Accessibility","author":"Chaume","year":"2020"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/MASSP.1986.1165342"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.iwslt-1.31"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1386"},{"key":"ref40","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics: Hum. Lang. Technol.","author":"Devlin","year":"2019"},{"key":"ref41","first-page":"5998","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"key":"ref42","first-page":"1180","article-title":"Unsupervised domain adaptation by backpropagation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ganin","year":"2015"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2668"},{"key":"ref44","first-page":"8024","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Paszke","year":"2019"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/10304349\/10314740.pdf?arnumber=10314740","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T02:38:57Z","timestamp":1705027137000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10314740\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/taslp.2023.3331813","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}