{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T08:45:26Z","timestamp":1725698726757},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9746164","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"9221-9225","source":"Crossref","is-referenced-by-count":3,"title":["Time Domain Adversarial Voice Conversion for ADD 2022"],"prefix":"10.1109","author":[{"given":"Cheng","family":"Wen","sequence":"first","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Tingwei","family":"Guo","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Xingjun","family":"Tan","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Rui","family":"Yan","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Shuran","family":"Zhou","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Chuandong","family":"Xie","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Wei","family":"Zou","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]},{"given":"Xiangang","family":"Li","sequence":"additional","affiliation":[{"name":"Beike,Beijing,China"}]}],"member":"263","reference":[{"article-title":"A survey on neural speech synthesis","year":"2021","author":"tan","key":"ref10"},{"article-title":"Neural voice cloning with a few samples","year":"2018","author":"arik","key":"ref11"},{"article-title":"Adaspeech: Adaptive text to speech for custom voice","year":"2021","author":"chen","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2307"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-2086"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462342"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639535"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-319"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746939"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414788"},{"article-title":"How deep are the fakes? focusing on audio deepfake: A survey","year":"2021","author":"khanjani","key":"ref4"},{"article-title":"Human perception of audio deepfakes","year":"2021","author":"m\u00fcller","key":"ref3"},{"article-title":"Deep voice 3: Scaling text-to-speech with convolutional sequence learning","year":"2017","author":"ping","key":"ref6"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"article-title":"Melgan: Generative adversarial networks for conditional waveform synthesis","year":"2019","author":"kumar","key":"ref8"},{"article-title":"Fastspeech: Fast, robust and controllable text to speech","year":"2019","author":"ren","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414423"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-755"},{"article-title":"Hifigan: Generative adversarial networks for efficient and high fidelity speech synthesis","year":"2020","author":"kong","key":"ref9"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-948"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2022,5,23]]},"location":"Singapore, Singapore","end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09746164.pdf?arnumber=9746164","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T20:09:48Z","timestamp":1661198988000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9746164\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9746164","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}