{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T10:42:16Z","timestamp":1742985736398,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":27,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819916443"},{"type":"electronic","value":"9789819916450"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-981-99-1645-0_43","type":"book-chapter","created":{"date-parts":[[2023,4,13]],"date-time":"2023-04-13T17:03:13Z","timestamp":1681405393000},"page":"517-527","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Investigating Effective Domain Adaptation Method for\u00a0Speaker Verification Task"],"prefix":"10.1007","author":[{"given":"Guangxing","family":"Li","sequence":"first","affiliation":[]},{"given":"Wangjin","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Sheng","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Jichen","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Huang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,4,14]]},"reference":[{"key":"43_CR1","unstructured":"Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: a framework for self-supervised learning of speech representations. In: Advances in Neural Information Processing Systems 33, pp. 12449\u201312460 (2020)"},{"key":"43_CR2","doi-asserted-by":"publisher","first-page":"7","DOI":"10.1016\/j.specom.2021.01.001","volume":"129","author":"F Bahmaninezhad","year":"2021","unstructured":"Bahmaninezhad, F., Zhang, C., Hansen, J.H.: An investigation of domain adaptation in speaker embedding space for speaker recognition. Speech Commun. 129, 7\u201316 (2021)","journal-title":"Speech Commun."},{"key":"43_CR3","unstructured":"Chen, S., et al.: WavLm: large-scale self-supervised pre-training for full stack speech processing. arXiv preprint arXiv:2110.13900 (2021)"},{"key":"43_CR4","doi-asserted-by":"crossref","unstructured":"Chen, S., et al.: Why does self-supervised learning for speech recognition benefit speaker recognition? arXiv preprint arXiv:2204.12765 (2022)","DOI":"10.21437\/Interspeech.2022-10019"},{"key":"43_CR5","doi-asserted-by":"crossref","unstructured":"Chowdhury, A., Cozzo, A., Ross, A.: Domain adaptation for speaker recognition in singing and spoken voice. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7192\u20137196. IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9746111"},{"key":"43_CR6","doi-asserted-by":"crossref","unstructured":"Chung, J.S., Nagrani, A., Zisserman, A.: VoxCeleb2: deep speaker recognition. 
In: Interspeech 2018 (2018)","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"43_CR7","doi-asserted-by":"publisher","unstructured":"Fan, Y., et al.: CN-Celeb: a challenging Chinese speaker recognition dataset. In: ICASSP 2020\u20132020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7604\u20137608 (2020). https:\/\/doi.org\/10.1109\/ICASSP40776.2020.9054017","DOI":"10.1109\/ICASSP40776.2020.9054017"},{"key":"43_CR8","doi-asserted-by":"crossref","unstructured":"Fan, Z., Li, M., Zhou, S., Xu, B.: Exploring wav2vec 2.0 on speaker verification and language identification. arXiv preprint arXiv:2012.06185 (2020)","DOI":"10.21437\/Interspeech.2021-1280"},{"key":"43_CR9","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"43_CR10","doi-asserted-by":"publisher","first-page":"3451","DOI":"10.1109\/TASLP.2021.3122291","volume":"29","author":"WN Hsu","year":"2021","unstructured":"Hsu, W.N., Bolte, B., Tsai, Y.H.H., Lakhotia, K., Salakhutdinov, R., Mohamed, A.: HuBERT: self-supervised speech representation learning by masked prediction of hidden units. IEEE\/ACM Trans. Audio Speech Lang. Process. 29, 3451\u20133460 (2021)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"43_CR11","doi-asserted-by":"crossref","unstructured":"Ko, T., Peddinti, V., Povey, D., Seltzer, M.L., Khudanpur, S.: A study on data augmentation of reverberant speech for robust speech recognition. In: 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5220\u20135224. IEEE (2017)","DOI":"10.1109\/ICASSP.2017.7953152"},{"key":"43_CR12","doi-asserted-by":"publisher","unstructured":"Lee, K.A., Wang, Q., Koshinaka, T.: The CORAL+ algorithm for unsupervised domain adaptation of PLDA. In: ICASSP 2019\u20132019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5821\u20135825 (2019). https:\/\/doi.org\/10.1109\/ICASSP.2019.8682852","DOI":"10.1109\/ICASSP.2019.8682852"},{"key":"43_CR13","doi-asserted-by":"crossref","unstructured":"Li, L., et al.: CN-Celeb: multi-genre speaker recognition. Speech Communication (2022)","DOI":"10.1016\/j.specom.2022.01.002"},{"key":"43_CR14","doi-asserted-by":"crossref","unstructured":"Li, R., Zhang, W., Chen, D.: The CORAL++ algorithm for unsupervised domain adaptation of speaker recognition. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7172\u20137176. IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9747792"},{"key":"43_CR15","doi-asserted-by":"crossref","unstructured":"Nagrani, A., Chung, J.S., Zisserman, A.: VoxCeleb: a large-scale speaker identification dataset. arXiv preprint arXiv:1706.08612 (2017)","DOI":"10.21437\/Interspeech.2017-950"},{"key":"43_CR16","unstructured":"Novoselov, S., Lavrentyeva, G., Avdeeva, A., Volokhov, V., Gusev, A.: Robust speaker recognition with transformers using wav2vec 2.0. arXiv preprint arXiv:2203.15095 (2022)"},{"key":"43_CR17","doi-asserted-by":"crossref","unstructured":"Ott, M., et al.: FAIRSEQ: a fast, extensible toolkit for sequence modeling. arXiv preprint arXiv:1904.01038 (2019)","DOI":"10.18653\/v1\/N19-4009"},{"key":"43_CR18","unstructured":"Reynolds, D., et al.: The 2016 NIST speaker recognition evaluation. Tech. 
Rep., MIT Lincoln Laboratory Lexington United States (2017)"},{"key":"43_CR19","doi-asserted-by":"crossref","unstructured":"Sadjadi, S.O., Greenberg, C., Singer, E., Reynolds, D., Hernandez-Cordero, J.: The 2018 NIST speaker recognition evaluation. In: Interspeech 2019 (2019)","DOI":"10.21437\/Interspeech.2019-1351"},{"key":"43_CR20","doi-asserted-by":"crossref","unstructured":"Saeki, T., Xin, D., Nakata, W., Koriyama, T., Takamichi, S., Saruwatari, H.: UTMOS: utokyo-sarulab system for voicemos challenge 2022. arXiv e-prints arXiv:2204.02152 (2022)","DOI":"10.21437\/Interspeech.2022-439"},{"key":"43_CR21","doi-asserted-by":"crossref","unstructured":"Schneider, S., Baevski, A., Collobert, R., Auli, M.: wav2vec: unsupervised pre-training for speech recognition. arXiv preprint arXiv:1904.05862 (2019)","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"43_CR22","doi-asserted-by":"crossref","unstructured":"Snyder, D., Garcia-Romero, D., Povey, D., Khudanpur, S.: Deep neural network embeddings for text-independent speaker verification. In: Interspeech 2017 (2017)","DOI":"10.21437\/Interspeech.2017-620"},{"key":"43_CR23","unstructured":"Snyder, D., Chen, G., Povey, D.: MUSAN: a music, speech, and noise corpus. arXiv preprint arXiv:1510.08484 (2015)"},{"key":"43_CR24","doi-asserted-by":"crossref","unstructured":"Sun, B., Feng, J., Saenko, K.: Return of frustratingly easy domain adaptation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 30 (2016)","DOI":"10.1609\/aaai.v30i1.10306"},{"key":"43_CR25","doi-asserted-by":"crossref","unstructured":"Tong, F., Zhao, M., Zhou, J., Lu, H., Li, Z., Li, L., Hong, Q.: ASV-SUBTOOLS: open source toolkit for automatic speaker verification. In: ICASSP 2021\u20132021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6184\u20136188. IEEE (2021)","DOI":"10.1109\/ICASSP39728.2021.9414676"},{"issue":"7","key":"43_CR26","doi-asserted-by":"publisher","first-page":"926","DOI":"10.1109\/LSP.2018.2822810","volume":"25","author":"F Wang","year":"2018","unstructured":"Wang, F., Cheng, J., Liu, W., Liu, H.: Additive Margin Softmax for Face Verification. IEEE Signal Process. Lett. 25(7), 926\u2013930 (2018). https:\/\/doi.org\/10.1109\/LSP.2018.2822810","journal-title":"IEEE Signal Process. Lett."},{"key":"43_CR27","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: Fusion of self-supervised learned models for MOS prediction. arXiv e-prints, pp. 
arXiv\u20132204 (2022)","DOI":"10.21437\/Interspeech.2022-10262"}],"container-title":["Communications in Computer and Information Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-1645-0_43","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,4,13]],"date-time":"2023-04-13T17:20:00Z","timestamp":1681406400000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-1645-0_43"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9789819916443","9789819916450"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-1645-0_43","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"14 April 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"New Delhi","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 November 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 November 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iconip2022.apnns.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easy Chair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"810","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"359","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"44% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.65","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ICONIP 2022 consists of a two-volume set, LNCS & CCIS, which includes 146 and 213 papers","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}