{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,23]],"date-time":"2025-12-23T10:03:06Z","timestamp":1766484186647,"version":"3.37.3"},"reference-count":19,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6,6]]},"DOI":"10.1109\/icassp39728.2021.9413421","type":"proceedings-article","created":{"date-parts":[[2021,5,13]],"date-time":"2021-05-13T19:53:45Z","timestamp":1620935625000},"page":"6683-6687","source":"Crossref","is-referenced-by-count":12,"title":["An Effective Deep Embedding Learning Method Based on Dense-Residual Networks for Speaker Verification"],"prefix":"10.1109","author":[{"given":"Ying","family":"Liu","sequence":"first","affiliation":[{"name":"University of Science and Technology of China,National Engineering Laboratory for Speech and Language Information Processing,Hefei,China"}]},{"given":"Yan","family":"Song","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,National Engineering Laboratory for Speech and Language Information Processing,Hefei,China"}]},{"given":"Ian","family":"McLoughlin","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,National Engineering Laboratory for Speech and Language Information Processing,Hefei,China"}]},{"given":"Lin","family":"Liu","sequence":"additional","affiliation":[{"name":"iFLYTEK Co. Ltd,iFLYTEK Research,Hefei,China"}]},{"given":"Li-rong","family":"Dai","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,National Engineering Laboratory for Speech and Language Information Processing,Hefei,China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3146"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPAASC47483.2019.9023301"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1922"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref15","article-title":"VOXSRC 2019: The first voxceleb speaker recognition challenge","author":"chung","year":"2019","journal-title":"arXiv preprint arXiv 1912 02522"},{"article-title":"BUT system description to voxceleb speaker recognition challenge 2019","year":"2019","author":"zeinali","key":"ref16"},{"key":"ref17","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"ASRU"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2616"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683120"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472652"},{"key":"ref3","article-title":"Deep speaker: an end-to-end neural speaker embedding system","author":"li","year":"2017","journal-title":"InterSpeech"},{"key":"ref6","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-950","article-title":"Voxceleb: A large-scale speaker identification dataset","author":"nagrani","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-92"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1606"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461375"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"999","DOI":"10.21437\/Interspeech.2017-620","article-title":"Deep neural network embeddings for text-independent speaker verification","author":"snyder","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462665"}],"event":{"name":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2021,6,6]]},"location":"Toronto, ON, Canada","end":{"date-parts":[[2021,6,11]]}},"container-title":["ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9413349\/9413350\/09413421.pdf?arnumber=9413421","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,3]],"date-time":"2022-08-03T00:20:56Z","timestamp":1659486056000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9413421\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,6]]},"references-count":19,"URL":"https:\/\/doi.org\/10.1109\/icassp39728.2021.9413421","relation":{},"subject":[],"published":{"date-parts":[[2021,6,6]]}}}