{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T17:30:12Z","timestamp":1772645412121,"version":"3.50.1"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747166","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"7527-7531","source":"Crossref","is-referenced-by-count":18,"title":["Real Additive Margin Softmax for Speaker Verification"],"prefix":"10.1109","author":[{"given":"Lantian","family":"Li","sequence":"first","affiliation":[{"name":"Tsinghua University,Center for Speech and Language Technologies, BNRist,China"}]},{"given":"Ruiqian","family":"Nai","sequence":"additional","affiliation":[{"name":"Tsinghua University,Center for Speech and Language Technologies, BNRist,China"}]},{"given":"Dong","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Center for Speech and Language Technologies, BNRist,China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953194"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2021.03.004"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-11"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1769"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2357"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683245"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2011-53"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3039573"},{"key":"ref18","article-title":"Unified hypersphere embedding for speaker recognition","author":"hajibabaei","year":"2018"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2018.2822810"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP.2016.7918380"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472652"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683393"},{"key":"ref5","article-title":"Deep speaker: an end-to-end neural speaker embedding system","author":"li","year":"2017"},{"key":"ref8","first-page":"4879","article-title":"Generalized end-to-end loss for speaker verification","author":"wan","year":"2018","journal-title":"ICASSP"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2538"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1064"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461375"},{"key":"ref9","article-title":"Deepvox: Discovering features from raw audio for speaker recognition in degraded audio signals","author":"chowdhury","year":"2020"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPAASC47483.2019.9023039"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/BF00994018"},{"key":"ref24","article-title":"CN-Celeb: multi-genre speaker recognition","author":"li","year":"2020"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1129"},{"key":"ref25","article-title":"BUT system description to Voxceleb speaker recognition challenge 2019","author":"zeinali","year":"2019"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747166.pdf?arnumber=9747166","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T20:11:23Z","timestamp":1661199083000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747166\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747166","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}