{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T18:37:58Z","timestamp":1743014278212,"version":"3.40.3"},"publisher-location":"Cham","reference-count":25,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031202322"},{"type":"electronic","value":"9783031202339"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20233-9_37","type":"book-chapter","created":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T00:02:48Z","timestamp":1667433768000},"page":"364-372","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Online Neural Speaker Diarization with\u00a0Core Samples"],"prefix":"10.1007","author":[{"given":"Yanyan","family":"Yue","sequence":"first","affiliation":[]},{"given":"Jun","family":"Du","sequence":"additional","affiliation":[]},{"given":"Maokui","family":"He","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,3]]},"reference":[{"key":"37_CR1","doi-asserted-by":"crossref","unstructured":"Park, T.J., Kanda, N., Dimitriadis, D., Han, K., Watanabe, S., Narayanan, S.: A review of speaker diarization: recent advances with deep learning. Comput. Speech Lang. 72, 101317 (2022)","DOI":"10.1016\/j.csl.2021.101317"},{"issue":"7","key":"37_CR2","doi-asserted-by":"publisher","first-page":"1382","DOI":"10.1109\/TASL.2009.2015698","volume":"17","author":"D Vijayasenan","year":"2009","unstructured":"Vijayasenan, D., Valente, F., Bourlard, H.: An information theoretic approach to speaker diarization of meeting data. IEEE Trans. Audio Speech Lang. Process. 17(7), 1382\u20131393 (2009)","journal-title":"IEEE Trans. Audio Speech Lang. Process."},{"issue":"2","key":"37_CR3","doi-asserted-by":"publisher","first-page":"356","DOI":"10.1109\/TASL.2011.2125954","volume":"20","author":"X Anguera","year":"2012","unstructured":"Anguera, X., Bozonnet, S., Evans, N., Fredouille, C., Friedland, G., Vinyals, O.: Speaker diarization: a review of recent research. IEEE Trans. Audio Speech Lang. Process. 20(2), 356\u2013370 (2012)","journal-title":"IEEE Trans. Audio Speech Lang. Process."},{"key":"37_CR4","doi-asserted-by":"crossref","unstructured":"Wang, Q., Downey, C., Wan, L., Mansfield, P.A., Moreno, I.L.: Speaker diarization with LSTM. In: 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5239\u20135243 (2018)","DOI":"10.1109\/ICASSP.2018.8462628"},{"key":"37_CR5","doi-asserted-by":"crossref","unstructured":"Garcia-Romero, D., Snyder, D., Sell, G., Povey, D., McCree, A.: Speaker diarization using deep neural network embeddings. In: 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4930\u20134934 (2017)","DOI":"10.1109\/ICASSP.2017.7953094"},{"key":"37_CR6","doi-asserted-by":"crossref","unstructured":"Lin, Q., Yin, R., Li, M., Bredin, H., Barras, C.: LSTM based similarity measurement with spectral clustering for speaker diarization. In: Interspeech 2019, pp. 366\u2013370 (2019)","DOI":"10.21437\/Interspeech.2019-1388"},{"key":"37_CR7","doi-asserted-by":"crossref","unstructured":"Fujita, Y., Kanda, N., Horiguchi, S., Nagamatsu, K., Watanabe, S.: End-to-end neural speaker diarization with permutation-free objectives (2019)","DOI":"10.21437\/Interspeech.2019-2899"},{"key":"37_CR8","doi-asserted-by":"crossref","unstructured":"Fujita, Y., Kanda, N., Horiguchi, S., Xue, Y., Nagamatsu, K., Watanabe, S.: End-to-end neural speaker diarization with self-attention. In: 2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), pp. 296\u2013303 (2019)","DOI":"10.1109\/ASRU46091.2019.9003959"},{"key":"37_CR9","doi-asserted-by":"crossref","unstructured":"Horiguchi, S., Fujita, Y., Watanabe, S., Xue, Y., Nagamatsu, K.: End-to-end speaker diarization for an unknown number of speakers with encoder-decoder based attractors (2020)","DOI":"10.21437\/Interspeech.2020-1022"},{"key":"37_CR10","doi-asserted-by":"crossref","unstructured":"Medennikov, I., et al.: Target-speaker voice activity detection: a novel approach for multi-speaker diarization in a dinner party scenario. In: Interspeech 2020, pp. 274\u2013278 (2020)","DOI":"10.21437\/Interspeech.2020-1602"},{"key":"37_CR11","doi-asserted-by":"crossref","unstructured":"He, M., Raj, D., Huang, Z., Du, J., Chen, Z., Watanabe, S.: Target-speaker voice activity detection with improved i-vector estimation for unknown number of speaker (2021)","DOI":"10.21437\/Interspeech.2021-750"},{"key":"37_CR12","doi-asserted-by":"crossref","unstructured":"Kinoshita, K., Delcroix, M., Tawara, N.: Integrating end-to-end neural and clustering-based diarization: getting the best of both worlds. In: ICASSP 2021\u20132021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7198\u20137202 (2021)","DOI":"10.1109\/ICASSP39728.2021.9414333"},{"key":"37_CR13","doi-asserted-by":"crossref","unstructured":"Yu, F., et al.: Summary on the ICASSP 2022 multi-channel multi-party meeting transcription grand challenge. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 9156\u20139160 (2022)","DOI":"10.1109\/ICASSP43922.2022.9746270"},{"key":"37_CR14","doi-asserted-by":"crossref","unstructured":"Kinoshita, K., Delcroix, M., Iwata, T.: Tight integration of neural- and clustering-based diarization through deep unfolding of infinite gaussian mixture model. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 8382\u20138386 (2022)","DOI":"10.1109\/ICASSP43922.2022.9746234"},{"key":"37_CR15","doi-asserted-by":"crossref","unstructured":"Geiger, J.T., Wallhoff, F., Rigoll, G.: GMM-UBM based open-set online speaker diarization. In: Interspeech 2010, pp. 2330\u20132333 (2010)","DOI":"10.21437\/Interspeech.2010-638"},{"key":"37_CR16","doi-asserted-by":"crossref","unstructured":"Vaquero, C., Vinyals, O., Friedland, G.: A hybrid approach to online speaker diarization. In: Interspeech 2010, pp. 2638\u20132641 (2010)","DOI":"10.21437\/Interspeech.2010-700"},{"key":"37_CR17","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: Low-latency online speaker diarization with graph-based label generation (2021)","DOI":"10.21437\/Odyssey.2022-23"},{"key":"37_CR18","doi-asserted-by":"crossref","unstructured":"Xia, W., et al.: Turn-to-diarize: online speaker diarization constrained by transformer transducer speaker turn detection. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 8077\u20138081 (2022)","DOI":"10.1109\/ICASSP43922.2022.9746531"},{"key":"37_CR19","doi-asserted-by":"crossref","unstructured":"Yue, Y., Du, J., He, M., Yang, Y., Wang, R.: Online speaker diarization with core samples selection. In: Submitted for Interspeech 2022 (2022)","DOI":"10.21437\/Interspeech.2022-10363"},{"key":"37_CR20","doi-asserted-by":"crossref","unstructured":"Landini, F., Profant, J., Diez, M., Burget, L.: Bayesian HMM clustering of x-vector sequences (VBx) in speaker diarization: theory, implementation and analysis on standard tasks. Comput. Speech Lang. 71, 101254 (2021)","DOI":"10.1016\/j.csl.2021.101254"},{"key":"37_CR21","doi-asserted-by":"crossref","unstructured":"Zhang, A., Wang, Q., Zhu, Z., Paisley, J., Wang, C.: Fully supervised speaker diarization. In: ICASSP 2019\u20132019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6301\u20136305 (2019)","DOI":"10.1109\/ICASSP.2019.8683892"},{"key":"37_CR22","doi-asserted-by":"crossref","unstructured":"Fini, E., Brutti, A.: Supervised online diarization with sample mean loss for multi-domain data. In: ICASSP 2020\u20132020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7134\u20137138 (2020)","DOI":"10.1109\/ICASSP40776.2020.9053477"},{"key":"37_CR23","doi-asserted-by":"crossref","unstructured":"Xue, Y., et al.: Online streaming end-to-end neural diarization handling overlapping speech and flexible numbers of speakers (2021)","DOI":"10.21437\/Interspeech.2021-708"},{"key":"37_CR24","doi-asserted-by":"crossref","unstructured":"He, M., et al.: The USTC-Ximalaya system for the ICASSP 2022 multi-channel multi-party meeting transcription (M2MeT) challenge. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 9166\u20139170 (2022)","DOI":"10.1109\/ICASSP43922.2022.9747067"},{"key":"37_CR25","doi-asserted-by":"crossref","unstructured":"Yu, F., et al.: M2MeT: the ICASSP 2022 multi-channel multi-party meeting transcription challenge. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (2022)","DOI":"10.1109\/ICASSP43922.2022.9746465"}],"container-title":["Lecture Notes in Computer Science","Biometric Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20233-9_37","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T00:30:04Z","timestamp":1667435404000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20233-9_37"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031202322","9783031202339"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20233-9_37","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"3 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CCBR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Biometric Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Beijing","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccbr2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ccbr99.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"115","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"70","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"61% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}