{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:17:37Z","timestamp":1740100657595,"version":"3.37.3"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747262","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"8257-8261","source":"Crossref","is-referenced-by-count":2,"title":["Multi-Speaker Pitch Tracking via Embodied Self-Supervised Learning"],"prefix":"10.1109","author":[{"given":"Xiang","family":"Li","sequence":"first","affiliation":[{"name":"Peking University,Key Laboratory of Machine Perception (Ministry of Education),Department of Machine Intelligence, Speech and Hearing Research Center,Beijing,China"}]},{"given":"Yifan","family":"Sun","sequence":"additional","affiliation":[{"name":"Peking University,Key Laboratory of Machine Perception (Ministry of Education),Department of Machine Intelligence, Speech and Hearing Research Center,Beijing,China"}]},{"given":"Xihong","family":"Wu","sequence":"additional","affiliation":[{"name":"Peking University,Key Laboratory of Machine Perception (Ministry of Education),Department of Machine Intelligence, Speech and Hearing Research Center,Beijing,China"}]},{"given":"Jing","family":"Chen","sequence":"additional","affiliation":[{"name":"Peking University,Key Laboratory of Machine Perception (Ministry of Education),Department of Machine Intelligence, Speech and Hearing Research Center,Beijing,China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461526"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-51662-2"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/0010-0277(85)90021-6"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1017\/cnj.2017.15"},{"article-title":"Embodied self-supervised learning by coordinated sampling and training","year":"2020","author":"sun","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2017.2726762"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2915167"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1121\/1.2229005"},{"key":"ref18","first-page":"341","article-title":"Praat, a system for doing phonetics by computer","volume":"5","author":"boersma","year":"2001","journal-title":"Glot Int"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953228"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472680"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-02558-7"},{"key":"ref5","first-page":"v","article-title":"Discriminative training of hidden markov models for multiple pitch tracking [speech processing examples]","volume":"5","author":"bach","year":"2005","journal-title":"Proceedings (ICASSP&#x2019;05) IEEE International Conference on Acoustics Speech and Signal Processing 2005"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"229","DOI":"10.1109\/TSA.2003.811539","article-title":"A multipitch tracking algorithm for noisy speech","volume":"11","author":"wu","year":"2003","journal-title":"IEEE Transactions on Speech and Audio Processing"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2077280"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1121\/1.1458024"},{"key":"ref1","first-page":"518","article-title":"A robust algorithm for pitch tracking (rapt)","volume":"495","author":"talkin","year":"1995","journal-title":"Speech Coding and Synthesis"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2064309"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2022,5,23]]},"location":"Singapore, Singapore","end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747262.pdf?arnumber=9747262","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:07:34Z","timestamp":1660594054000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747262\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747262","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}