{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T04:44:49Z","timestamp":1725684289648},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/icpr56361.2022.9956308","type":"proceedings-article","created":{"date-parts":[[2022,11,29]],"date-time":"2022-11-29T19:34:13Z","timestamp":1669750453000},"page":"4974-4980","source":"Crossref","is-referenced-by-count":1,"title":["ICPR 2022 Challenge on Multi-Modal Subtitle Recognition"],"prefix":"10.1109","author":[{"given":"Shan","family":"Huang","sequence":"first","affiliation":[{"name":"Beijing University of Post and Technology"}]},{"given":"Shen","family":"Huang","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Li","family":"Lu","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Pengfei","family":"Hu","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Lijuan","family":"Wang","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Xiang","family":"Wang","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Jian","family":"Kang","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Weida","family":"Liang","sequence":"additional","affiliation":[{"name":"Tencent"}]},{"given":"Lianwen","family":"Jin","sequence":"additional","affiliation":[{"name":"South China University of Technology"}]},{"given":"Yuliang","family":"Liu","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology"}]},{"given":"Yaqiang","family":"Wu","sequence":"additional","affiliation":[{"name":"Lenovo"}]},{"given":"Yong","family":"Liu","sequence":"additional","affiliation":[{"name":"Beijing University of Post and Technology"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746682"},{"key":"ref13","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","author":"baevski","year":"2020","journal-title":"NIPS"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-4009"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1983"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2005.186"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2017.2772828"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2404"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2020","author":"dosovitskiy","key":"ref4"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00250"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1417"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2008.137"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"ref7","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"ASRU(workshop)"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2646371"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3350988"},{"key":"ref1","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"ASRU(workshop)"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"ref22","article-title":"Faster r-cnn: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"NIPS"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054250"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3478328"},{"article-title":"Mmdetection: Open mmlab detection toolbox and benchmark","year":"2019","author":"chen","key":"ref23"},{"article-title":"End-to-end domain-adversarial voice activity detection","year":"2019","author":"lavechin","key":"ref26"},{"key":"ref25","article-title":"Pyannote. audio: neural building blocks for speaker diarization","author":"bredin","year":"2020","journal-title":"ICASSP"}],"event":{"name":"2022 26th International Conference on Pattern Recognition (ICPR)","start":{"date-parts":[[2022,8,21]]},"location":"Montreal, QC, Canada","end":{"date-parts":[[2022,8,25]]}},"container-title":["2022 26th International Conference on Pattern Recognition (ICPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9956007\/9955631\/09956308.pdf?arnumber=9956308","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,19]],"date-time":"2022-12-19T20:05:26Z","timestamp":1671480326000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9956308\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icpr56361.2022.9956308","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}