{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:36:01Z","timestamp":1740101761996,"version":"3.37.3"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10096063","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"page":"1-5","source":"Crossref","is-referenced-by-count":1,"title":["WL-MSR: Watch and Listen for Multimodal Subtitle Recognition"],"prefix":"10.1109","author":[{"given":"Jiawei","family":"Liu","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences,The Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation"}]},{"given":"Hao","family":"Wang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation"}]},{"given":"Weining","family":"Wang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation"}]},{"given":"Xingjian","family":"He","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation"}]},{"given":"Jing","family":"Liu","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation"}]}],"member":"263","reference":[{"key":"ref13","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"NIPS"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2017.143"},{"journal-title":"Continuously differentiable exponential linear units","year":"2017","author":"barron","key":"ref23"},{"key":"ref15","article-title":"UNITER: universal image-text representation learning","author":"chen","year":"2020","journal-title":"ECCV"},{"key":"ref14","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"NAACL-HLT"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"journal-title":"Chinese text in the wild","year":"2018","author":"yuan","key":"ref22"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_4"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00254"},{"journal-title":"Wenet Production oriented streaming and non-streaming end-to-end speech recognition toolkit","year":"2021","author":"yao","key":"ref2"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054250"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052974"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-4009"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR56361.2022.9956308"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICACCS48705.2020.9074180"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2285"},{"journal-title":"wav2vec 2 0 A framework for self-supervised learning of speech representations","year":"2020","author":"baevski","key":"ref9"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00314"},{"key":"ref3","article-title":"Faster r-cnn: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"NIPS"},{"journal-title":"Read like humans Autonomous bidirectional and iterative language modeling for scene text recognition","year":"2021","author":"fang","key":"ref6"},{"key":"ref5","article-title":"An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition","author":"shi","year":"2016","journal-title":"IEEE T-PAMI"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2023,6,4]]},"location":"Rhodes Island, Greece","end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10096063.pdf?arnumber=10096063","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,20]],"date-time":"2023-11-20T19:00:29Z","timestamp":1700506829000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10096063\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10096063","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}