{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T22:12:25Z","timestamp":1773439945828,"version":"3.50.1"},"reference-count":76,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004731","name":"Zhejiang Provincial Natural Science Foundation of China","doi-asserted-by":"publisher","award":["LY23F020005"],"award-info":[{"award-number":["LY23F020005"]}],"id":[{"id":"10.13039\/501100004731","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62125201"],"award-info":[{"award-number":["62125201"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62002314"],"award-info":[{"award-number":["62002314"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62020106007"],"award-info":[{"award-number":["62020106007"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
on Image Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/tip.2024.3390984","type":"journal-article","created":{"date-parts":[[2024,4,24]],"date-time":"2024-04-24T17:35:07Z","timestamp":1713980107000},"page":"3115-3129","source":"Crossref","is-referenced-by-count":13,"title":["Multi-Granularity Contrastive Cross-Modal Collaborative Generation for End-to-End Long-Term Video Question Answering"],"prefix":"10.1109","volume":"33","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4348-2671","authenticated-orcid":false,"given":"Ting","family":"Yu","sequence":"first","affiliation":[{"name":"School of Information Science and Technology, Hangzhou Normal University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-6139-509X","authenticated-orcid":false,"given":"Kunhao","family":"Fu","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, Hangzhou Normal University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6478-9192","authenticated-orcid":false,"given":"Jian","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, Hangzhou Normal University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7542-296X","authenticated-orcid":false,"given":"Qingming","family":"Huang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1922-7283","authenticated-orcid":false,"given":"Jun","family":"Yu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Hangzhou Dianzi University, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3051756"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00999"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3142526"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00688"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3205212"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3076556"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2922062"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3366710"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/178"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2940677"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_3"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2020.2995959"},{"key":"ref13","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"ref17","first-page":"26462","article-title":"Learning from inside: Self-driven Siamese sampling and reasoning for video question answering","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Yu"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.11238"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.515"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123364"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00210"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33018658"},{"key":"ref25","article-title":"Temporal pyramid transformer with multimodal interaction for video question answering","author":"Peng","year":"2021","journal-title":"arXiv:2109.04735"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6737"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00499"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00294"},{"key":"ref30","first-page":"1","article-title":"The neuro-symbolic concept learner: Interpreting scenes, words, and sentences from natural supervision","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Mao"},{"key":"ref31","first-page":"1039","article-title":"Neural-symbolic VQA: Disentangling reasoning from vision and language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yi"},{"key":"ref32","first-page":"1","article-title":"CLEVRER: Collision events for video representation and reasoning","volume-title":"Proc. ICLR","author":"Yi"},{"key":"ref33","first-page":"1","article-title":"Grounding physical concepts of objects and events through dynamic visual reasoning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Chen"},{"key":"ref34","first-page":"9112","article-title":"Attention over learned object embeddings enables complex visual reasoning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ding"},{"key":"ref35","article-title":"MONet: Unsupervised scene decomposition and representation","author":"Burgess","year":"2019","journal-title":"arXiv:1901.11390"},{"key":"ref36","first-page":"887","article-title":"Dynamic visual reasoning by learning differentiable physics models from video and language","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Ding"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.432"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.501"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d18-1167"},{"key":"ref40","article-title":"TVQA+: Spatio-temporal grounding for video question answering","author":"Lei","year":"2019","journal-title":"arXiv:1904.11574"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019127"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/512"},{"key":"ref43","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00685"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01742"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"ref51","article-title":"UniVL: A unified video and language pre-training model for multimodal understanding and generation","author":"Luo","year":"2020","journal-title":"arXiv:2002.06353"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref53","first-page":"1","article-title":"Support-set bottlenecks for video-text representation learning","volume-title":"Proc. ICLR","author":"Patrick"},{"key":"ref54","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. ICML","author":"Li"},{"key":"ref55","article-title":"OmniVL: One foundation model for image-language and video-language tasks","author":"Wang","year":"2022","journal-title":"arXiv:2209.07526"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.05095"},{"key":"ref57","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref58","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","author":"Wang","year":"2021","journal-title":"arXiv:2108.10904"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00965"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref61","first-page":"8026","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Paszke"},{"key":"ref62","first-page":"1","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Dosovitskiy"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref64","article-title":"Decoupled weight decay regularization","author":"Loshchilov","year":"2017","journal-title":"arXiv:1711.05101"},{"key":"ref65","article-title":"A multi-world approach to question answering about real-world scenes based on uncertain input","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Malinowski"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01189-x"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3350969"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00171"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00293"},{"key":"ref70","article-title":"Microsoft COCO captions: Data collection and evaluation server","author":"Chen","year":"2015","journal-title":"arXiv:1504.00325"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref72","first-page":"124","article-title":"Zero-shot video question answering via frozen bidirectional language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref73","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023","journal-title":"arXiv:2301.12597"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.325"},{"key":"ref75","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Alayrac"},{"key":"ref76","article-title":"OPT: Open pre-trained transformer language models","author":"Zhang","year":"2022","journal-title":"arXiv:2205.01068"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/83\/10346232\/10508294.pdf?arnumber=10508294","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,5,3]],"date-time":"2024-05-03T19:15:02Z","timestamp":1714763702000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10508294\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":76,"URL":"https:\/\/doi.org\/10.1109\/tip.2024.3390984","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}