{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:26:45Z","timestamp":1772908005006,"version":"3.50.1"},"reference-count":51,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"Zhejiang Province Natural Science Foundation","doi-asserted-by":"publisher","award":["LQ21F020014"],"award-info":[{"award-number":["LQ21F020014"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"Zhejiang Province Natural Science Foundation","doi-asserted-by":"publisher","award":["LZ23F020007"],"award-info":[{"award-number":["LZ23F020007"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"Zhejiang Province Natural Science Foundation","doi-asserted-by":"publisher","award":["LDT23F02025F02"],"award-info":[{"award-number":["LDT23F02025F02"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"Zhejiang Province Natural Science Foundation","doi-asserted-by":"publisher","award":["LR22F020001"],"award-info":[{"award-number":["LR22F020001"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62002091"],"award-info":[{"award-number":["62002091"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62422204"],"award-info":[{"award-number":["62422204"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072147"],"award-info":[{"award-number":["62072147"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tcsvt.2024.3502736","type":"journal-article","created":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T19:13:25Z","timestamp":1732130005000},"page":"3383-3395","source":"Crossref","is-referenced-by-count":7,"title":["Action-Driven Semantic Representation and Aggregation for Video Captioning"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2131-9200","authenticated-orcid":false,"given":"Tingting","family":"Han","sequence":"first","affiliation":[{"name":"School of Computer Science, Hangzhou Dianzi University, Hangzhou, China"}]},{"given":"Yaochen","family":"Xu","sequence":"additional","affiliation":[{"name":"School of Computer Science, Hangzhou Dianzi University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1922-7283","authenticated-orcid":false,"given":"Jun","family":"Yu","sequence":"additional","affiliation":[{"name":"School of Computer Science, Hangzhou Dianzi University, Hangzhou, China"}]},{"given":"Zhou","family":"Yu","sequence":"additional","affiliation":[{"name":"School of Computer Science, Hangzhou Dianzi University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5843-6411","authenticated-orcid":false,"given":"Sicheng","family":"Zhao","sequence":"additional","affiliation":[{"name":"Beijing National Research Center for Information Science and Technology (BNRist), Tsinghua University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612863"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3318220"},{"key":"ref3","article-title":"Video summarization with long short-term memory","author":"Zhang","year":"2016","journal-title":"arXiv:1605.08110"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3312325"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3611873"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3317447"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01741"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i3.25484"},{"key":"ref10","article-title":"GL-RG: Global\u2013local representation granularity for video captioning","author":"Yan","year":"2022","journal-title":"arXiv:2205.10706"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01329"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.548"},{"key":"ref13","first-page":"213","article-title":"End-to-end object detection with transformers","volume-title":"Proc. Eur. Conf. Comput. Vis.","author":"Carion"},{"key":"ref14","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proc. 49th Annu. Meeting Assoc. Comput. Linguistics, Hum. Lang. Technol.","author":"Chen"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.515"},{"key":"ref17","article-title":"Video description generation incorporating spatio-temporal features and a soft-attention mechanism","author":"Yao","year":"2015","journal-title":"arXiv:1502.08029"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-44781-0_1"},{"key":"ref19","article-title":"Hierarchical LSTM with adjusted temporal attention for video captioning","author":"Song","year":"2017","journal-title":"arXiv:1706.01231"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3479207"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01742"},{"key":"ref22","first-page":"2514","article-title":"Semantic grouping network for video captioning","volume-title":"Proc. 35th AAAI Conf. Artif. Intell.","author":"Ryu"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3177320"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3131721"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00259"},{"key":"ref26","article-title":"Inception-ResNet and the impact of residual connections on learning","author":"Szegedy","year":"2016","journal-title":"arXiv:1602.07261"},{"key":"ref27","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"arXiv:2103.00020"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00685"},{"key":"ref29","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","author":"Ren","year":"2015","journal-title":"arXiv:1506.01497"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d19-1410"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"83","DOI":"10.1002\/nav.3800020109","article-title":"The Hungarian method for the assignment problem","volume":"2","author":"Kuhn","year":"1955","journal-title":"Nav. Res. Logistics Quart."},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00784"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00795"},{"key":"ref35","first-page":"358","article-title":"Less is more: Picking informative frames for video captioning","volume-title":"Proc. Eur. Conf. Comput. Vis. (ECCV)","author":"Chen"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00852"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00854"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00273"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01088"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01311"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/iccv48922.2021.00157"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25343"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01816"},{"key":"ref44","article-title":"EVCap: Retrieval-augmented image captioning with external visual-name memory for open-world comprehension","author":"Li","year":"2023","journal-title":"arXiv:2311.15879"},{"key":"ref45","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2023.109906","article-title":"Global semantic enhancement network for video captioning","volume":"145","author":"Luo","year":"2024","journal-title":"Pattern Recognit."},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3295098"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref48","first-page":"74","article-title":"Rouge: A package for automatic evaluation of summaries","volume-title":"Proc. Text Summarization Branches Out","author":"Lin"},{"key":"ref49","first-page":"65","article-title":"METEOR: An automatic metric for MT evaluation with improved correlation with human judgments","volume-title":"Proc. ACL Workshop Intrinsic Extrinsic Eval. Measures Mach. Transl. Summarization","author":"Banerjee"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref51","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/10949577\/10759582.pdf?arnumber=10759582","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,5]],"date-time":"2025-04-05T07:20:53Z","timestamp":1743837653000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10759582\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":51,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2024.3502736","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}