{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:23:47Z","timestamp":1772907827940,"version":"3.50.1"},"reference-count":73,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2020AAA0109301"],"award-info":[{"award-number":["2020AAA0109301"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62132006"],"award-info":[{"award-number":["62132006"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62161013"],"award-info":[{"award-number":["62161013"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004479","name":"Natural Science Foundation of Jiangxi Province","doi-asserted-by":"publisher","award":["20202ACB202007"],"award-info":[{"award-number":["20202ACB202007"]}],"id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004479","name":"Natural Science Foundation of Jiangxi Province","doi-asserted-by":"publisher","award":["20203BBE53033"],"award-info":[{"award-number":["20203BBE53033"]}],"id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2022]]},"DOI":"10.1109\/tip.2022.3177318","type":"journal-article","created":{"date-parts":[[2022,5,30]],"date-time":"2022-05-30T22:08:36Z","timestamp":1653948516000},"page":"3920-3934","source":"Crossref","is-referenced-by-count":42,"title":["Visual Cluster Grounding for Image Captioning"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4144-6725","authenticated-orcid":false,"given":"Wenhui","family":"Jiang","sequence":"first","affiliation":[{"name":"School of Information Management, Jiangxi University of Finance and Economics, Nanchang, China"}]},{"given":"Minwei","family":"Zhu","sequence":"additional","affiliation":[{"name":"School of Information Management, Jiangxi University of Finance and Economics, Nanchang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6946-3586","authenticated-orcid":false,"given":"Yuming","family":"Fang","sequence":"additional","affiliation":[{"name":"School of Information Management, Jiangxi University of Finance and Economics, Nanchang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2179-3292","authenticated-orcid":false,"given":"Guangming","family":"Shi","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Xidian University, Xi&#x2019;an, China"}]},{"given":"Xiaowei","family":"Zhao","sequence":"additional","affiliation":[{"name":"Sany Heavy Industry Company Ltd., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9982-9887","authenticated-orcid":false,"given":"Yang","family":"Liu","sequence":"additional","affiliation":[{"name":"Sany Heavy Industry Company Ltd., Beijing, China"}]}],"member":"263","reference":[{"key":"ref73","first-page":"2","article-title":"Memory-augmented image captioning","author":"fei","year":"2021","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref71","first-page":"1","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01034"},{"key":"ref38","first-page":"2286","article-title":"Dual-level collaborative transformer for image captioning","author":"luo","year":"2021","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.131"},{"key":"ref32","first-page":"2584","article-title":"Image captioning with context-aware auxiliary guidance","author":"song","year":"2021","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2928144"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2951226"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00902"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3121705"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-020-00257-z"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3051476"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58523-5_21"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2017.10.001"},{"key":"ref61","first-page":"3394","article-title":"Consensus graph representation learning for better grounded image captioning","author":"zhang","year":"2021","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.89"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3072479"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.303"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.3004729"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00850"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01098"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-3348"},{"key":"ref69","first-page":"74","article-title":"Rouge: A package for automatic evaluation of summaries","author":"lin","year":"2004","journal-title":"Proc Assoc Comput Linguistics Workshop"},{"key":"ref2","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1437"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00483"},{"key":"ref21","first-page":"4176","article-title":"Attention correctness in neural image captioning","author":"liu","year":"2017","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00674"},{"key":"ref26","first-page":"1112","article-title":"Hierarchical LSTMs with adaptive attention for visual captioning","volume":"42","author":"gao","year":"2020","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00478"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01661"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00182"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00556"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01387"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58580-8_44"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00269"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_49"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00425"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00179"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.345"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.667"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01045"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00473"},{"key":"ref13","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc Nerual Inf Process Syst"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00646"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01071"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01521"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475354"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01219-9_47"},{"key":"ref19","first-page":"1865","article-title":"Prophet attention: Predicting attention with future attention","author":"liu","year":"2020","journal-title":"Proc Nerual Inf Process Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.117"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01059"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6898"},{"key":"ref8","first-page":"2048","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2577031"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6833"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2477044"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00437"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01258-8_16"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00430"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2797921"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.7005"},{"key":"ref44","first-page":"1","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","author":"wang","year":"2022","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00553"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/83\/9626658\/09785461.pdf?arnumber=9785461","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,4]],"date-time":"2022-07-04T20:06:18Z","timestamp":1656965178000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9785461\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"references-count":73,"URL":"https:\/\/doi.org\/10.1109\/tip.2022.3177318","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]}}}