{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:28:18Z","timestamp":1763191698739,"version":"3.45.0"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004479","name":"Natural Science Foundation of Jiangxi Province","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11227221","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["What Happens in the Surroundings: A Benchmark for 360\u00b0 image Captioning"],"prefix":"10.1109","author":[{"given":"Wenhui","family":"Jiang","sequence":"first","affiliation":[{"name":"Jiangxi University of Finance and Economics,Nanchang,China"}]},{"given":"Tiancong","family":"Xu","sequence":"additional","affiliation":[{"name":"Jiangxi University of Finance and Economics,Nanchang,China"}]},{"given":"Haijun","family":"Li","sequence":"additional","affiliation":[{"name":"Jiangxi University of Finance and Economics,Nanchang,China"}]},{"given":"Zichen","family":"Li","sequence":"additional","affiliation":[{"name":"Jiangxi University of Finance and Economics,Nanchang,China"}]},{"given":"Yuming","family":"Fang","sequence":"additional","affiliation":[{"name":"Jiangxi University of Finance and Economics,Nanchang,China"}]},{"given":"Zhen","family":"Tang","sequence":"additional","affiliation":[{"name":"Sany Heavy Industry CO., LTD,Sany,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3148210"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3617592"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2021.108358"},{"key":"ref4","first-page":"6940","article-title":"Query-based image captioning from multi-context 360-degree images","volume-title":"Conf. Empirical Methods Natural Language Proc","author":"Maeda"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/LGRS.2024.3523134"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.displa.2022.102238"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00675"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3052073"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3050888"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i1.19937"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2025.3539468"},{"article-title":"Spherical vision transformer for 360-degree video saliency prediction","volume-title":"Proceedings of the British Machine Vision Conference","author":"Cokelek","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_25"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3209015"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2020.3023636"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i1.19929"},{"key":"ref17","first-page":"595","article-title":"Multimodal neural language models","volume-title":"Proc. Int. Conf. Machine Learning","author":"Kiros"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2582924"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.5555\/3045118.3045336"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00435"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00902"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3177318"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_10"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s11760-022-02350-9"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/s11760-023-02725-6"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.5555\/3294771.3294822"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01240-3_32"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1405.0312"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/JSTARS.2017.2694890"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr42600.2020.00436"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/wacv45572.2020.9093452"},{"article-title":"Joint 2d-3d-semantic data for indoor scene understanding","year":"2017","author":"Armeni","key":"ref35"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3050861"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.644"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-3348"},{"key":"ref41","first-page":"74","article-title":"Rouge: A package for automatic evaluation of summaries","author":"Lin","year":"2004","journal-title":"Ass. Comput. Linguistics Worksh"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01059"},{"article-title":"How much can CLIP benefit vision-and-language tasks?","volume-title":"Int. Conf. Learn. Represent","author":"Shen","key":"ref45"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3479207"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01098"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11227221.pdf?arnumber=11227221","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:25:05Z","timestamp":1763191505000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11227221\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11227221","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}