{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T18:12:22Z","timestamp":1771697542538,"version":"3.50.1"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61925201"],"award-info":[{"award-number":["61925201"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61771025"],"award-info":[{"award-number":["61771025"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/tip.2019.2952085","type":"journal-article","created":{"date-parts":[[2019,11,22]],"date-time":"2019-11-22T21:09:20Z","timestamp":1574456960000},"page":"2728-2741","source":"Crossref","is-referenced-by-count":35,"title":["MAVA: Multi-Level Adaptive Visual-Textual Alignment by Cross-Media Bi-Attention Mechanism"],"prefix":"10.1109","volume":"29","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7658-3845","authenticated-orcid":false,"given":"Yuxin","family":"Peng","sequence":"first","affiliation":[]},{"given":"Jinwei","family":"Qi","sequence":"additional","affiliation":[]},{"given":"Yunkan","family":"Zhuo","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref38","article-title":"Dual-path convolutional image-text embedding with instance loss","author":"zheng","year":"2017","journal-title":"arXiv 1711 05535"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref32","first-page":"91","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref37","first-page":"1","article-title":"VSE++: Improving visual-semantic embeddings with hard negatives","author":"faghri","year":"2018","journal-title":"Proc Brit Mach Vis Conf (BMVC)"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240712"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00645"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2654902"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref11","first-page":"1247","article-title":"Deep canonical correlation analysis","author":"andrew","year":"2013","journal-title":"Proc 30th Int Conf Mach Learn (ICML)"},{"key":"ref12","first-page":"3846","article-title":"Cross-media shared representation by hierarchical learning with multiple deep networks","author":"peng","year":"2016","journal-title":"Proc Intern Joint Conf Artificial Intel (IJCAI)"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2015.2400779"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2017.2742704"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2852503"},{"key":"ref16","first-page":"201","article-title":"Stacked cross attention for image-text matching","author":"lee","year":"2018","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/124"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1093\/biomet\/28.3-4.321"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1162\/0899766042321814"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240535"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-013-0658-4"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123326"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/1873951.1873987"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2016.2592800"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2015.2466106"},{"key":"ref8","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2676345"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/264746a0"},{"key":"ref9","first-page":"649","article-title":"Character-level convolutional networks for text classification","author":"zhang","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2017.2705068"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2013.2276704"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/2775109"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2015.2505311"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123317"},{"key":"ref41","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2016.2519449"},{"key":"ref26","article-title":"CM-GANs: Cross-modal generative adversarial networks for common representation learning","author":"peng","year":"2017","journal-title":"arXiv 1710 05106"},{"key":"ref25","first-page":"2672","article-title":"Generative adversarial nets","author":"goodfellow","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/83\/8835130\/08910611.pdf?arnumber=8910611","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T14:39:03Z","timestamp":1651070343000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8910611\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/tip.2019.2952085","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]}}}