{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:33:09Z","timestamp":1730266389644,"version":"3.28.0"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,30]]},"DOI":"10.1109\/ijcnn60899.2024.10650444","type":"proceedings-article","created":{"date-parts":[[2024,9,9]],"date-time":"2024-09-09T17:35:05Z","timestamp":1725903305000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["A Multimodal Contrastive Network with Unbiased Distillation for Knowledge-based VQA"],"prefix":"10.1109","author":[{"given":"Zihan","family":"Hu","sequence":"first","affiliation":[{"name":"Guangdong University of Technology,School of Computer Science,China"}]},{"given":"Ruoyao","family":"Ding","sequence":"additional","affiliation":[{"name":"Guangdong University of Foreign Studies,School of Cyber Security,China"}]},{"given":"Haoran","family":"Xie","sequence":"additional","affiliation":[{"name":"Lingnan University,Department of Computing and Decision Sciences,Hong Kong,China"}]},{"given":"Zhenguo","family":"Yang","sequence":"additional","affiliation":[{"name":"Guangdong University of Technology,School of Computer Science,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref2","first-page":"1","article-title":"Reasoning over vision and language: Exploring the benefits of supplemental knowledge","author":"Shevchenko","year":"2021","journal-title":"Beyond Vision and Language: integrating Real-world knowledge"},
{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547870"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.118669"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00503"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00501"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20174"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.171"},{"key":"ref9","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Annual Conference on Neural Information Processing Systems","author":"Li"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.265"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3462987"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9533769"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9206989"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534452"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00175"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00146"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.552"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.495"},{"key":"ref19","first-page":"21","article-title":"VL-BERT: pre-training of generic visual-linguistic representations","volume-title":"International Conference on Learning Representations","author":"Su"},{"key":"ref20","first-page":"4271","article-title":"Funnel-transformer: Filtering out sequential redundancy for efficient language processing","volume":"33","author":"Dai","year":"2020","journal-title":"Advances in neural information processing systems"},
{"article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","year":"2023","author":"Li","key":"ref21"},{"key":"ref22","first-page":"1571","article-title":"Bilinear attention networks","volume-title":"Annual Conference on Neural Information Processing Systems","author":"Kim"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00644"},{"key":"ref24","first-page":"12888","article-title":"BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"International Conference on Machine Learning","author":"Li"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00121"},{"key":"ref26","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"The Journal of Machine Learning Research"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00197"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2023.3267036"},{"key":"ref29","first-page":"101944","article-title":"Learn to explain: Multimodal reasoning via thought chains for science question answering","volume-title":"Conference on Neural Information Processing Systems","author":"Lu"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.734"},{"key":"ref33","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},
{"article-title":"Chameleon: Plug-and-play compositional reasoning with large language models","year":"2023","author":"Lu","key":"ref34"},{"article-title":"Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning","volume-title":"Conference on Neural Information Processing Systems Datasets","author":"Lu","key":"ref35"},{"article-title":"Visualbert: A simple and performant baseline for vision and language","year":"2019","author":"Li","key":"ref36"},{"key":"ref37","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"key":"ref38","first-page":"744","article-title":"UNIFIEDIO: A unified model for vision, language, and multi-modal tasks","volume-title":"International Conference on Learning Representations","author":"Lu"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.772"}],"event":{"name":"2024 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2024,6,30]]},"location":"Yokohama, Japan","end":{"date-parts":[[2024,7,5]]}},"container-title":["2024 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10649807\/10649898\/10650444.pdf?arnumber=10650444","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T05:19:58Z","timestamp":1725945598000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10650444\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,30]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/ijcnn60899.2024.10650444","relation":{},"subject":[],"published":{"date-parts":[[2024,6,30]]}}}