{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T13:51:54Z","timestamp":1767707514527,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":40,"publisher":"ACM","license":[{"start":{"date-parts":[[2021,8,14]],"date-time":"2021-08-14T00:00:00Z","timestamp":1628899200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"MOST of China","award":["2008AAA0101502"],"award-info":[{"award-number":["2008AAA0101502"]}]},{"name":"NNSF of China","award":["61806198, U1811463"],"award-info":[{"award-number":["61806198, U1811463"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2021,8,14]]},"DOI":"10.1145\/3447548.3467285","type":"proceedings-article","created":{"date-parts":[[2021,8,12]],"date-time":"2021-08-12T06:13:10Z","timestamp":1628748790000},"page":"2360-2368","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":15,"title":["Knowledge is Power"],"prefix":"10.1145","author":[{"given":"Wenbo","family":"Zheng","sequence":"first","affiliation":[{"name":"Xi'an Jiaotong University &amp; Institute of Automation, Chinese Academy of Sciences, Xi'an, China"}]},{"given":"Lan","family":"Yan","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences &amp; University of Chinese Academy of Sciences, Beijing, China"}]},{"given":"Chao","family":"Gou","sequence":"additional","affiliation":[{"name":"Sun Yat-sen University, Guangzhou, China"}]},{"given":"Fei-Yue","family":"Wang","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences, Beijing, 
China"}]}],"member":"320","published-online":{"date-parts":[[2021,8,14]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"crossref","unstructured":"Peter Anderson Xiaodong He Chris Buehler Damien Teney Mark Johnson Stephen Gould and Lei Zhang. 2018. Bottom-Up and Top-Down Attention for Image Captioning and Visual Question Answering. In CVPR.  Peter Anderson Xiaodong He Chris Buehler Damien Teney Mark Johnson Stephen Gould and Lei Zhang. 2018. Bottom-Up and Top-Down Attention for Image Captioning and Visual Question Answering. In CVPR.","DOI":"10.1109\/CVPR.2018.00636"},{"key":"e_1_3_2_1_2_1","volume-title":"VQA: Visual Question Answering. In ICCV.","author":"Antol Stanislaw","year":"2015","unstructured":"Stanislaw Antol , Aishwarya Agrawal , Jiasen Lu , Margaret Mitchell , Dhruv Batra , C. Lawrence Zitnick , and Devi Parikh . 2015 . VQA: Visual Question Answering. In ICCV. Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C. Lawrence Zitnick, and Devi Parikh. 2015. VQA: Visual Question Answering. In ICCV."},{"key":"e_1_3_2_1_3_1","volume-title":"MUTAN: Multimodal Tucker Fusion for Visual Question Answering. In ICCV.","author":"Hedi","year":"2017","unstructured":"Hedi Ben-younes, Remi Cadene , Matthieu Cord , and Nicolas Thome . 2017 . MUTAN: Multimodal Tucker Fusion for Visual Question Answering. In ICCV. Hedi Ben-younes, Remi Cadene, Matthieu Cord, and Nicolas Thome. 2017. MUTAN: Multimodal Tucker Fusion for Visual Question Answering. In ICCV."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.3115\/1219044.1219075"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"crossref","unstructured":"Tianshui Chen Weihao Yu Riquan Chen and Liang Lin. 2019. Knowledge-Embedded Routing Network for Scene Graph Generation. In CVPR.  Tianshui Chen Weihao Yu Riquan Chen and Liang Lin. 2019. Knowledge-Embedded Routing Network for Scene Graph Generation. 
In CVPR.","DOI":"10.1109\/CVPR.2019.00632"},{"key":"#cr-split#-e_1_3_2_1_6_1.1","doi-asserted-by":"crossref","unstructured":"J. Deng W. Dong R. Socher L. Li Kai Li and Li Fei-Fei. 2009. ImageNet: A large-scale hierarchical image database. In CVPR. 248--255. https:\/\/doi.org\/10.1109\/CVPR.2009.5206848 10.1109\/CVPR.2009.5206848","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"#cr-split#-e_1_3_2_1_6_1.2","doi-asserted-by":"crossref","unstructured":"J. Deng W. Dong R. Socher L. Li Kai Li and Li Fei-Fei. 2009. ImageNet: A large-scale hierarchical image database. In CVPR. 248--255. https:\/\/doi.org\/10.1109\/CVPR.2009.5206848","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11573"},{"key":"e_1_3_2_1_8_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL.","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin , Ming-Wei Chang , Kenton Lee , and Kristina Toutanova . 2019 . BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL. Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"crossref","unstructured":"Xuanyi Dong Linchao Zhu De Zhang Yi Yang and Fei Wu. 2018. Fast Parameter Adaptation for Few-Shot Image Captioning and Visual Question Answering. In ACM MM. 54--62.  Xuanyi Dong Linchao Zhu De Zhang Yi Yang and Fei Wu. 2018. Fast Parameter Adaptation for Few-Shot Image Captioning and Visual Question Answering. In ACM MM. 54--62.","DOI":"10.1145\/3240508.3240527"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2938758"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"crossref","unstructured":"Noa Garcia and George Vogiatzis. 2018. How to Read Paintings: Semantic Art Understanding with Multi-Modal Retrieval. In ECCV.  
Noa Garcia and George Vogiatzis. 2018. How to Read Paintings: Semantic Art Understanding with Multi-Modal Retrieval. In ECCV.","DOI":"10.1007\/978-3-030-11012-3_52"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"crossref","unstructured":"Noa Garcia Chentao Ye Zihua Liu Qingtao Hu Mayu Otani Chenhui Chu Yuta Nakashima and Teruko Mitamura. 2020. A Dataset and Baselines for Visual Question Answering on Art. In ECCV Adrien Bartoli and Andrea Fusiello (Eds.).  Noa Garcia Chentao Ye Zihua Liu Qingtao Hu Mayu Otani Chenhui Chu Yuta Nakashima and Teruko Mitamura. 2020. A Dataset and Baselines for Visual Question Answering on Art. In ECCV Adrien Bartoli and Andrea Fusiello (Eds.).","DOI":"10.1007\/978-3-030-66096-3_8"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"crossref","unstructured":"Fran\u00e7ois Gard\u00e8res Maryam Ziaeefard Baptiste Abeloos and Freddy Lecue. 2020. ConceptBert: Concept-Aware Representation for Visual Question Answering. In Findings of EMNLP.  Fran\u00e7ois Gard\u00e8res Maryam Ziaeefard Baptiste Abeloos and Freddy Lecue. 2020. ConceptBert: Concept-Aware Representation for Visual Question Answering. In Findings of EMNLP.","DOI":"10.18653\/v1\/2020.findings-emnlp.44"},{"key":"e_1_3_2_1_14_1","unstructured":"Ian Goodfellow Yoshua Bengio and Aaron Courville. 2016. Deep Learning.  Ian Goodfellow Yoshua Bengio and Aaron Courville. 2016. Deep Learning."},{"key":"e_1_3_2_1_15_1","unstructured":"Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun. 2016. Deep Residual Learning for Image Recognition. In CVPR.  Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun. 2016. Deep Residual Learning for Image Recognition. In CVPR."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"crossref","unstructured":"Jie Hu Li Shen and Gang Sun. 2018. Squeeze-and-Excitation Networks. In CVPR.  Jie Hu Li Shen and Gang Sun. 2018. Squeeze-and-Excitation Networks. In CVPR.","DOI":"10.1109\/CVPR.2018.00745"},{"key":"e_1_3_2_1_17_1","unstructured":"Jin-Hwa Kim Jaehyun Jun and Byoung-Tak Zhang. 2018. 
Bilinear Attention Networks. In NIPS. 1571--1581.  Jin-Hwa Kim Jaehyun Jun and Byoung-Tak Zhang. 2018. Bilinear Attention Networks. In NIPS. 1571--1581."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.4.541"},{"key":"e_1_3_2_1_19_1","unstructured":"Guohao Li Xin Wang and Wenwu Zhu. 2020. Boosting Visual Question Answering with Context-Aware Knowledge Aggregation. In ACM MM. 1227--1235.  Guohao Li Xin Wang and Wenwu Zhu. 2020. Boosting Visual Question Answering with Context-Aware Knowledge Aggregation. In ACM MM. 1227--1235."},{"key":"e_1_3_2_1_20_1","unstructured":"Min Lin Qiang Chen and Shuicheng Yan. 2014. Network In Network. In ICLR.  Min Lin Qiang Chen and Shuicheng Yan. 2014. Network In Network. In ICLR."},{"key":"e_1_3_2_1_21_1","unstructured":"Jinlai Liu Zehuan Yuan and Changhu Wang. 2018. Towards Good Practices for Multi-modal Fusion in Large-scale Video Classification. In ECCV.  Jinlai Liu Zehuan Yuan and Changhu Wang. 2018. Towards Good Practices for Multi-modal Fusion in Large-scale Video Classification. In ECCV."},{"key":"e_1_3_2_1_22_1","unstructured":"Jiasen Lu Dhruv Batra Devi Parikh and Stefan Lee. [n.d.]. ViLBERT: Pretraining Task-Agnostic Visiolinguistic Representations for Vision-and-Language Tasks. In NeurIPS. 13--23.  Jiasen Lu Dhruv Batra Devi Parikh and Stefan Lee. [n.d.]. ViLBERT: Pretraining Task-Agnostic Visiolinguistic Representations for Vision-and-Language Tasks. In NeurIPS. 13--23."},{"key":"e_1_3_2_1_23_1","unstructured":"Jiasen Lu Jianwei Yang Dhruv Batra and Devi Parikh. 2016. Hierarchical Question-Image Co-Attention for Visual Question Answering. In NIPS. 289--297.  Jiasen Lu Jianwei Yang Dhruv Batra and Devi Parikh. 2016. Hierarchical Question-Image Co-Attention for Visual Question Answering. In NIPS. 289--297."},{"key":"e_1_3_2_1_24_1","unstructured":"A.L. Maas A.Y. Hannun and A.Y. Ng. 2013. Rectifier Nonlinearities Improve Neural Network Acoustic Models. In ICML. Atlanta Georgia.  A.L. 
Maas A.Y. Hannun and A.Y. Ng. 2013. Rectifier Nonlinearities Improve Neural Network Acoustic Models. In ICML. Atlanta Georgia."},{"key":"e_1_3_2_1_25_1","volume-title":"KRISP: Integrating Implicit and Symbolic Knowledge for Open-Domain Knowledge-Based VQA. arXiv:2012.11014","author":"Marino Kenneth","year":"2020","unstructured":"Kenneth Marino , Xinlei Chen , Devi Parikh , Abhinav Gupta , and Marcus Rohrbach . 2020 . KRISP: Integrating Implicit and Symbolic Knowledge for Open-Domain Knowledge-Based VQA. arXiv:2012.11014 (2020). Kenneth Marino, Xinlei Chen, Devi Parikh, Abhinav Gupta, and Marcus Rohrbach. 2020. KRISP: Integrating Implicit and Symbolic Knowledge for Open-Domain Knowledge-Based VQA. arXiv:2012.11014 (2020)."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"crossref","unstructured":"Kenneth Marino Mohammad Rastegari Ali Farhadi and Roozbeh Mottaghi. 2019. OK-VQA: A Visual Question Answering Benchmark Requiring External Knowledge. In CVPR.  Kenneth Marino Mohammad Rastegari Ali Farhadi and Roozbeh Mottaghi. 2019. OK-VQA: A Visual Question Answering Benchmark Requiring External Knowledge. In CVPR.","DOI":"10.1109\/CVPR.2019.00331"},{"key":"e_1_3_2_1_27_1","first-page":"2204","article-title":"Recurrent Models of Visual Attention","volume":"27","author":"Mnih Volodymyr","year":"2014","unstructured":"Volodymyr Mnih , Nicolas Heess , Alex Graves , and koray kavukcuoglu. 2014 . Recurrent Models of Visual Attention . In NIPS , Vol. 27. 2204 -- 2212 . Volodymyr Mnih, Nicolas Heess, Alex Graves, and koray kavukcuoglu. 2014. Recurrent Models of Visual Attention. In NIPS, Vol. 27. 2204--2212.","journal-title":"NIPS"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"crossref","unstructured":"Deepak Nathani Jatin Chauhan Charu Sharma and Manohar Kaul. 2019. Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs. In ACL.  Deepak Nathani Jatin Chauhan Charu Sharma and Manohar Kaul. 2019. 
Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs. In ACL.","DOI":"10.18653\/v1\/P19-1466"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46547-0_19"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"crossref","unstructured":"Ajeet Kumar Singh Anand Mishra Shashank Shekhar and Anirban Chakraborty. 2019. From Strings to Things: Knowledge-Enabled VQA Model That Can Read and Reason. In ICCV.  Ajeet Kumar Singh Anand Mishra Shashank Shekhar and Anirban Chakraborty. 2019. From Strings to Things: Knowledge-Enabled VQA Model That Can Read and Reason. In ICCV.","DOI":"10.1109\/ICCV.2019.00470"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"crossref","unstructured":"Damien Teney and Anton van den Hengel. 2018. Visual Question Answering as a Meta Learning Task. In ECCV.  Damien Teney and Anton van den Hengel. 2018. Visual Question Answering as a Meta Learning Task. In ECCV.","DOI":"10.1007\/978-3-030-01267-0_14"},{"key":"e_1_3_2_1_32_1","volume-title":"\u0141ukasz Kaiser, and Illia Polosukhin","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani , Noam Shazeer , Niki Parmar , Jakob Uszkoreit , Llion Jones , Aidan N. Gomez , \u0141ukasz Kaiser, and Illia Polosukhin . 2017 . Attention is All You Need. In NIPS. 6000--6010. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is All You Need. In NIPS. 6000--6010."},{"key":"e_1_3_2_1_33_1","volume-title":"Memory networks. arXiv preprint arXiv:1410.3916","author":"Weston Jason","year":"2014","unstructured":"Jason Weston , Sumit Chopra , and Antoine Bordes . 2014. Memory networks. arXiv preprint arXiv:1410.3916 ( 2014 ). Jason Weston, Sumit Chopra, and Antoine Bordes. 2014. Memory networks. arXiv preprint arXiv:1410.3916 (2014)."},{"key":"e_1_3_2_1_34_1","unstructured":"Zhiwen Xie Guangyou Zhou Jin Liu and Jimmy Xiangji Huang. 2020. 
ReInceptionE: Relation-Aware Inception Network with Joint Local-Global Structural Information for Knowledge Graph Embedding. In ACL.  Zhiwen Xie Guangyou Zhou Jin Liu and Jimmy Xiangji Huang. 2020. ReInceptionE: Relation-Aware Inception Network with Joint Local-Global Structural Information for Knowledge Graph Embedding. In ACL."},{"key":"e_1_3_2_1_35_1","volume-title":"Cross-modal knowledge reasoning for knowledge-based visual question answering. Pattern Recognition","author":"Yu Jing","year":"2020","unstructured":"Jing Yu , Zihao Zhu , Yujing Wang , Weifeng Zhang , Yue Hu , and Jianlong Tan . 2020. Cross-modal knowledge reasoning for knowledge-based visual question answering. Pattern Recognition ( 2020 ). Jing Yu, Zihao Zhu, Yujing Wang, Weifeng Zhang, Yue Hu, and Jianlong Tan. 2020. Cross-modal knowledge reasoning for knowledge-based visual question answering. Pattern Recognition (2020)."},{"key":"e_1_3_2_1_36_1","volume-title":"KM 4: Visual reasoning via Knowledge Embedding Memory Model with Mutual Modulation. Information Fusion","author":"Zheng Wenbo","year":"2021","unstructured":"Wenbo Zheng , Lan Yan , Chao Gou , and Fei-Yue Wang . 2021. KM 4: Visual reasoning via Knowledge Embedding Memory Model with Mutual Modulation. Information Fusion ( 2021 ). Wenbo Zheng, Lan Yan, Chao Gou, and Fei-Yue Wang. 2021. KM 4: Visual reasoning via Knowledge Embedding Memory Model with Mutual Modulation. Information Fusion (2021)."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"crossref","unstructured":"Wenbo Zheng Lan Yan Fei-Yue Wang and Chao Gou. 2020. Learning from the Guidance: Knowledge Embedded Meta-learning for Medical Visual Question Answering. In ICONIP.  Wenbo Zheng Lan Yan Fei-Yue Wang and Chao Gou. 2020. Learning from the Guidance: Knowledge Embedded Meta-learning for Medical Visual Question Answering. 
In ICONIP.","DOI":"10.1007\/978-3-030-63820-7_22"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"crossref","unstructured":"Peng Zhou Wei Shi Jun Tian Zhenyu Qi Bingchen Li Hongwei Hao and Bo Xu. 2016. Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification. In ACL. 207--212.  Peng Zhou Wei Shi Jun Tian Zhenyu Qi Bingchen Li Hongwei Hao and Bo Xu. 2016. Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification. In ACL. 207--212.","DOI":"10.18653\/v1\/P16-2034"},{"key":"e_1_3_2_1_39_1","volume-title":"Mucko: Multi-Layer Cross-Modal Knowledge Reasoning for Fact-based Visual Question Answering. In IJCAI.","author":"Zhu Zihao","year":"2020","unstructured":"Zihao Zhu , Jing Yu , Yajing Sun , Yue Hu , Yujing Wang , and Qi Wu . 2020 . Mucko: Multi-Layer Cross-Modal Knowledge Reasoning for Fact-based Visual Question Answering. In IJCAI. Zihao Zhu, Jing Yu, Yajing Sun, Yue Hu, Yujing Wang, and Qi Wu. 2020. Mucko: Multi-Layer Cross-Modal Knowledge Reasoning for Fact-based Visual Question Answering. 
In IJCAI."}],"event":{"name":"KDD '21: The 27th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Virtual Event Singapore","acronym":"KDD '21"},"container-title":["Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3447548.3467285","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3447548.3467285","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T20:18:28Z","timestamp":1750191508000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3447548.3467285"}},"subtitle":["Hierarchical-Knowledge Embedded Meta-Learning for Visual Reasoning in Artistic Domains"],"short-title":[],"issued":{"date-parts":[[2021,8,14]]},"references-count":40,"alternative-id":["10.1145\/3447548.3467285","10.1145\/3447548"],"URL":"https:\/\/doi.org\/10.1145\/3447548.3467285","relation":{},"subject":[],"published":{"date-parts":[[2021,8,14]]},"assertion":[{"value":"2021-08-14","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}