{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,20]],"date-time":"2026-04-20T10:25:32Z","timestamp":1776680732564,"version":"3.51.2"},"reference-count":58,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2016,11,8]],"date-time":"2016-11-08T00:00:00Z","timestamp":1478563200000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2017,5]]},"DOI":"10.1007\/s11263-016-0966-6","type":"journal-article","created":{"date-parts":[[2016,11,8]],"date-time":"2016-11-08T13:09:54Z","timestamp":1478610594000},"page":"4-31","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":248,"title":["VQA: Visual Question Answering"],"prefix":"10.1007","volume":"123","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8620-8077","authenticated-orcid":false,"given":"Aishwarya","family":"Agrawal","sequence":"first","affiliation":[]},{"given":"Jiasen","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Stanislaw","family":"Antol","sequence":"additional","affiliation":[]},{"given":"Margaret","family":"Mitchell","sequence":"additional","affiliation":[]},{"given":"C. Lawrence","family":"Zitnick","sequence":"additional","affiliation":[]},{"given":"Devi","family":"Parikh","sequence":"additional","affiliation":[]},{"given":"Dhruv","family":"Batra","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2016,11,8]]},"reference":[{"key":"966_CR1","doi-asserted-by":"publisher","unstructured":"Agrawal, H., Mathialagan, C.S., Goyal, Y., Chavali, N., Banik, P., Mohapatra, A., et al. (2015). Cloudcv: Large-scale distributed computer vision as a cloud service. In G. Hua & X.-S. 
Hua (Eds.), Mobile cloud visual media computing (pp. 265\u2013290). Switzerland: Springer International Publishing.","DOI":"10.1007\/978-3-319-24702-1_11"},{"key":"966_CR2","doi-asserted-by":"publisher","unstructured":"Antol, S., Zitnick, C.L., Parikh, D. (2014). Zero-Shot learning via visual abstraction. In ECCV","DOI":"10.1007\/978-3-319-10593-2_27"},{"key":"966_CR3","doi-asserted-by":"crossref","unstructured":"Bigham, J.P., Jayant, C., Ji, H., Little, G., Miller, A., Miller, R.C., Miller, R., Tatarowicz, A., White, B., White, S., Yeh, T. (2010). VizWiz: Nearly real-time answers to visual questions. In User interface software and technology","DOI":"10.1145\/1866029.1866080"},{"key":"966_CR4","doi-asserted-by":"publisher","unstructured":"Bollacker, K., Evans, C., Paritosh, P., Sturge, T., & Taylor, J. (2008). Freebase: A collaboratively created graph database for structuring human knowledge. In International conference on management of data. doi: 10.1145\/1376616.1376746 .","DOI":"10.1145\/1376616.1376746"},{"key":"966_CR5","doi-asserted-by":"crossref","unstructured":"Carlson, A., Betteridge, J., Kisiel, B., Settles, B., Jr., E.R.H., Mitchell, T.M. (2010). Toward an architecture for never-ending language learning. In AAAI","DOI":"10.1609\/aaai.v24i1.7519"},{"key":"966_CR6","unstructured":"Chen, X., Fang, H., Lin, T., Vedantam, R., Gupta, S., Doll\u00e1r, P., Zitnick, C.L. (2015). Microsoft COCO captions: Data collection and evaluation server. CoRR arXiv:1504.00325"},{"key":"966_CR7","unstructured":"Chen, X., Fang, H., Lin, T.Y., Vedantam, R., Gupta, S., Doll\u00e1r, P., Zitnick, C.L. (2015). Microsoft COCO Captions: Data collection and evaluation server. arXiv:1504.00325"},{"key":"966_CR8","doi-asserted-by":"crossref","unstructured":"Chen, X., Shrivastava, A., Gupta, A. (2013). NEIL: Extracting visual knowledge from web data. In ICCV","DOI":"10.1109\/ICCV.2013.178"},{"key":"966_CR9","doi-asserted-by":"publisher","unstructured":"Chen, X., Zitnick, C.L. (2015). 
Mind\u2019s eye: A recurrent visual representation for image caption generation. In CVPR","DOI":"10.1109\/CVPR.2015.7298856"},{"key":"966_CR10","doi-asserted-by":"publisher","unstructured":"Coppersmith, G., Kelly, E. (2014). Dynamic wordclouds and vennclouds for exploratory data analysis. In ACL workshop on interactive language learning and visualization","DOI":"10.3115\/v1\/W14-3103"},{"key":"966_CR11","doi-asserted-by":"publisher","unstructured":"Deng, J., Berg, A.C., Fei-Fei, L. (2011). Hierarchical semantic indexing for large scale image retrieval. In CVPR","DOI":"10.1109\/CVPR.2011.5995516"},{"key":"966_CR12","doi-asserted-by":"publisher","unstructured":"Donahue, J., Hendricks, L.A., Guadarrama, S., Rohrbach, M., Venugopalan, S., Saenko, K., Darrell, T. (2015). Long-term recurrent convolutional networks for visual recognition and description. In CVPR","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"966_CR13","doi-asserted-by":"publisher","unstructured":"Elliott, D., Keller, F. (2014). Comparing automatic evaluation measures for image description. In ACL","DOI":"10.3115\/v1\/P14-2074"},{"key":"966_CR14","unstructured":"Fader, A., Zettlemoyer, L., Etzioni, O. (2013). Paraphrase-driven learning for open question answering. In ACL. http:\/\/www.aclweb.org\/anthology\/P13-1158"},{"key":"966_CR15","doi-asserted-by":"publisher","unstructured":"Fader, A., Zettlemoyer, L., Etzioni, O. (2014). Open Question answering over curated and extracted knowledge bases. In International conference on knowledge discovery and data mining","DOI":"10.1145\/2623330.2623677"},{"key":"966_CR16","doi-asserted-by":"publisher","unstructured":"Fang, H., Gupta, S., Iandola, F.N., Srivastava, R., Deng, L., Doll\u00e1r, P., Gao, J., He, X., Mitchell, M., Platt, J.C., Zitnick, C.L., Zweig, G. (2015). From captions to visual concepts and back. 
In CVPR","DOI":"10.1109\/CVPR.2015.7298754"},{"key":"966_CR17","doi-asserted-by":"crossref","unstructured":"Farhadi, A., Hejrati, M., Sadeghi, A., Young, P., Rashtchian, C., Hockenmaier, J., Forsyth, D. (2010). Every picture tells a story: Generating sentences for images. In ECCV","DOI":"10.1007\/978-3-642-15561-1_2"},{"key":"966_CR18","unstructured":"Gao, H., Mao, J., Zhou, J., Huang, Z., Yuille, A. (2015). Are you talking to a machine? dataset and methods for multilingual image question answering. In NIPS"},{"key":"966_CR19","doi-asserted-by":"crossref","unstructured":"Geman, D., Geman, S., Hallonquist, N., Younes, L. (2014). A visual turing test for computer vision systems. In PNAS","DOI":"10.1073\/pnas.1422953112"},{"key":"966_CR20","unstructured":"Gordon, J., Durme, B.V. (2013). Reporting bias and knowledge extraction. In Proceedings of the 3rd Workshop on Knowledge Extraction, at CIKM 2013"},{"key":"966_CR21","doi-asserted-by":"publisher","unstructured":"Guadarrama, S., Krishnamoorthy, N., Malkarnenkar, G., Venugopalan, S., Mooney, R., Darrell, T., Saenko, K. (2013). YouTube2Text: Recognizing and describing arbitrary activities using semantic hierarchies and zero-shot recognition. In ICCV. http:\/\/www.eecs.berkeley.edu\/~sguada\/pdfs\/2013-ICCV-youtube2text-final.pdf","DOI":"10.1109\/ICCV.2013.337"},{"key":"966_CR22","doi-asserted-by":"crossref","unstructured":"Hodosh, M., Young, P., & Hockenmaier, J. (2013). Framing image description as a ranking task: Data models and evaluation metrics. Journal of Artificial Intelligence Research, 47, 853\u2013899.","DOI":"10.1613\/jair.3994"},{"key":"966_CR23","doi-asserted-by":"publisher","unstructured":"Jia, Y., Shelhamer, E., Donahue, J., Karayev, S., Long, J., Girshick, R., Guadarrama, S., Darrell, T. (2014). Caffe: Convolutional architecture for fast feature embedding. arXiv:1408.5093","DOI":"10.1145\/2647868.2654889"},{"key":"966_CR24","doi-asserted-by":"publisher","unstructured":"Karpathy, A., Fei-Fei, L. (2015). 
Deep visual-semantic alignments for generating image descriptions. In CVPR . http:\/\/arxiv.org\/abs\/1412.2306","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"966_CR25","doi-asserted-by":"crossref","unstructured":"Kazemzadeh, S., Ordonez, V., Matten, M., Berg, T.L. (2014). ReferItGame: Referring to objects in photographs of natural scenes. In EMNLP","DOI":"10.3115\/v1\/D14-1086"},{"key":"966_CR26","unstructured":"Kiros, R., Salakhutdinov, R., Zemel, R.S. (2015). Unifying visual-semantic embeddings with multimodal neural language models. In TACL"},{"key":"966_CR27","unstructured":"Kiros, R., Zhu, Y., Salakhutdinov, R., Zemel, R.S., Torralba, A., Urtasun, R., Fidler, S. (2015). Skip-thought vectors. arXiv:1506.06726"},{"key":"966_CR28","volume-title":"What Are You Talking About?","author":"C Kong","year":"2014","unstructured":"Kong, C., Lin, D., Bansal, M., Urtasun, R., & Fidler, S. (2014). What Are You Talking About?. In CVPR: Text-to-image coreference."},{"key":"966_CR29","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E. (2012). ImageNet classification with deep convolutional neural networks. In NIPS"},{"key":"966_CR30","doi-asserted-by":"crossref","unstructured":"Kulkarni, G., Premraj, V., Sagnik Dhar\u00a0and, S.L., Choi, Y., Berg, A.C., Berg, T.L. (2011). Baby talk: Understanding and generating simple image descriptions. In CVPR","DOI":"10.1109\/CVPR.2011.5995466"},{"key":"966_CR31","volume-title":"Building large knowledge-based systems; representation and inference in the cyc project","author":"DB Lenat","year":"1989","unstructured":"Lenat, D. B., & Guha, R. V. (1989). Building large knowledge-based systems; representation and inference in the cyc project. Chicago: Addison-Wesley Longman Publishing Co., Inc."},{"key":"966_CR32","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L. (2014). Microsoft COCO: Common objects in context. 
In ECCV","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"966_CR33","doi-asserted-by":"crossref","unstructured":"Lin, X., Parikh, D. (2015). Don\u2019t just listen, use your imagination: Leveraging visual common sense for non-visual tasks. In CVPR","DOI":"10.1109\/CVPR.2015.7298917"},{"issue":"4","key":"966_CR34","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1023\/B:BTTJ.0000047600.45421.6d","volume":"22","author":"H Liu","year":"2014","unstructured":"Liu, H., & Singh, P. (2014). ConceptNet-A Practical Commonsense Reasoning Tool-Kit. BT Technology Journal, 22(4), 211\u2013226. doi: 10.1023\/B:BTTJ.0000047600.45421.6d .","journal-title":"BT Technology Journal"},{"key":"966_CR35","unstructured":"Malinowski, M., Fritz, M. (2014). A multi-world approach to question answering about real-world scenes based on uncertain input. In NIPS"},{"key":"966_CR36","doi-asserted-by":"publisher","unstructured":"Malinowski, M., Rohrbach, M., Fritz, M. (2015). Ask your neurons: A neural-based approach to answering questions about images. In ICCV","DOI":"10.1109\/ICCV.2015.9"},{"key":"966_CR37","unstructured":"Mao, J., Xu, W., Yang, Y., Wang, J., Yuille, A.L. (2014). Explain images with multimodal recurrent neural networks. CoRR arXiv:1410.1090"},{"key":"966_CR38","unstructured":"Mikolov, T., Sutskever, I., Chen, K., Corrado, G.S., Dean, J. (2013). Distributed representations of words and phrases and their compositionality. In NIPS"},{"key":"966_CR39","unstructured":"Mitchell, M., van Deemter, K., Reiter, E. (2013). Attributes in visual reference. In PRE-CogSci"},{"key":"966_CR40","unstructured":"Mitchell, M., Dodge, J., Goyal, A., Yamaguchi, K., Stratos, K., Han, X., Mensch, A., Berg, A., Berg, T.L., Daume\u00a0III, H. (2012). Midge: Generating image descriptions from computer vision detections. In ACL"},{"key":"966_CR41","unstructured":"Mitchell, M., Van\u00a0Deemter, K., Reiter, E. (2013). Generating expressions that refer to visible objects. 
In HLT-NAACL"},{"key":"966_CR42","doi-asserted-by":"publisher","unstructured":"Ramanathan, V., Joulin, A., Liang, P., Fei-Fei, L. (2014). Linking People with \u201cTheir\u201d names using coreference resolution. In ECCV","DOI":"10.1007\/978-3-319-10590-1_7"},{"key":"966_CR43","unstructured":"Ren, M., Kiros, R., Zemel, R. (2015). Exploring models and data for image question answering. In NIPS"},{"key":"966_CR44","doi-asserted-by":"crossref","unstructured":"Richardson, M., Burges, C.J., Renshaw, E. (2013). MCTest: A challenge dataset for the open-domain machine comprehension of text. In EMNLP","DOI":"10.18653\/v1\/D13-1020"},{"key":"966_CR45","doi-asserted-by":"publisher","unstructured":"Rohrbach, M., Qiu, W., Titov, I., Thater, S., Pinkal, M., Schiele, B. (2013). Translating video content to natural language descriptions. In ICCV","DOI":"10.1109\/ICCV.2013.61"},{"key":"966_CR46","doi-asserted-by":"crossref","unstructured":"Sadeghi, F., Kumar\u00a0Divvala, S.K., Farhadi, A. (2015). Viske: Visual knowledge extraction and question answering by visual verification of relation phrases. In CVPR","DOI":"10.1109\/CVPR.2015.7298752"},{"key":"966_CR47","unstructured":"Simonyan, K., Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. CoRR arxiv:1409.1556"},{"key":"966_CR48","doi-asserted-by":"publisher","unstructured":"Toutanova, K., Klein, D., Manning, C.D., Singer, Y. (2003). Feature-rich part-of-speech tagging with a cyclic dependency network. In ACL","DOI":"10.3115\/1073445.1073478"},{"issue":"2","key":"966_CR49","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1109\/MMUL.2014.29","volume":"21","author":"K Tu","year":"2014","unstructured":"Tu, K., Meng, M., Lee, M. W., Choe, T. E., & Zhu, S. C. (2014). Joint video and text parsing for understanding events and answering queries. IEEE MultiMedia, 21(2), 42\u201370. 
doi: 10.1109\/MMUL.2014.29 .","journal-title":"IEEE MultiMedia"},{"key":"966_CR50","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Zitnick, C.L., Parikh, D.(2015). CIDEr: Consensus-based image description evaluation. In CVPR","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"966_CR51","doi-asserted-by":"crossref","unstructured":"Vendantam, R., Lin, X., Batra, T., Zitnick, C.L., Parikh, D. (2015). Learning common sense through visual abstraction. In ICCV","DOI":"10.1109\/ICCV.2015.292"},{"key":"966_CR52","doi-asserted-by":"publisher","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D. (2015). Show and tell: A neural image caption generator. In CVPR. arXiv:1411.4555","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"966_CR53","unstructured":"Weston, J., Bordes, A., Chopra, S., Mikolov, T. (2015). Towards AI-complete question answering: A set of prerequisite toy tasks. CoRR arXiv:1502.05698"},{"key":"966_CR54","doi-asserted-by":"crossref","unstructured":"Yu, L., Park, E., Berg, A.C., Berg, T.L. (2015). Visual madlibs: Fill-in-the-blank description generation and question answering. In ICCV","DOI":"10.1109\/ICCV.2015.283"},{"key":"966_CR55","unstructured":"Zhang, P., Goyal, Y., Summers-Stay, D., Batra, D., Parikh, D. (2015). Yin and yang: Balancing and answering binary visual questions. CoRR arXiv:1511.05099"},{"key":"966_CR56","doi-asserted-by":"publisher","unstructured":"Zitnick, C.L., Parikh, D. (2013). Bringing semantics into focus using visual abstraction. In CVPR","DOI":"10.1109\/CVPR.2013.387"},{"key":"966_CR57","doi-asserted-by":"publisher","unstructured":"Zitnick, C.L., Parikh, D., Vanderwende, L. (2013). Learning the visual interpretation of sentences. In ICCV","DOI":"10.1109\/ICCV.2013.211"},{"key":"966_CR58","doi-asserted-by":"publisher","first-page":"627","DOI":"10.1109\/TPAMI.2014.2366143","volume":"38","author":"CL Zitnick","year":"2015","unstructured":"Zitnick, C. L., Vedantam, R., & Parikh, D. (2015). 
Adopting abstract images for semantic scene understanding. IEEE transactions on pattern analysis and machine intelligence, 38, 627\u2013638.","journal-title":"IEEE transactions on pattern analysis and machine intelligence"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-016-0966-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s11263-016-0966-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-016-0966-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,12]],"date-time":"2025-06-12T04:50:26Z","timestamp":1749703826000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s11263-016-0966-6"}},"subtitle":["www.visualqa.org"],"short-title":[],"issued":{"date-parts":[[2016,11,8]]},"references-count":58,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2017,5]]}},"alternative-id":["966"],"URL":"https:\/\/doi.org\/10.1007\/s11263-016-0966-6","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2016,11,8]]}}}