{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:03:21Z","timestamp":1775066601500,"version":"3.50.1"},"publisher-location":"Cham","reference-count":44,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030584511","type":"print"},{"value":"9783030584528","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58452-8_6","type":"book-chapter","created":{"date-parts":[[2020,11,3]],"date-time":"2020-11-03T00:34:03Z","timestamp":1604363643000},"page":"91-107","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":27,"title":["AiR: Attention with Reasoning Capability"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3749-4767","authenticated-orcid":false,"given":"Shi","family":"Chen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6439-5476","authenticated-orcid":false,"given":"Ming","family":"Jiang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8322-1121","authenticated-orcid":false,"given":"Jinhui","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3054-8934","authenticated-orcid":false,"given":"Qi","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,3]]},"reference":[{"key":"6_CR1","doi-asserted-by":"crossref","unstructured":"Alers, H., Liu, H., Redi, J., Heynderickx, I.: Studying the effect of optimizing the image quality in saliency regions at the expense of background content. In: Image Quality and System Performance VII, vol. 7529, p. 752907. International Society for Optics and Photonics (2010)","DOI":"10.1117\/12.839545"},{"key":"6_CR2","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Bottom-up and top-down attention for image captioning and visual question answering. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00636"},{"key":"6_CR3","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"6_CR4","doi-asserted-by":"crossref","unstructured":"Ben-Younes, H., Cad\u00e8ne, R., Thome, N., Cord, M.: MUTAN: multimodal tucker fusion for visual question answering. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.285"},{"key":"6_CR5","unstructured":"Borji, A., Itti, L.: CAT 2000: a large scale fixation dataset for boosting saliency research. arXiv preprint arXiv:1505.03581 (2015)"},{"key":"6_CR6","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1109\/TPAMI.2018.2815601","volume":"41","author":"Z Bylinskii","year":"2019","unstructured":"Bylinskii, Z., Judd, T., Oliva, A., Torralba, A., Durand, F.: What do different evaluation metrics tell us about saliency models? IEEE Trans. Pattern Anal. Mach. Intell. 
41, 740\u2013757 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"6_CR7","doi-asserted-by":"crossref","unstructured":"Das, A., Agrawal, H., Zitnick, C.L., Parikh, D., Batra, D.: Human attention in visual question answering: do humans and deep networks look at the same regions? In: Conference on Empirical Methods in Natural Language Processing (EMNLP) (2016)","DOI":"10.18653\/v1\/D16-1092"},{"key":"6_CR8","doi-asserted-by":"crossref","unstructured":"Do, T., Do, T.T., Tran, H., Tjiputra, E., Tran, Q.D.: Compact trilinear interaction for visual question answering. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00048"},{"issue":"6\u20137","key":"6_CR9","doi-asserted-by":"publisher","first-page":"945","DOI":"10.1080\/13506280902834720","volume":"17","author":"KA Ehinger","year":"2009","unstructured":"Ehinger, K.A., Hidalgo-Sotelo, B., Torralba, A., Oliva, A.: Modelling search for people in 900 scenes: a combined source model of eye guidance. Vis. Cogn. 17(6\u20137), 945\u2013978 (2009)","journal-title":"Vis. Cogn."},{"key":"6_CR10","doi-asserted-by":"crossref","unstructured":"Fan, S., et al.: Emotional attention: a study of image sentiment and visual attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7521\u20137531 (2018)","DOI":"10.1109\/CVPR.2018.00785"},{"key":"6_CR11","doi-asserted-by":"crossref","unstructured":"Fukui, A., Park, D.H., Yang, D., Rohrbach, A., Darrell, T., Rohrbach, M.: Multimodal compact bilinear pooling for visual question answering and visual grounding. In: Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 457\u2013468 (2016)","DOI":"10.18653\/v1\/D16-1044"},{"key":"6_CR12","doi-asserted-by":"crossref","unstructured":"Gao, P., et al.: Dynamic fusion with intra- and inter-modality attention flow for visual question answering. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00680"},{"key":"6_CR13","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the V in VQA matter: elevating the role of image understanding in visual question answering. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"6_CR14","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"6_CR15","doi-asserted-by":"crossref","unstructured":"He, S., Tavakoli, H.R., Borji, A., Pugeault, N.: Human attention in image captioning: dataset and analysis. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00862"},{"key":"6_CR16","doi-asserted-by":"crossref","unstructured":"Hu, R., Andreas, J., Rohrbach, M., Darrell, T., Saenko, K.: Learning to reason: end-to-end module networks for visual question answering. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.93"},{"key":"6_CR17","unstructured":"Hudson, D.A., Manning, C.D.: Compositional attention networks for machine reasoning (2018)"},{"key":"6_CR18","doi-asserted-by":"crossref","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for real-world visual reasoning and compositional question answering. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00686"},{"key":"6_CR19","doi-asserted-by":"crossref","unstructured":"Huk Park, D., et al.: Multimodal explanations: justifying decisions and pointing to the evidence. 
In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00915"},{"key":"6_CR20","doi-asserted-by":"crossref","unstructured":"Johnson, J., Hariharan, B., van der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: CLEVR: a diagnostic dataset for compositional language and elementary visual reasoning. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.215"},{"key":"6_CR21","doi-asserted-by":"crossref","unstructured":"Judd, T., Ehinger, K., Durand, F., Torralba, A.: Learning to predict where humans look. In: 2009 IEEE 12th International Conference on Computer Vision, pp. 2106\u20132113. IEEE (2009)","DOI":"10.1109\/ICCV.2009.5459462"},{"key":"6_CR22","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1016\/j.jneumeth.2014.01.032","volume":"227","author":"SD K\u00f6nig","year":"2014","unstructured":"K\u00f6nig, S.D., Buffalo, E.A.: A nonparametric method for detecting fixations and saccades using cluster analysis: removing the need for arbitrary thresholds. J. Neurosci. Methods 227, 121\u2013131 (2014)","journal-title":"J. Neurosci. Methods"},{"key":"6_CR23","unstructured":"Kim, J.H., Jun, J., Zhang, B.T.: Bilinear attention networks. In: NeurIPS, pp. 1571\u20131581 (2018)"},{"issue":"3","key":"6_CR24","doi-asserted-by":"publisher","first-page":"14","DOI":"10.1167\/14.3.14","volume":"14","author":"K Koehler","year":"2014","unstructured":"Koehler, K., Guo, F., Zhang, S., Eckstein, M.P.: What do saliency models predict? J. Vis. 14(3), 14 (2014)","journal-title":"J. Vis."},{"issue":"1","key":"6_CR25","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","volume":"123","author":"R Krishna","year":"2017","unstructured":"Krishna, R., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. Int. J. Comput. Vision 123(1), 32\u201373 (2017)","journal-title":"Int. J. Comput. Vision"},{"key":"6_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"145","DOI":"10.1007\/978-3-030-11018-5_13","volume-title":"Computer Vision \u2013 ECCV 2018 Workshops","author":"W Li","year":"2018","unstructured":"Li, W., Yuan, Z., Fang, X., Wang, C.: Knowing where to look? Analysis on attention of visual question answering system. In: Leal-Taix\u00e9, L., Roth, S. (eds.) ECCV 2018. LNCS, vol. 11132, pp. 145\u2013152. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-11018-5_13"},{"key":"6_CR27","doi-asserted-by":"crossref","unstructured":"Mascharka, D., Tran, P., Soklaski, R., Majumdar, A.: Transparency by design: closing the gap between performance and interpretability in visual reasoning. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00519"},{"key":"6_CR28","doi-asserted-by":"crossref","unstructured":"Patro, B.N., Anupriy, Namboodiri, V.P.: Explanation vs attention: a two-player game to obtain attention for VQA. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6858"},{"key":"6_CR29","doi-asserted-by":"crossref","unstructured":"Qiao, T., Dong, J., Xu, D.: Exploring human-like attention supervision in visual question answering. In: AAAI (2018)","DOI":"10.1609\/aaai.v32i1.12272"},{"key":"6_CR30","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., et al.: Taking a hint: leveraging explanations to make vision and language models more grounded. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00268"},{"key":"6_CR31","doi-asserted-by":"crossref","unstructured":"Tapaswi, M., Zhu, Y., Stiefelhagen, R., Torralba, A., Urtasun, R., Fidler, S.: MovieQA: understanding stories in movies through question-answering. 
In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.501"},{"key":"6_CR32","doi-asserted-by":"crossref","unstructured":"Tavakoli, H.R., Ahmed, F., Borji, A., Laaksonen, J.: Saliency revisited: analysis of mouse movements versus fixations. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.673"},{"key":"6_CR33","doi-asserted-by":"crossref","unstructured":"Tavakoli, H.R., Shetty, R., Borji, A., Laaksonen, J.: Paying attention to descriptions generated by image captioning models. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.272"},{"key":"6_CR34","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS, pp. 5998\u20136008 (2017)"},{"key":"6_CR35","unstructured":"Wu, J., Mooney, R.: Self-critical reasoning for robust visual question answering. In: NeurIPS (2019)"},{"issue":"1","key":"6_CR36","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1167\/14.1.28","volume":"14","author":"J Xu","year":"2014","unstructured":"Xu, J., Jiang, M., Wang, S., Kankanhalli, M.S., Zhao, Q.: Predicting human gaze beyond pixels. J. Vis. 14(1), 28 (2014)","journal-title":"J. Vis."},{"key":"6_CR37","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: ICML, pp. 2048\u20132057 (2015)"},{"key":"6_CR38","doi-asserted-by":"crossref","unstructured":"Yang, C.J., Grauman, K., Gurari, D.: Visual question answer diversity. In: Sixth AAAI Conference on Human Computation and Crowdsourcing (2018)","DOI":"10.1609\/hcomp.v6i1.13341"},{"key":"6_CR39","doi-asserted-by":"crossref","unstructured":"Yang, Z., He, X., Gao, J., Deng, L., Smola, A.: Stacked attention networks for image question answering. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.10"},{"key":"6_CR40","unstructured":"Yi, K., Wu, J., Gan, C., Torralba, A., Kohli, P., Tenenbaum, J.: Neural-symbolic VQA: disentangling reasoning from vision and language understanding. In: NeurIPS, pp. 1031\u20131042 (2018)"},{"key":"6_CR41","doi-asserted-by":"crossref","unstructured":"Yu, Z., Yu, J., Cui, Y., Tao, D., Tian, Q.: Deep modular co-attention networks for visual question answering. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00644"},{"key":"6_CR42","doi-asserted-by":"crossref","unstructured":"Yu, Z., Yu, J., Fan, J., Tao, D.: Multi-modal factorized bilinear pooling with co-attention learning for visual question answering. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.202"},{"key":"6_CR43","doi-asserted-by":"crossref","unstructured":"Zellers, R., Bisk, Y., Farhadi, A., Choi, Y.: From recognition to cognition: visual commonsense reasoning. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00688"},{"key":"6_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Niebles, J.C., Soto, A.: Interpretable visual question answering by visual grounding from attention supervision mining. In: WACV, pp. 
349\u2013357 (2019)","DOI":"10.1109\/WACV.2019.00043"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58452-8_6","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:05:22Z","timestamp":1730592322000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58452-8_6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030584511","9783030584528"],"references-count":44,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58452-8_6","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"3 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}