{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T18:56:49Z","timestamp":1743101809049,"version":"3.40.3"},"publisher-location":"Cham","reference-count":29,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030863647"},{"type":"electronic","value":"9783030863654"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-86365-4_37","type":"book-chapter","created":{"date-parts":[[2021,9,10]],"date-time":"2021-09-10T11:02:39Z","timestamp":1631271759000},"page":"459-470","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Improving Visual Question Answering by\u00a0Semantic Segmentation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6135-4601","authenticated-orcid":false,"given":"Viet-Quoc","family":"Pham","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9127-3041","authenticated-orcid":false,"given":"Nao","family":"Mishima","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8576-5322","authenticated-orcid":false,"given":"Toshiaki","family":"Nakasu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,9,7]]},"reference":[{"doi-asserted-by":"crossref","unstructured":"Agrawal, A., Batra, D., Parikh, D., Kembhavi, A.: Dont just assume; look and answer: overcoming priors for visual question answering. In: CVPR (2018)","key":"37_CR1","DOI":"10.1109\/CVPR.2018.00522"},{"doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Bottom-up and top-down attention for image captioning and visual question answering. In: CVPR (2018)","key":"37_CR2","DOI":"10.1109\/CVPR.2018.00636"},{"doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: ICCV (2015)","key":"37_CR3","DOI":"10.1109\/ICCV.2015.279"},{"unstructured":"Cadene, R., Dancette, C., Ben-younes, H., Cord, M., Parikh, D.: Rubi: reducing unimodal biases in visual question answering. In: NeurIPS (2019)","key":"37_CR4"},{"doi-asserted-by":"crossref","unstructured":"Caesar, H., Uijlings, J., Ferrari, V.: Coco-stuff: thing and stuff classes in context. In: CVPR (2018)","key":"37_CR5","DOI":"10.1109\/CVPR.2018.00132"},{"key":"37_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"doi-asserted-by":"crossref","unstructured":"Chen, L.C., Papandreou, G., Kokkinos, I., Murphy, K., Yuille, A.L.: DeepLab: semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs. TPAMI (2018)","key":"37_CR7","DOI":"10.1109\/TPAMI.2017.2699184"},{"doi-asserted-by":"crossref","unstructured":"Chen, L., Yan, X., Xiao, J., Zhang, H., Pu, S., Zhuang, Y.: Counterfactual samples synthesizing for robust visual question answering. In: CVPR (2020)","key":"37_CR8","DOI":"10.1109\/CVPR42600.2020.01081"},{"doi-asserted-by":"crossref","unstructured":"Cordts, M., et al.: The cityscapes dataset for semantic urban scene understanding. In: CVPR (2016)","key":"37_CR9","DOI":"10.1109\/CVPR.2016.350"},{"doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","key":"37_CR10","DOI":"10.1109\/CVPR.2009.5206848"},{"unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL (2019)","key":"37_CR11"},{"doi-asserted-by":"crossref","unstructured":"Gan, C., Li, Y., Li, H., Sun, C., Gong, B.: VQS: linking segmentations to questions and answers for supervised attention in VQA and question-focused semantic segmentation. In: ICCV (2017)","key":"37_CR12","DOI":"10.1109\/ICCV.2017.201"},{"doi-asserted-by":"crossref","unstructured":"Gokhale, T., Banerjee, P., Baral, C., Yang, Y.: MUTANT: a training paradigm for out-of-distribution generalization in visual question answering. In: EMNLP (2020)","key":"37_CR13","DOI":"10.18653\/v1\/2020.emnlp-main.63"},{"doi-asserted-by":"crossref","unstructured":"Kirillov, A., He, K., Girshick, R., Rother, C., Dollar, P.: Panoptic segmentation. In: CVPR (2019)","key":"37_CR14","DOI":"10.1109\/CVPR.2019.00963"},{"unstructured":"Krishna, R., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. arxiv preprint (2016)","key":"37_CR15"},{"doi-asserted-by":"crossref","unstructured":"Li, L., Gan, Z., Cheng, Y., Liu, J.: Relation-aware graph attention network for visual question answering. In: ICCV (2019)","key":"37_CR16","DOI":"10.1109\/ICCV.2019.01041"},{"doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: CVPR (2015)","key":"37_CR17","DOI":"10.1109\/CVPR.2015.7298965"},{"unstructured":"Lu, J., Yang, J., Batra, D., Parikh, D.: Hierarchical question-image co-attention for visual question answering. In: NeurIPS (2016)","key":"37_CR18"},{"doi-asserted-by":"crossref","unstructured":"Neuhold, G., Ollmann, T., Bulo, S.R., Kontschieder, P.: The Mapillary vistas dataset for semantic understanding of street scenes. In: ICCV (2017)","key":"37_CR19","DOI":"10.1109\/ICCV.2017.534"},{"doi-asserted-by":"crossref","unstructured":"Pham, V.Q., Ito, S., Kozakaya, T.: BiSeg: simultaneous instance segmentation and semantic segmentation with fully convolutional networks. In: BMVC (2017)","key":"37_CR20","DOI":"10.5244\/C.31.60"},{"unstructured":"Ramakrishnan, S., Agrawal, A., Lee, S.: Overcoming language priors in visual question answering with adversarial regularization. In: NeurIPS (2018)","key":"37_CR21"},{"unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. 
In: NeurIPS (2015)","key":"37_CR22"},{"doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: LXMERT: learning cross-modality encoder representations from transformers. In: EMNLP (2019)","key":"37_CR23","DOI":"10.18653\/v1\/D19-1514"},{"key":"37_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"437","DOI":"10.1007\/978-3-030-58529-7_26","volume-title":"Computer Vision \u2013 ECCV 2020","author":"R Tang","year":"2020","unstructured":"Tang, R., Ma, C., Zhang, W.E., Wu, Q., Yang, X.: Semantic equivalent adversarial data augmentation for visual question answering. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12364, pp. 437\u2013453. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58529-7_26"},{"unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)","key":"37_CR25"},{"doi-asserted-by":"crossref","unstructured":"Wang, H., Zhu, Y., Adam, H., Yuille, A., Chen, L.C.: Max-DeepLab: end-to-end panoptic segmentation with mask transformers. arxiv preprint (2020)","key":"37_CR26","DOI":"10.1109\/CVPR46437.2021.00542"},{"doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., Jia, J.: Pyramid scene parsing network. In: CVPR (2017)","key":"37_CR27","DOI":"10.1109\/CVPR.2017.660"},{"doi-asserted-by":"crossref","unstructured":"Zheng, S., et al.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. arxiv preprint (2020)","key":"37_CR28","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"37_CR29","doi-asserted-by":"publisher","first-page":"302","DOI":"10.1007\/s11263-018-1140-0","volume":"127","author":"B Zhou","year":"2018","unstructured":"Zhou, B., et al.: Semantic understanding of scenes through the ade20k dataset. Int. J. Comput. Vis. 127, 302\u2013321 (2018)","journal-title":"Int. J. Comput. 
Vis."}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2021"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-86365-4_37","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,8]],"date-time":"2023-01-08T23:38:31Z","timestamp":1673221111000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-86365-4_37"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030863647","9783030863654"],"references-count":29,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-86365-4_37","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"7 September 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Bratislava","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Slovakia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 September 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 September 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2021\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OCS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"496","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"265","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by 
the conference organizers)"}},{"value":"4","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"53% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Conference was held online due to the COVID-19 pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
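
The record above is a Crossref REST API "work" message for the chapter. As a minimal sketch of how such a record can be retrieved and summarised (assuming the public endpoint https://api.crossref.org/works/{DOI}, which serves records in this envelope; the function names fetch_work and summarise below are illustrative, not part of the record):

```python
# Sketch: fetch a Crossref "work" record and format a one-line citation.
# Field names (author, title, container-title, issued, DOI) mirror the record above.
import json
import urllib.request

DOI = "10.1007/978-3-030-86365-4_37"

def fetch_work(doi: str) -> dict:
    """Return the 'message' object of a Crossref work record."""
    url = f"https://api.crossref.org/works/{doi}"
    with urllib.request.urlopen(url) as resp:
        payload = json.load(resp)
    # The envelope carries status/message-type around the actual metadata.
    assert payload.get("message-type") == "work"
    return payload["message"]

def summarise(work: dict) -> str:
    """Build 'Authors: Title. Container (Year). DOI URL' from the metadata."""
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    title = work.get("title", [""])[0]
    container = work.get("container-title", [""])[-1]
    year = work.get("issued", {}).get("date-parts", [[None]])[0][0]
    return f"{authors}: {title}. {container} ({year}). https://doi.org/{work['DOI']}"

if __name__ == "__main__":
    print(summarise(fetch_work(DOI)))
```

Run against this DOI, the output would read roughly: "Viet-Quoc Pham, Nao Mishima, Toshiaki Nakasu: Improving Visual Question Answering by Semantic Segmentation. Artificial Neural Networks and Machine Learning – ICANN 2021 (2021). https://doi.org/10.1007/978-3-030-86365-4_37". For bulk use, Crossref asks clients to identify themselves (e.g. a User-Agent with a mailto address), which the sketch omits.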