{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T21:28:34Z","timestamp":1771795714986,"version":"3.50.1"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031164514","type":"print"},{"value":"9783031164521","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-16452-1_37","type":"book-chapter","created":{"date-parts":[[2022,9,15]],"date-time":"2022-09-15T21:25:46Z","timestamp":1663277146000},"page":"386-395","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Consistency-Preserving Visual Question Answering in Medical Imaging"],"prefix":"10.1007","author":[{"given":"Sergio","family":"Tascon-Morales","sequence":"first","affiliation":[]},{"given":"Pablo","family":"M\u00e1rquez-Neila","sequence":"additional","affiliation":[]},{"given":"Raphael","family":"Sznitman","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,16]]},"reference":[{"key":"37_CR1","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"37_CR2","first-page":"841","volume":"32","author":"R Cadene","year":"2019","unstructured":"Cadene, R., Dancette, C., Cord, M., Parikh, D., et al.: RUBi: reducing unimodal biases for visual question answering. Adv. Neural. Inf. Process. Syst. 32, 841\u2013852 (2019)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"issue":"2","key":"37_CR3","doi-asserted-by":"publisher","first-page":"196","DOI":"10.1016\/j.irbm.2013.01.010","volume":"34","author":"E Decenciere","year":"2013","unstructured":"Decenciere, E., et al.: TeleoOhta: machine learning and image processing methods for teleophthalmology. IRBM 34(2), 196\u2013203 (2013)","journal-title":"IRBM"},{"key":"37_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1007\/978-3-030-68790-8_28","volume-title":"Pattern Recognition. ICPR International Workshops and Challenges","author":"V Goel","year":"2021","unstructured":"Goel, V., Chandak, M., Anand, A., Guha, P.: IQ-VQA: intelligent visual question answering. In: Del Bimbo, A., et al. (eds.) ICPR 2021. LNCS, vol. 12662, pp. 357\u2013370. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-68790-8_28"},{"key":"37_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"379","DOI":"10.1007\/978-3-030-58589-1_23","volume-title":"Computer Vision \u2013 ECCV 2020","author":"T Gokhale","year":"2020","unstructured":"Gokhale, T., Banerjee, P., Baral, C., Yang, Y.: VQA-LOL: visual question answering under the lens of logic. 
In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12366, pp. 379\u2013396. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58589-1_23"},{"key":"37_CR6","doi-asserted-by":"crossref","unstructured":"Gong, H., Chen, G., Liu, S., Yu, Y., Li, G.: Cross-modal self-attention with multi-task pre-training for medical visual question answering. In: Proceedings of the 2021 International Conference on Multimedia Retrieval, pp. 456\u2013460 (2021)","DOI":"10.1145\/3460426.3463584"},{"key":"37_CR7","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the V in VQA matter: Elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"37_CR8","unstructured":"Hasan, S.A., Ling, Y., Farri, O., Liu, J., Lungren, M., M\u00fcller, H.: Overview of the ImageCLEF 2018 medical domain visual question answering task. In: CLEF2018 Working Notes. CEUR Workshop Proceedings, CEUR-WS.org http:\/\/ceur-ws.org, Avignon, France, 10\u201314 September 2018"},{"key":"37_CR9","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"8","key":"37_CR10","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"37_CR11","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for compositional question answering over real-world images. arXiv preprint arXiv:1902.09506, vol. 3(8) (2019)"},{"key":"37_CR12","unstructured":"Liao, Z., Wu, Q., Shen, C., Van Den Hengel, A., Verjans, J.: AIML at VQA-Med 2020: knowledge inference via a skeleton-based sentence mapping approach for medical domain visual question answering (2020)"},{"key":"37_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"238","DOI":"10.1007\/978-3-030-28577-7_20","volume-title":"Experimental IR Meets Multilinguality, Multimodality, and Interaction","author":"F Liu","year":"2019","unstructured":"Liu, F., Peng, Y., Rosen, M.P.: An effective deep transfer learning and information fusion framework for medical visual question answering. In: Crestani, F., et al. (eds.) CLEF 2019. LNCS, vol. 11696, pp. 238\u2013247. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-28577-7_20"},{"key":"37_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"522","DOI":"10.1007\/978-3-030-32251-9_57","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"BD Nguyen","year":"2019","unstructured":"Nguyen, B.D., Do, T.-T., Nguyen, B.X., Do, T., Tjiputra, E., Tran, Q.D.: Overcoming data limitation in medical visual question answering. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11767, pp. 522\u2013530. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32251-9_57"},{"key":"37_CR15","unstructured":"Porwal, P., et al.: Indian diabetic retinopathy image dataset (IDRiD) (2018). 
https:\/\/dx.doi.org\/10.21227\/H25W98"},{"key":"37_CR16","doi-asserted-by":"crossref","unstructured":"Ray, A., Sikka, K., Divakaran, A., Lee, S., Burachas, G.: Sunny and dark outside?! Improving answer consistency in VQA through entailed question generation. arXiv preprint arXiv:1909.04696 (2019)","DOI":"10.18653\/v1\/D19-1596"},{"issue":"S1","key":"37_CR17","doi-asserted-by":"publisher","first-page":"389","DOI":"10.3233\/THC-174704","volume":"26","author":"F Ren","year":"2018","unstructured":"Ren, F., Cao, P., Zhao, D., Wan, C.: Diabetic macular edema grading in retinal images using vector quantization and semi-supervised learning. Technol. Health Care 26(S1), 389\u2013397 (2018)","journal-title":"Technol. Health Care"},{"key":"37_CR18","doi-asserted-by":"crossref","unstructured":"Ribeiro, M.T., Guestrin, C., Singh, S.: Are red roses red? Evaluating consistency of question-answering models. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6174\u20136184 (2019)","DOI":"10.18653\/v1\/P19-1621"},{"key":"37_CR19","unstructured":"Sarrouti, M.: NLM at VQA-Med 2020: visual question answering and generation in the medical domain. In: CLEF (Working Notes) (2020)"},{"key":"37_CR20","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., et al.: Squinting at VQA models: introspecting VQA models with sub-questions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10003\u201310011 (2020)","DOI":"10.1109\/CVPR42600.2020.01002"},{"key":"37_CR21","doi-asserted-by":"crossref","unstructured":"Shah, M., Chen, X., Rohrbach, M., Parikh, D.: Cycle-consistency for robust visual question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6649\u20136658 (2019)","DOI":"10.1109\/CVPR.2019.00681"},{"key":"37_CR22","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: LXMERT: learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490 (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"37_CR23","unstructured":"Teney, D., Abbasnejad, E., Hengel, A.V.D.: On incorporating semantic prior knowledge in deep learning through embedding-space constraints. arXiv preprint arXiv:1909.13471 (2019)"},{"issue":"9","key":"37_CR24","doi-asserted-by":"publisher","first-page":"2856","DOI":"10.1109\/TMI.2020.2978284","volume":"39","author":"MH Vu","year":"2020","unstructured":"Vu, M.H., L\u00f6fstedt, T., Nyholm, T., Sznitman, R.: A question-centric model for visual question answering in medical imaging. IEEE Trans. Med. Imaging 39(9), 2856\u20132868 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"37_CR25","unstructured":"Wang, P., Liao, R., Moyer, D., Berkowitz, S., Horng, S., Golland, P.: Image classification with consistent supporting evidence. In: Machine Learning for Health, pp. 168\u2013180. PMLR (2021)"},{"key":"37_CR26","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057. PMLR (2015)"},{"key":"37_CR27","doi-asserted-by":"crossref","unstructured":"Yuan, Y., Wang, S., Jiang, M., Chen, T.Y.: Perception matters: detecting perception failures of VQA models using metamorphic testing. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
16908\u201316917 (2021)","DOI":"10.1109\/CVPR46437.2021.01663"},{"key":"37_CR28","doi-asserted-by":"crossref","unstructured":"Zhan, L.M., Liu, B., Fan, L., Chen, J., Wu, X.M.: Medical visual question answering via conditional reasoning. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2345\u20132354 (2020)","DOI":"10.1145\/3394171.3413761"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-16452-1_37","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T11:48:13Z","timestamp":1710244093000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-16452-1_37"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031164514","9783031164521"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-16452-1_37","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"16 September 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Singapore","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Singapore","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 September 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 September 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft Conference","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1831","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review 
Information (provided by the conference organizers)"}},{"value":"574","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"31% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
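
Provenance note: the envelope above (status / message-type "work" / message) matches what the public Crossref REST API returns. Below is a minimal sketch, assuming the record is served at the standard https://api.crossref.org/works/{DOI} endpoint, of fetching and reading it with only the Python standard library; the User-Agent contact address is a placeholder you would replace with your own.

```python
import json
import urllib.request

# DOI taken from the record above.
DOI = "10.1007/978-3-031-16452-1_37"

# Crossref asks polite clients to identify themselves with a contact
# address in the User-Agent (placeholder address, replace with yours).
req = urllib.request.Request(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "example-fetcher/0.1 (mailto:you@example.org)"},
)

with urllib.request.urlopen(req) as resp:
    work = json.load(resp)["message"]  # the payload sits under "message"

# Print a few fields that appear in the record above.
print(work["title"][0])
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print(f'{work["publisher"]}, pp. {work["page"]} ({work["issued"]["date-parts"][0][0]})')
```

Run against this DOI, it should print the chapter title, the three authors, and "Springer Nature Switzerland, pp. 386-395 (2022)", matching the fields in the record.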