{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,3]],"date-time":"2025-11-03T13:47:25Z","timestamp":1762177645848,"version":"3.40.3"},"publisher-location":"Cham","reference-count":25,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031438943"},{"type":"electronic","value":"9783031438950"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-43895-0_34","type":"book-chapter","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T23:08:23Z","timestamp":1696115303000},"page":"361-370","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Localized Questions in\u00a0Medical Visual Question Answering"],"prefix":"10.1007","author":[{"given":"Sergio","family":"Tascon-Morales","sequence":"first","affiliation":[]},{"given":"Pablo","family":"M\u00e1rquez-Neila","sequence":"additional","affiliation":[]},{"given":"Raphael","family":"Sznitman","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,1]]},"reference":[{"key":"34_CR1","unstructured":"Allan, M., et al.: 2017 robotic instrument segmentation challenge. arXiv preprint arXiv:1902.06426 (2019)"},{"key":"34_CR2","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"34_CR3","doi-asserted-by":"crossref","unstructured":"Ben-Younes, H., Cadene, R., Cord, M., Thome, N.: MUTAN: multimodal tucker fusion for visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2612\u20132620 (2017)","DOI":"10.1109\/ICCV.2017.285"},{"key":"34_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1007\/978-3-030-87240-3_7","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"T Do","year":"2021","unstructured":"Do, T., Nguyen, B.X., Tjiputra, E., Tran, M., Tran, Q.D., Nguyen, A.: Multiple meta-model quantifying for medical visual question answering. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12905, pp. 64\u201374. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87240-3_7"},{"key":"34_CR5","doi-asserted-by":"publisher","unstructured":"Fox, M., Taschwer, M., Schoeffmann, K.: Pixel-based tool segmentation in cataract surgery videos with mask R-CNN. In: de Herrera, A.G.S., Gonz\u00e1lez, A.R., Santosh, K.C., Temesgen, Z., Kane, B., Soda, P. (eds.) 33rd IEEE International Symposium on Computer-Based Medical Systems, CBMS 2020, Rochester, MN, USA, July 28\u201330, 2020, pp. 565\u2013568. IEEE (2020). 
https:\/\/doi.org\/10.1109\/CBMS49503.2020.00112","DOI":"10.1109\/CBMS49503.2020.00112"},{"key":"34_CR6","doi-asserted-by":"crossref","unstructured":"Fukui, A., Park, D.H., Yang, D., Rohrbach, A., Darrell, T., Rohrbach, M.: Multimodal compact bilinear pooling for visual question answering and visual grounding. arXiv preprint arXiv:1606.01847 (2016)","DOI":"10.18653\/v1\/D16-1044"},{"key":"34_CR7","doi-asserted-by":"crossref","unstructured":"Gong, H., Chen, G., Liu, S., Yu, Y., Li, G.: Cross-modal self-attention with multi-task pre-training for medical visual question answering. In: Proceedings of the 2021 International Conference on Multimedia Retrieval, pp. 456\u2013460 (2021)","DOI":"10.1145\/3460426.3463584"},{"key":"34_CR8","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the v in VQA matter: elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"34_CR9","unstructured":"Hasan, S.A., Ling, Y., Farri, O., Liu, J., Lungren, M., M\u00fcller, H.: Overview of the ImageCLEF 2018 medical domain visual question answering task. In: CLEF2018 Working Notes. CEUR Workshop Proceedings, CEUR-WS.org $$<$$http:\/\/ceur-ws.org$$>$$, Avignon, France, 10-14 September 2018"},{"key":"34_CR10","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"8","key":"34_CR11","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"34_CR12","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for compositional question answering over real-world images. arXiv preprint arXiv:1902.09506 3(8) (2019)"},{"key":"34_CR13","unstructured":"Kim, J.H., On, K.W., Lim, W., Kim, J., Ha, J.W., Zhang, B.T.: Hadamard product for low-rank bilinear pooling. arXiv preprint arXiv:1610.04325 (2016)"},{"key":"34_CR14","unstructured":"Liao, Z., Wu, Q., Shen, C., Van Den Hengel, A., Verjans, J.: AIML at VQA-Med 2020: knowledge inference via a skeleton-based sentence mapping approach for medical domain visual question answering (2020)"},{"key":"34_CR15","doi-asserted-by":"crossref","unstructured":"Liu, B., Zhan, L.M., Xu, L., Ma, L., Yang, Y., Wu, X.M.: Slake: a semantically-labeled knowledge-enhanced dataset for medical visual question answering. In: 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), pp. 1650\u20131654. IEEE (2021)","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"34_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"238","DOI":"10.1007\/978-3-030-28577-7_20","volume-title":"Experimental IR Meets Multilinguality, Multimodality, and Interaction","author":"F Liu","year":"2019","unstructured":"Liu, F., Peng, Y., Rosen, M.P.: An effective deep transfer learning and information fusion framework for medical visual question answering. In: Crestani, F., et al. (eds.) CLEF 2019. LNCS, vol. 11696, pp. 238\u2013247. Springer, Cham (2019). 
https:\/\/doi.org\/10.1007\/978-3-030-28577-7_20"},{"key":"34_CR17","unstructured":"Mani, A., Yoo, N., Hinthorn, W., Russakovsky, O.: Point and ask: incorporating pointing into visual question answering. arXiv preprint arXiv:2011.13681 (2020)"},{"key":"34_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"522","DOI":"10.1007\/978-3-030-32251-9_57","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"BD Nguyen","year":"2019","unstructured":"Nguyen, B.D., Do, T.-T., Nguyen, B.X., Do, T., Tjiputra, E., Tran, Q.D.: Overcoming data limitation in medical visual question answering. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11767, pp. 522\u2013530. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32251-9_57"},{"key":"34_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"180","DOI":"10.1007\/978-3-030-01364-6_20","volume-title":"Intravascular Imaging and Computer Assisted Stenting and Large-Scale Annotation of Biomedical Data and Expert Label Synthesis","author":"O Pelka","year":"2018","unstructured":"Pelka, O., Koitka, S., R\u00fcckert, J., Nensa, F., Friedrich, C.M.: Radiology objects in COntext (ROCO): a multimodal image dataset. In: Stoyanov, D., et al. (eds.) LABELS\/CVII\/STENT -2018. LNCS, vol. 11043, pp. 180\u2013189. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01364-6_20"},{"key":"34_CR20","doi-asserted-by":"publisher","first-page":"50626","DOI":"10.1109\/ACCESS.2020.2980024","volume":"8","author":"F Ren","year":"2020","unstructured":"Ren, F., Zhou, Y.: CGMVQA: a new classification and generative model for medical visual question answering. IEEE Access 8, 50626\u201350636 (2020)","journal-title":"IEEE Access"},{"key":"34_CR21","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: LXMERT: learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490 (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"34_CR22","doi-asserted-by":"publisher","unstructured":"Tascon-Morales, S., M\u00e1rquez-Neila, P., Sznitman, R.: Consistency-preserving visual question answering in medical imaging. In: Wang, L., Dou, Q., Fletcher, P.T., Speidel, S., Li, S. (eds.) Medical Image Computing and Computer Assisted Intervention. MICCAI 2022. LNCS, vol. 13438, pp. pp. 386\u2013395. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-16452-1_37","DOI":"10.1007\/978-3-031-16452-1_37"},{"issue":"9","key":"34_CR23","doi-asserted-by":"publisher","first-page":"2856","DOI":"10.1109\/TMI.2020.2978284","volume":"39","author":"MH Vu","year":"2020","unstructured":"Vu, M.H., L\u00f6fstedt, T., Nyholm, T., Sznitman, R.: A question-centric model for visual question answering in medical imaging. IEEE Trans. Med. Imaging 39(9), 2856\u20132868 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"34_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.119148","volume":"214","author":"Y Yu","year":"2023","unstructured":"Yu, Y., Li, H., Shi, H., Li, L., Xiao, J.: Question-guided feature pyramid network for medical visual question answering. Expert Syst. Appl. 214, 119148 (2023)","journal-title":"Expert Syst. Appl."},{"key":"34_CR25","doi-asserted-by":"crossref","unstructured":"Zhan, L.M., Liu, B., Fan, L., Chen, J., Wu, X.M.: Medical visual question answering via conditional reasoning. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 
2345\u20132354 (2020)","DOI":"10.1145\/3394171.3413761"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-43895-0_34","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,11]],"date-time":"2024-03-11T14:31:58Z","timestamp":1710167518000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-43895-0_34"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031438943","9783031438950"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-43895-0_34","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"1 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2023\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2250","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"730","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
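Note: the block above is a Crossref REST API work record (message-type "work") describing the MICCAI 2023 chapter "Localized Questions in Medical Visual Question Answering" (DOI 10.1007/978-3-031-43895-0_34), including its 25 deposited references and the conference peer-review assertions. As a minimal sketch of how such a record can be retrieved and read programmatically, assuming only the public api.crossref.org endpoint and the Python requests library (the script name and contact address in the User-Agent are placeholders, included only because Crossref asks clients to identify themselves):

# Illustrative sketch, not part of the record above: fetch a Crossref work
# record and inspect a few of the fields that appear in the JSON payload.
import requests

DOI = "10.1007/978-3-031-43895-0_34"  # DOI of the chapter described in the record

resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    # Placeholder identification; Crossref's "polite pool" etiquette asks for a contact address.
    headers={"User-Agent": "example-metadata-script/0.1 (mailto:you@example.org)"},
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]  # corresponds to the "message" object in the record above

print(work["title"][0])                    # chapter title
print(work["DOI"], work.get("page"))       # DOI and page range (361-370 in the record above)
print(len(work.get("reference", [])))      # number of deposited references (25 here)

# Print the key and DOI (or unstructured citation text) of the first few references.
for ref in work.get("reference", [])[:3]:
    print(ref["key"], ref.get("DOI", ref.get("unstructured", ""))[:80])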