{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T17:37:02Z","timestamp":1744220222298,"version":"3.40.3"},"publisher-location":"Cham","reference-count":24,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031258244"},{"type":"electronic","value":"9783031258251"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-25825-1_29","type":"book-chapter","created":{"date-parts":[[2023,2,3]],"date-time":"2023-02-03T19:02:52Z","timestamp":1675450972000},"page":"402-414","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Medical VQA: MixUp Helps Keeping it Simple"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8730-4722","authenticated-orcid":false,"given":"Jitender","family":"Singh","sequence":"first","affiliation":[]},{"given":"Dwarikanath","family":"Mahapatra","sequence":"additional","affiliation":[]},{"given":"Deepti R.","family":"Bathula","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,2,4]]},"reference":[{"unstructured":"Abacha, A.B., Datla, V.V., Hasan, S.A., Demner-Fushman, D., Muller, H.: Overview of the VQA-med task at ImageCLEF 2020: visual question answering and generation in the medical domain. In: CEUR Workshop Proceedings (2020)","key":"29_CR1"},{"unstructured":"Abacha, A.B., Sarrouti, M., Demner-Fushman, D., Hasan, S.A., M\u00fcller, H.: Overview of the VQA-med task at ImageCLEF 2021: visual question answering and generation in the medical domain. In: CEUR Workshop Proceedings (2021)","key":"29_CR2"},{"unstructured":"Al-Sadi, A., Al-Theiabat, H.A., Al-Ayyoub, M.: The inception team at VQA-med 2020: pretrained VGG with data augmentation for medical VQA and VQG. In: CEUR Workshop Proceedings, vol. 2696 (2020)","key":"29_CR3"},{"unstructured":"Castells, T., Weinzaepfel, P., Revaud, J.: Superloss: a generic loss for robust curriculum learning, vol. 33, pp. 4308\u20134319. Curran Associates, Inc. (2020)","key":"29_CR4"},{"unstructured":"Chen, G., Gong, H., Li, G.: HCP-mic at VQA-med 2020: effective visual representation for medical visual question answering, vol. 2696. CEUR (2020)","key":"29_CR5"},{"unstructured":"Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. CoRR, abs\/1412.3555 (2014)","key":"29_CR6"},{"unstructured":"Eslami, S., de Melo, G., Meinel, C.: Teams at VQA-med 2021: BBN-orchestra for long-tailed medical visual question answering. In: CEUR, vol. 2936, pp. 1211\u20131217 (2021)","key":"29_CR7"},{"unstructured":"Gong, H., Huang, R., Chen, G., Li, G.: SYSU-HCP at VQA-med 2021: a data-centric model with efficient training methodology for medical visual question answering. 
In: CLEF (2021)","key":"29_CR8"},{"doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. CoRR, abs\/1512.03385 (2015)","key":"29_CR9","DOI":"10.1109\/CVPR.2016.90"},{"issue":"8","key":"29_CR10","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. CoRR, abs\/1608.06993 (2016)","key":"29_CR11","DOI":"10.1109\/CVPR.2017.243"},{"unstructured":"Jung, B., Gu, L., Harada, T.: bumjun jung at VQA-med 2020: VQA model based on feature extraction and multi-modal feature fusion. In: CEUR Workshop Proceedings, vol. 2696 (2020)","key":"29_CR12"},{"unstructured":"Lee, J., et al.: BioBERT: a pre-trained biomedical language representation model for biomedical text mining. CoRR, abs\/1901.08746 (2019)","key":"29_CR13"},{"unstructured":"Liao, Z., Wu, Q., Shen, C., Hengel, A.V., Verjans, J.W.: AIML at VQA-med 2020: knowledge inference via a skeleton-based sentence mapping approach for medical domain visual question answering, vol. 2696, pp. 1\u201314. CEUR (2020)","key":"29_CR14"},{"doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Inverted residuals and linear bottlenecks: mobile networks for classification, detection and segmentation. CoRR, abs\/1801.04381 (2018)","key":"29_CR15","DOI":"10.1109\/CVPR.2018.00474"},{"unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. CoRR, arXiv:1409.1556 (2014)","key":"29_CR16"},{"doi-asserted-by":"crossref","unstructured":"Virk, J.S., Bathula, D.R.: Domain-specific, semi-supervised transfer learning for medical imaging. In: CODS COMAD, pp. 145\u2013153 (2021)","key":"29_CR17","DOI":"10.1145\/3430984.3431022"},{"unstructured":"Xiao, Q., Zhou, X., Xiao, Y., Zhao, K.: Yunnan university at VQA-med 2021: pretrained BioBERT for medical domain visual question answering. In: CEUR Workshop Proceedings, vol. 2936, pp. 1405\u20131411 (2021)","key":"29_CR18"},{"doi-asserted-by":"crossref","unstructured":"Xie, S., Girshick, R.B., Doll\u00e1r, P., Tu, Z., He, K.: Aggregated residual transformations for deep neural networks. CoRR, abs\/1611.05431 (2016)","key":"29_CR19","DOI":"10.1109\/CVPR.2017.634"},{"doi-asserted-by":"crossref","unstructured":"Yang, Z., He, X., Gao, J., Deng, L., Smola, A.J.: Stacked attention networks for image question answering. CoRR, abs\/1511.02274 (2015)","key":"29_CR20","DOI":"10.1109\/CVPR.2016.10"},{"unstructured":"Zhang, H., Cisse, M., Dauphin, Y.N., Lopez-Paz, D.: mixup: beyond empirical risk minimization. CoRR, abs\/1710.09412 (2018)","key":"29_CR21"},{"unstructured":"Zhang, H., et al.: Resnest: split-attention networks. CoRR, abs\/2004.08955 (2020)","key":"29_CR22"},{"doi-asserted-by":"crossref","unstructured":"Zhou, B., Cui, Q., Wei, X.S., Chen, Z.: BBN: bilateral-branch network with cumulative learning for long-tailed visual recognition. 
CoRR, arXiv:1912.02413 (2019)","key":"29_CR23","DOI":"10.1109\/CVPR42600.2020.00974"},{"key":"29_CR24","doi-asserted-by":"publisher","first-page":"5947","DOI":"10.1109\/TNNLS.2018.2817340","volume":"29","author":"Y Zhou","year":"2018","unstructured":"Zhou, Y., Yu, J., Xiang, C., Fan, J., Tao, D.: Beyond bilinear: generalized multimodal factorized high-order pooling for visual question answering. IEEE Trans. Neural Netw. Learn. Syst. 29, 5947\u20135959 (2018)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."}],"container-title":["Lecture Notes in Computer Science","Image and Vision Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-25825-1_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,3]],"date-time":"2023-02-03T19:09:36Z","timestamp":1675451376000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-25825-1_29"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031258244","9783031258251"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-25825-1_29","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"4 February 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"IVCNZ","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image and Vision Computing New Zealand","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Auckland","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"New Zealand","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 November 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 November 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"37","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ivcnz2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ivcnz2022.aut.ac.nz\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information 
(provided by the conference organizers)"}},{"value":"79","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"14","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"23","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"18% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.7","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.1","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
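A minimal sketch (not part of the record above) showing how the peer-review assertions can be read back and the stated acceptance rate re-derived per the record's own rule, "Number of Full Papers Accepted / Number of Submissions Sent for Review * 100", rounded to a whole number: round(14 / 79 * 100) == 18. The filename "crossref_record.json" is an assumption for a local copy of this record.

import json

# Assumed local copy of the Crossref work record shown above.
with open("crossref_record.json") as fh:
    work = json.load(fh)["message"]

# Index the "assertion" entries by their "name" field.
assertions = {a["name"]: a["value"] for a in work["assertion"]}
submitted = int(assertions["number_of_submissions_sent_for_review"])  # "79"
accepted = int(assertions["number_of_full_papers_accepted"])          # "14"

# Re-derive the rate exactly as the record describes it.
rate = round(accepted / submitted * 100)
print(f"{rate}%")  # -> 18%, matching acceptance_rate_of_full_papers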