{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,3]],"date-time":"2026-01-03T05:38:29Z","timestamp":1767418709264,"version":"3.48.0"},"publisher-location":"Cham","reference-count":23,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783032049803"},{"type":"electronic","value":"9783032049810"}],"license":[{"start":{"date-parts":[[2025,9,20]],"date-time":"2025-09-20T00:00:00Z","timestamp":1758326400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,20]],"date-time":"2025-09-20T00:00:00Z","timestamp":1758326400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-04981-0_60","type":"book-chapter","created":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T05:13:03Z","timestamp":1758258783000},"page":"636-646","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Towards Robust Medical Image Referring Segmentation with\u00a0Incomplete Textual Prompts"],"prefix":"10.1007","author":[{"given":"Qijie","family":"Wang","sequence":"first","affiliation":[]},{"given":"Xian","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Zengqiang","family":"Yan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,20]]},"reference":[{"issue":"2","key":"60_CR1","doi-asserted-by":"publisher","first-page":"2023","DOI":"10.1038\/s41592-020-01008-z","volume":"18","author":"F Isensee","year":"2021","unstructured":"Isensee, F., Jaeger, P.F., Kohl, S.A., Petersen, J., Maier-Hein, K.H.: nnU-Net: a self-configuring method 
for deep learning-based biomedical image segmentation. Nat. Methods 18(2), 203\u2013211 (2021)","journal-title":"Nat. Methods"},{"key":"60_CR2","doi-asserted-by":"publisher","first-page":"4036","DOI":"10.1109\/TIP.2023.3293771","volume":"32","author":"HY Zhou","year":"2023","unstructured":"Zhou, H.Y., et al.: nnFormer: volumetric medical image segmentation via a 3D transformer. IEEE Trans. Image Process. 32, 4036\u20134045 (2023)","journal-title":"IEEE Trans. Image Process."},{"key":"60_CR3","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"issue":"5","key":"60_CR4","doi-asserted-by":"publisher","first-page":"1289","DOI":"10.1109\/TMI.2022.3226268","volume":"42","author":"G Chen","year":"2023","unstructured":"Chen, G., Li, L., Dai, Y., Zhang, J., Yap, M.H.: AAU-net: an adaptive attention U-net for breast lesions segmentation in ultrasound images. IEEE Trans. Med. Imag. 42(5), 1289\u20131300 (2023)","journal-title":"IEEE Trans. Med. Imag."},{"key":"60_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102762","volume":"85","author":"J Li","year":"2023","unstructured":"Li, J., et al.: Transforming medical imaging with Transformers? A comparative review of key properties, current progresses, and future perspectives. Med. Image Anal. 85, 102762 (2023)","journal-title":"Med. 
Image Anal."},{"key":"60_CR6","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"578","DOI":"10.1007\/978-3-031-72111-3_54","volume-title":"MICCAI 2024","author":"Z Xing","year":"2024","unstructured":"Xing, Z., Ye, T., Yang, Y., Liu, G., Zhu, L.: Segmamba: long-range sequential modeling mamba for 3d medical image segmentation. In: Linguraru, M.G., et al. (eds.) MICCAI 2024. LNCS, vol. 15008, pp. 578\u2013588. Springer, Cham (2024). https:\/\/doi.org\/10.1007\/978-3-031-72111-3_54"},{"issue":"9","key":"60_CR7","doi-asserted-by":"publisher","first-page":"2763","DOI":"10.1109\/TMI.2023.3264513","volume":"42","author":"A He","year":"2023","unstructured":"He, A., Wang, K., Li, T., Du, C., Xia, S., Fu, H.: H2former: an efficient hierarchical hybrid transformer for medical image segmentation. IEEE Trans. Med. Imag. 42(9), 2763\u20132775 (2023)","journal-title":"IEEE Trans. Med. Imag."},{"key":"60_CR8","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-031-43901-8_39","volume-title":"MICCAI 2023","author":"S Roy","year":"2023","unstructured":"Roy, S., et al.: Mednext: transformer-driven scaling of convnets for medical image segmentation. In: Greenspan, H., et al. (eds.) MICCAI 2023. LNCS, vol. 14223, pp. 405\u2013415. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-43901-8_39"},{"key":"60_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2024.110491","volume":"152","author":"X Guo","year":"2024","unstructured":"Guo, X., Lin, X., Yang, X., Yu, L., Cheng, K.T., Yan, Z.: UCTNet: uncertainty-guided CNN-transformer hybrid networks for medical image segmentation. Pattern Recogn. 152, 110491 (2024)","journal-title":"Pattern Recogn."},{"issue":"1","key":"60_CR10","doi-asserted-by":"publisher","first-page":"96","DOI":"10.1109\/TMI.2023.3291719","volume":"43","author":"Z Li","year":"2023","unstructured":"Li, Z., et al.: LVIT: language meets vision transformer in medical image segmentation. IEEE Trans. 
Med. Imag. 43(1), 96\u2013107 (2023)","journal-title":"IEEE Trans. Med. Imag."},{"key":"60_CR11","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"555","DOI":"10.1007\/978-3-031-72120-5_52","volume-title":"MICCAI 2024","author":"X Lin","year":"2024","unstructured":"Lin, X., Wang, Z., Yan, Z., Yu, L.: Revisiting self-attention in medical transformers via dependency sparsification. In: Linguraru, M.G., et al. (eds.) MICCAI 2024. LNCS, vol. 15011, pp. 555\u2013566. Springer, Cham (2024). https:\/\/doi.org\/10.1007\/978-3-031-72120-5_52"},{"issue":"12","key":"60_CR12","doi-asserted-by":"publisher","first-page":"10076","DOI":"10.1109\/TPAMI.2024.3435571","volume":"46","author":"R Azad","year":"2024","unstructured":"Azad, R., et al.: Medical image segmentation review: the success of u-net. IEEE Trans. Pattern Anal. Mach. Intell. 46(12), 10076\u201310095 (2024)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"60_CR13","unstructured":"Ouyang, S., et al.: LSMS: language-guided scale-aware medsegmentor for medical image referring segmentation. arXiv preprint arXiv:2408.17347 (2024)"},{"key":"60_CR14","doi-asserted-by":"crossref","unstructured":"Chen, W., Liu, J., Liu, T., Yuan, Y.: Bi-VLGM: bi-level class-severity-aware vision-language graph matching for text guided medical image segmentation. Int. J. Comput. Vis. 1\u201317 (2024)","DOI":"10.1007\/s11263-024-02246-w"},{"key":"60_CR15","unstructured":"Qin, Z., Yi, H., Lao, Q., Li, K.: Medical image understanding with pretrained vision language models: a comprehensive study. In: International Conference on Learning Representations (2023)"},{"key":"60_CR16","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 
4171\u20134186 (2019)"},{"key":"60_CR17","doi-asserted-by":"crossref","unstructured":"Degerli, A., Kiranyaz, S., Chowdhury, M.E.H., Gabbouj, M.: Osegnet: operational segmentation network for covid-19 detection using chest x-ray images. In: IEEE International Conference on Image Processing, pp. 2306\u20132310 (2022)","DOI":"10.1109\/ICIP46576.2022.9897412"},{"key":"60_CR18","doi-asserted-by":"crossref","unstructured":"Morozov, S.P., et al.: Mosmeddata: chest CT scans with covid-19 related findings dataset. arXiv preprint arXiv:2005.06465 (2020)","DOI":"10.1101\/2020.05.20.20100362"},{"issue":"1","key":"60_CR19","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1186\/s41747-019-0127-0","volume":"4","author":"J Hofmanninger","year":"2020","unstructured":"Hofmanninger, J., Prayer, F., Pan, J., R\u00f6hrich, S., Prosch, H., Langs, G.: Automatic lung segmentation in routine imaging is primarily a data diversity problem, not a methodology problem. Eur. Radiol. Experim. 4(1), 1\u201313 (2020)","journal-title":"Eur. Radiol. Experim."},{"key":"60_CR20","unstructured":"Yang, Z., et al.: Language-aware vision transformer for referring segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 1\u201318 (2024)"},{"key":"60_CR21","doi-asserted-by":"crossref","unstructured":"Liu, Y., Zhang, C., Wang, Y., Wang, J., Yang, Y., Tang, Y.: Universal segmentation at arbitrary granularity with language instruction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3459\u20133469 (2024)","DOI":"10.1109\/CVPR52733.2024.00332"},{"key":"60_CR22","doi-asserted-by":"publisher","first-page":"1782","DOI":"10.1109\/TIP.2024.3371348","volume":"33","author":"J Wu","year":"2024","unstructured":"Wu, J., Li, X., Li, X., Ding, H., Tong, Y., Tao, D.: Towards robust referring image segmentation. IEEE Trans. Image Process. 33, 1782\u20131794 (2024)","journal-title":"IEEE Trans. 
Image Process."},{"key":"60_CR23","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"151","DOI":"10.1007\/978-3-031-16437-8_15","volume-title":"MICCAI 2022","author":"NK Tomar","year":"2022","unstructured":"Tomar, N.K., Jha, D., Bagci, U., Ali, S.: TGANet: text-guided attention for improved polyp segmentation. In: Wang, L., Dou, Q., Fletcher, P.T., Speidel, S., Li, S. (eds.) MICCAI 2022. LNCS, vol. 13433, pp. 151\u2013160. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-16437-8_15"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-04981-0_60","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,3]],"date-time":"2026-01-03T05:34:01Z","timestamp":1767418441000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-04981-0_60"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,20]]},"ISBN":["9783032049803","9783032049810"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-04981-0_60","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025,9,20]]},"assertion":[{"value":"20 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}