{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T14:51:46Z","timestamp":1778079106506,"version":"3.51.4"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032051264","type":"print"},{"value":"9783032051271","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,20]],"date-time":"2025-09-20T00:00:00Z","timestamp":1758326400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,20]],"date-time":"2025-09-20T00:00:00Z","timestamp":1758326400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-05127-1_42","type":"book-chapter","created":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T21:15:55Z","timestamp":1758316555000},"page":"435-445","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["ReSurgSAM2: Referring Segment Anything in\u00a0Surgical Video via\u00a0Credible Long-Term Tracking"],"prefix":"10.1007","author":[{"given":"Haofeng","family":"Liu","sequence":"first","affiliation":[]},{"given":"Mingqi","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Xuxiao","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Ziyue","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Guanyi","family":"Qin","sequence":"additional","affiliation":[]},{"given":"Junde","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Yueming","family":"Jin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,20]]},"reference":[{"key":"42_CR1","unstructured":"Allan, M., et\u00a0al.: 2018 robotic scene segmentation challenge. arXiv preprint arXiv:2001.11190 (2020)"},{"key":"42_CR2","unstructured":"Allan, M., et\u00a0al.: 2017 robotic instrument segmentation challenge. arXiv preprint arXiv:1902.06426 (2019)"},{"key":"42_CR3","doi-asserted-by":"crossref","unstructured":"Cuttano, C., Trivigno, G., Rosi, G., Masone, C., Averta, G.: SAMWISE: infusing wisdom in SAM2 for text-driven video segmentation. arXiv preprint arXiv:2411.17646 (2024)","DOI":"10.1109\/CVPR52734.2025.00322"},{"key":"42_CR4","unstructured":"Gu, A., Dao, T.: Mamba: linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752 (2023)"},{"issue":"11","key":"42_CR5","doi-asserted-by":"publisher","first-page":"2991","DOI":"10.1109\/TMI.2022.3177077","volume":"41","author":"Y Jin","year":"2022","unstructured":"Jin, Y., Yu, Y., Chen, C., Zhao, Z., Heng, P.A., Stoyanov, D.: Exploring intra-and inter-video relation for surgical semantic scene segmentation. IEEE Trans. Med. Imaging 41(11), 2991\u20133002 (2022)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"42_CR6","doi-asserted-by":"crossref","unstructured":"Khoreva, A., Rohrbach, A., Schiele, B.: Video object segmentation with language referring expressions. In: Asian Conference on Computer Vision (ACCV), pp. 123\u2013141 (2019)","DOI":"10.1007\/978-3-030-20870-7_8"},{"key":"42_CR7","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et\u00a0al.: Segment anything. 
In: International Conference on Computer Vision, pp. 4015\u20134026 (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"issue":"1","key":"42_CR8","doi-asserted-by":"publisher","first-page":"88","DOI":"10.1016\/j.surg.2021.01.051","volume":"170","author":"JG Kovoor","year":"2021","unstructured":"Kovoor, J.G., Gupta, A.K., Gladman, M.A.: Validity and effectiveness of augmented reality in surgical education: a systematic review. Surgery 170(1), 88\u201398 (2021)","journal-title":"Surgery"},{"key":"42_CR9","doi-asserted-by":"crossref","unstructured":"Li, H., et al.: AIF-SFDA: autonomous information filter-driven source-free domain adaptation for medical image segmentation. In: Proceedings of the AAAI Conference on Artificial Intelligence (2025)","DOI":"10.1609\/aaai.v39i5.32498"},{"key":"42_CR10","doi-asserted-by":"crossref","unstructured":"Li, H., et al.: Multi-view test-time adaptation for semantic segmentation in clinical cataract surgery. IEEE Trans. Med. Imaging (2025)","DOI":"10.1109\/TMI.2025.3529875"},{"key":"42_CR11","doi-asserted-by":"crossref","unstructured":"Li, Y., Zhang, J., Teng, X., Lan, L., Liu, X.: RefSAM: efficiently adapting segmenting anything model for referring video object segmentation. arXiv preprint arXiv:2307.00997 (2024)","DOI":"10.2139\/ssrn.5244602"},{"key":"42_CR12","unstructured":"Liu, H., Zhang, E., Wu, J., Hong, M., Jin, Y.: Surgical SAM 2: real-time segment anything in surgical video by efficient frame pruning. In: Advancements In Medical Foundation Models: Explainability, Robustness, Security, and Beyond (2024)"},{"key":"42_CR13","doi-asserted-by":"crossref","unstructured":"Liu, Y., et al.: Learning quality-aware dynamic memory for video object segmentation. In: European Conference on Computer Vision, pp. 468\u2013486 (2022)","DOI":"10.1007\/978-3-031-19818-2_27"},{"key":"42_CR14","unstructured":"Low, C.H., et al.: SurgRAW: multi-agent workflow with chain-of-thought reasoning for surgical intelligence. arXiv preprint arXiv:2503.10265 (2025)"},{"key":"42_CR15","doi-asserted-by":"publisher","first-page":"106151","DOI":"10.1016\/j.ijsu.2021.106151","volume":"95","author":"A Moglia","year":"2021","unstructured":"Moglia, A., Georgiou, K., Georgiou, E., Satava, R.M., Cuschieri, A.: A systematic review on artificial intelligence in robot-assisted surgery. Int. J. Surg. 95, 106151 (2021)","journal-title":"Int. J. Surg."},{"key":"42_CR16","doi-asserted-by":"crossref","unstructured":"Ou, M., et al.: MVD-net: semantic segmentation of cataract surgery using multi-view learning. In: IEEE EMBC, pp. 5035\u20135038 (2022)","DOI":"10.1109\/EMBC48229.2022.9871673"},{"key":"42_CR17","doi-asserted-by":"crossref","unstructured":"Perazzi, F., Pont-Tuset, J., McWilliams, B., Van\u00a0Gool, L., Gross, M., Sorkine-Hornung, A.: A benchmark dataset and evaluation methodology for video object segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 724\u2013732 (2016)","DOI":"10.1109\/CVPR.2016.85"},{"key":"42_CR18","unstructured":"Ravi, N., et\u00a0al.: SAM 2: segment anything in images and videos. arXiv preprint arXiv:2408.00714 (2024)"},{"key":"42_CR19","unstructured":"Ryali, C., et\u00a0al.: Hiera: a hierarchical vision transformer without the bells-and-whistles. In: International Conference on Machine Learning, pp. 29441\u201329454 (2023)"},{"key":"42_CR20","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: MobileNetv2: inverted residuals and linear bottlenecks. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"42_CR21","unstructured":"Sheik-Ali, S., Edgcombe, H., Paton, C.: Next-generation virtual and augmented reality in surgical education: a narrative review. Surg. Technol. Int. 33 (2019)"},{"issue":"6","key":"42_CR22","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3530811","volume":"55","author":"Y Tay","year":"2022","unstructured":"Tay, Y., Dehghani, M., Bahri, D., Metzler, D.: Efficient transformers: a survey. ACM Comput. Surv. 55(6), 1\u201328 (2022)","journal-title":"ACM Comput. Surv."},{"key":"42_CR23","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural. Inf. Process. Syst. 30 (2017)"},{"issue":"12","key":"42_CR24","doi-asserted-by":"publisher","first-page":"4457","DOI":"10.1109\/TMI.2024.3426953","volume":"43","author":"H Wang","year":"2024","unstructured":"Wang, H., et al.: Video-instrument synergistic network for referring video instrument segmentation in robotic surgery. IEEE Trans. Med. Imaging 43(12), 4457\u20134469 (2024)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"42_CR25","doi-asserted-by":"crossref","unstructured":"Wang, Z., Wu, J., Low, C.H., Jin, Y.: MedAgent-pro: towards multi-modal evidence-based medical diagnosis via reasoning agentic workflow. arXiv preprint arXiv:2503.18968 (2025)","DOI":"10.20944\/preprints202503.1751.v2"},{"key":"42_CR26","doi-asserted-by":"crossref","unstructured":"Wu, D., Wang, T., Zhang, Y., Zhang, X., Shen, J.: OnlineRefer: a simple online baseline for referring video object segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2761\u20132770 (2023)","DOI":"10.1109\/ICCV51070.2023.00259"},{"key":"42_CR27","doi-asserted-by":"crossref","unstructured":"Wu, J., Jiang, Y., Sun, P., Yuan, Z., Luo, P.: Language as queries for referring video object segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4974\u20134984 (2022)","DOI":"10.1109\/CVPR52688.2022.00492"},{"key":"42_CR28","doi-asserted-by":"crossref","unstructured":"Yan, S., et al.: Referred by multi-modality: a unified temporal transformer for video object segmentation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a038, pp. 6449\u20136457 (2024)","DOI":"10.1609\/aaai.v38i6.28465"},{"key":"42_CR29","doi-asserted-by":"crossref","unstructured":"Yang, Y., Xing, Z., Yu, L., Huang, C., Fu, H., Zhu, L.: Vivim: a video vision mamba for medical video segmentation. arXiv preprint arXiv:2401.14168 (2024)","DOI":"10.1109\/TCSVT.2025.3563411"},{"key":"42_CR30","doi-asserted-by":"crossref","unstructured":"Yue, W., Zhang, J., Hu, K., Xia, Y., Luo, J., Wang, Z.: SurgicalSAM: efficient class promptable surgical instrument segmentation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a038, pp. 6890\u20136898 (2024)","DOI":"10.1609\/aaai.v38i7.28514"},{"key":"42_CR31","unstructured":"Zeng, Z., et\u00a0al.: SurgVLM: a large vision-language model and systematic evaluation benchmark for surgical intelligence. arXiv preprint arXiv:2506.02555 (2025)"},{"key":"42_CR32","first-page":"28611","volume":"36","author":"Z Zhou","year":"2023","unstructured":"Zhou, Z., Alabi, O., Wei, M., Vercauteren, T., Shi, M.: Text promptable surgical instrument segmentation with vision-language models. Adv. Neural. Inf. Process. Syst. 36, 28611\u201328623 (2023)","journal-title":"Adv. Neural. Inf. 
Process. Syst."}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-05127-1_42","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T21:16:04Z","timestamp":1758316564000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-05127-1_42"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,20]]},"ISBN":["9783032051264","9783032051271"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-05127-1_42","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,20]]},"assertion":[{"value":"20 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
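For anyone consuming this record programmatically, here is a minimal sketch of fetching and unpacking it via the public Crossref REST API's works-by-DOI endpoint, which returns the same envelope shown above ({"status": "ok", "message-type": "work", "message": {...}}). All field names are taken from the record itself; only the variable names are illustrative, and a real client should also set a descriptive User-Agent per Crossref's etiquette guidelines.

```python
import json
import urllib.request

# DOI taken from the record above.
DOI = "10.1007/978-3-032-05127-1_42"

# The Crossref works endpoint returns the envelope shown above:
# {"status": "ok", "message-type": "work", "message": {...}}
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    envelope = json.load(resp)

work = envelope["message"]
print(work["title"][0])                         # chapter title
print(work["container-title"])                  # series and proceedings titles
print(work["page"])                             # "435-445"
print(work["published"]["date-parts"][0])       # [2025, 9, 20]
print(len(work.get("reference", [])))           # 32 deposited references

# "author" is a list of {"given", "family", "sequence", ...} objects.
for a in work["author"]:
    print(f'{a["given"]} {a["family"]} ({a["sequence"]})')
```

Note that "reference" entries are heterogeneous: some carry only an "unstructured" citation string (e.g. arXiv preprints), while journal entries add structured fields such as "DOI", "volume", "first-page", and "journal-title", so any parser should treat every key as optional.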