{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T16:21:15Z","timestamp":1773246075331,"version":"3.50.1"},"publisher-location":"Cham","reference-count":31,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032060037","type":"print"},{"value":"9783032060044","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T00:00:00Z","timestamp":1758499200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T00:00:00Z","timestamp":1758499200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-06004-4_18","type":"book-chapter","created":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T17:21:59Z","timestamp":1758561719000},"page":"177-186","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Can DeepSeek Reason Like a\u00a0Surgeon? An Empirical Evaluation for\u00a0Vision-Language Understanding in\u00a0Robotic-Assisted Surgery"],"prefix":"10.1007","author":[{"given":"Boyi","family":"Ma","sequence":"first","affiliation":[]},{"given":"Yanguang","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Jie","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Guankun","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Kun","family":"Yuan","sequence":"additional","affiliation":[]},{"given":"Tong","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Long","family":"Bai","sequence":"additional","affiliation":[]},{"given":"Hongliang","family":"Ren","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,22]]},"reference":[{"key":"18_CR1","unstructured":"Allan, M., et\u00a0al.: 2018 robotic scene segmentation challenge. arXiv preprint arXiv:2001.11190 (2020)"},{"key":"18_CR2","doi-asserted-by":"crossref","unstructured":"Bai, L., Islam, M., Ren, H.: Revisiting distillation for continual learning on visual question localized-answering in robotic surgery. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 68\u201378. Springer (2023)","DOI":"10.1007\/978-3-031-43996-4_7"},{"key":"18_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102602","volume":"113","author":"L Bai","year":"2025","unstructured":"Bai, L., Wang, G., Islam, M., Seenivasan, L., Wang, A., Ren, H.: Surgical-VQLA++: adversarial contrastive learning for calibrated robust visual question-localized answering in robotic surgery. Inf. Fusion 113, 102602 (2025)","journal-title":"Inf. Fusion"},{"key":"18_CR4","doi-asserted-by":"crossref","unstructured":"Chen, T., Yuan, K., Srivastav, V., Navab, N., Padoy, N.: Text-driven adaptation of foundation models for few-shot surgical workflow analysis. arXiv preprint arXiv:2501.09555 (2025)","DOI":"10.1007\/s11548-025-03341-0"},{"key":"18_CR5","unstructured":"Chen, X., et al.: Janus-pro: unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811 (2025)"},{"key":"18_CR6","unstructured":"Chen, Z., et al.: VS-assistant: versatile surgery assistant on the demand of surgeons. arXiv preprint arXiv:2405.08272 (2024)"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Ding, D., Yao, T., Luo, R., Sun, X.: Visual question answering in robotic surgery: a comprehensive review. IEEE Access (2025)","DOI":"10.1109\/ACCESS.2024.3525145"},{"issue":"7","key":"18_CR8","doi-asserted-by":"publisher","first-page":"993","DOI":"10.1109\/JPROC.2022.3176828","volume":"110","author":"P Fiorini","year":"2022","unstructured":"Fiorini, P., Goldberg, K.Y., Liu, Y., Taylor, R.H.: Concepts and trends in autonomy for robot-assisted surgery. Proc. IEEE 110(7), 993\u20131011 (2022)","journal-title":"Proc. IEEE"},{"key":"18_CR9","unstructured":"Ge, Y., et al.: Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218 (2023)"},{"key":"18_CR10","unstructured":"Guo, D., et\u00a0al.: DeepSeek-R1: incentivizing reasoning capability in LLMs via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)"},{"key":"18_CR11","doi-asserted-by":"crossref","unstructured":"Hao, P., Wang, H., Yang, G., Zhu, L.: Enhancing visual reasoning with LLM-powered knowledge graphs for visual question localized-answering in robotic surgery. IEEE J. Biomed. Health Inform. (2025)","DOI":"10.1109\/JBHI.2025.3538324"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Huang, Y., et al.: Endo-4DGS: endoscopic monocular scene reconstruction with 4d gaussian splatting. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 197\u2013207. Springer (2024)","DOI":"10.1007\/978-3-031-72089-5_19"},{"key":"18_CR13","unstructured":"Hurst, A., et\u00a0al.: GPT-4O system card. arXiv preprint arXiv:2410.21276 (2024)"},{"key":"18_CR14","unstructured":"Jiang, A.Q., et al.: Mistral 7b. arXiv preprint arXiv:2310.06825 (2023)"},{"key":"18_CR15","unstructured":"Lai, Y., Zhong, J., Li, M., Zhao, S., Yang, X.: Med-R1: reinforcement learning for generalizable medical reasoning in vision-language models. arXiv preprint arXiv:2503.13939 (2025)"},{"key":"18_CR16","unstructured":"Li, J., et al.: LLaVA-SURG: towards multimodal surgical assistant via structured surgical video learning. arXiv preprint arXiv:2408.07981 (2024)"},{"key":"18_CR17","unstructured":"Liu, A., et\u00a0al.: DeepSeek-V3 technical report. arXiv preprint arXiv:2412.19437 (2024)"},{"issue":"1","key":"18_CR18","doi-asserted-by":"publisher","first-page":"61","DOI":"10.1038\/s41591-023-02732-7","volume":"30","author":"HJ Marcus","year":"2024","unstructured":"Marcus, H.J., et al.: The ideal framework for surgical robotics: development, comparative evaluation and long-term monitoring. Nat. Med. 30(1), 61\u201375 (2024)","journal-title":"Nat. Med."},{"key":"18_CR19","unstructured":"Nwoye, C.I., Padoy, N.: Data splits and metrics for benchmarking methods on surgical action triplet datasets. arXiv preprint arXiv:2204.05235 (2022)"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"Pan, J., et al.: MedVLM-R1: incentivizing medical reasoning capability of vision-language models (VLMs) via reinforcement learning. arXiv preprint arXiv:2502.19634 (2025)","DOI":"10.1007\/978-3-032-04981-0_32"},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"18_CR22","doi-asserted-by":"crossref","unstructured":"Seenivasan, L., Islam, M., Kannan, G., Ren, H.: SurgicalGPT: end-to-end language-vision u for visual question answering in surgery. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 281\u2013290. Springer (2023)","DOI":"10.1007\/978-3-031-43996-4_27"},{"key":"18_CR23","doi-asserted-by":"crossref","unstructured":"Shen, Y., et al.: Medical multimodal model stealing attacks via adversarial domain alignment. arXiv preprint arXiv:2502.02438 (2025)","DOI":"10.1609\/aaai.v39i7.32734"},{"key":"18_CR24","unstructured":"Wang, G., et al.: Surgical-LVLM: learning to adapt large vision-language model for grounded visual question answering in robotic surgery. arXiv preprint arXiv:2405.10948 (2024)"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Wang, G., et\u00a0al.: Endochat: grounded multimodal large language model for endoscopic surgery. arXiv preprint arXiv:2501.11347 (2025)","DOI":"10.1016\/j.media.2025.103789"},{"key":"18_CR26","unstructured":"Wang, G., et al.: CoPESD: a multi-level surgical motion dataset for training large vision-language models to co-pilot endoscopic submucosal dissection. arXiv preprint arXiv:2410.07540 (2024)"},{"key":"18_CR27","doi-asserted-by":"crossref","unstructured":"Wang, H., Jin, Y., Zhu, L.: Dynamic interactive relation capturing via scene graph learning for robotic surgical report generation. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 2702\u20132709. IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10160647"},{"key":"18_CR28","unstructured":"Wang, P., et\u00a0al.: Qwen2-VL: enhancing vision-language model\u2019s perception of the world at any resolution. arXiv preprint arXiv:2409.12191 (2024)"},{"key":"18_CR29","unstructured":"Wu, Z., et\u00a0al.: DeepSeek-VL2: mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302 (2024)"},{"issue":"7","key":"18_CR30","doi-asserted-by":"publisher","first-page":"1409","DOI":"10.1007\/s11548-024-03141-y","volume":"19","author":"K Yuan","year":"2024","unstructured":"Yuan, K., Kattel, M., Lavanchy, J.L., Navab, N., Srivastav, V., Padoy, N.: Advancing surgical VQA with scene graph knowledge. Int. J. Comput. Assist. Radiol. Surg. 19(7), 1409\u20131417 (2024)","journal-title":"Int. J. Comput. Assist. Radiol. Surg."},{"issue":"7","key":"18_CR31","doi-asserted-by":"publisher","first-page":"504","DOI":"10.1177\/02783649231179753","volume":"42","author":"F Zhong","year":"2023","unstructured":"Zhong, F., Liu, Y.H.: Integrated planning and control of robotic surgical instruments for task autonomy. Int. J. Robot. Res. 42(7), 504\u2013536 (2023)","journal-title":"Int. J. Robot. Res."}],"container-title":["Lecture Notes in Computer Science","AI for Clinical Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-06004-4_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T17:22:11Z","timestamp":1758561731000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-06004-4_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,22]]},"ISBN":["9783032060037","9783032060044"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-06004-4_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,22]]},"assertion":[{"value":"22 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CREATE","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Clinical-Driven Robotics and Embodied AI Technology","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"create2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sites.google.com\/view\/create-2025\/home","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}