{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:45:06Z","timestamp":1777657506072,"version":"3.51.4"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032076939","type":"print"},{"value":"9783032076946","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,30]],"date-time":"2025-09-30T00:00:00Z","timestamp":1759190400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,30]],"date-time":"2025-09-30T00:00:00Z","timestamp":1759190400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-07694-6_12","type":"book-chapter","created":{"date-parts":[[2025,9,29]],"date-time":"2025-09-29T18:04:54Z","timestamp":1759169094000},"page":"119-129","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Towards Robust Algorithms for\u00a0Surgical Phase Recognition via\u00a0Digital Twin Representation"],"prefix":"10.1007","author":[{"given":"Hao","family":"Ding","sequence":"first","affiliation":[]},{"given":"Yuqian","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Wenzheng","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Xinyu","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xu","family":"Lian","sequence":"additional","affiliation":[]},{"given":"Chenhao","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Hongchao","family":"Shu","sequence":"additional","affiliation":[]},{"given":"Ji 
Woong","family":"Kim","sequence":"additional","affiliation":[]},{"given":"Axel","family":"Krieger","sequence":"additional","affiliation":[]},{"given":"Mathias","family":"Unberath","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,30]]},"reference":[{"key":"12_CR1","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding?. In: ICML, vol.\u00a02, p.\u00a04 (2021)"},{"key":"12_CR2","doi-asserted-by":"crossref","unstructured":"Colleoni, E., Edwards, P., Stoyanov, D.: Synthetic and real inputs for tool segmentation in robotic surgery. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 700\u2013710. Springer (2020)","DOI":"10.1007\/978-3-030-59716-0_67"},{"key":"12_CR3","unstructured":"Ding, H., et al.: SegSTRONG-C: segmenting surgical tools robustly on non-adversarial generated corruptions\u2013an Endovis\u2019 24 challenge. arXiv preprint arXiv:2407.11906 (2024)"},{"key":"12_CR4","doi-asserted-by":"crossref","unstructured":"Ding, H., Seenivasan, L., Killeen, B.D., Cho, S.M., Unberath, M.: Digital twins as a unifying framework for surgical data science: the enabling role of geometric scene understanding. Artif. Intell. Surg. 4(3), 109\u2013138 (2024)","DOI":"10.20517\/ais.2024.16"},{"key":"12_CR5","unstructured":"Ding, H., et al.: Towards robust automation of surgical systems via digital twin-based scene representations from foundation models. arXiv preprint arXiv:2409.13107 (2024)"},{"issue":"6","key":"12_CR6","doi-asserted-by":"publisher","first-page":"1009","DOI":"10.1007\/s11548-023-02872-8","volume":"18","author":"H Ding","year":"2023","unstructured":"Ding, H., Wu, J.Y., Li, Z., Unberath, M.: Rethinking causality-driven robot tool segmentation with temporal constraints. Int. J. Comput. Assist. Radiol. Surg. 18(6), 1009\u20131016 (2023)","journal-title":"Int. J. Comput. Assist. Radiol. 
Surg."},{"key":"12_CR7","doi-asserted-by":"crossref","unstructured":"Ding, H., Zhang, J., Kazanzides, P., Wu, J.Y., Unberath, M.: Carts: causality-driven robot tool segmentation from vision and kinematics data. In: Proc. MICCAI, pp. 387\u2013398. Springer (2022)","DOI":"10.1007\/978-3-031-16449-1_37"},{"key":"12_CR8","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: Proc. ICLR (2021)"},{"key":"12_CR9","unstructured":"Drenkow, N., Sani, N., Shpitser, I., Unberath, M.: A systematic review of robustness in deep learning for computer vision: mind the gap?. arXiv preprint arXiv:2112.00639 (2021)"},{"key":"12_CR10","doi-asserted-by":"crossref","unstructured":"Gao, X., Jin, Y., Long, Y., Dou, Q., Heng, P.: Trans-SVNet: accurate phase recognition from surgical videos via hybrid embedding aggregation transformer. In: Proc. MICCAI (2021)","DOI":"10.1007\/978-3-030-87202-1_57"},{"key":"12_CR11","doi-asserted-by":"crossref","unstructured":"Hein, J., et\u00a0al.: Creating a digital twin of spinal surgery: a proof of concept. In: Proc. CVPR, pp. 2355\u20132364 (2024)","DOI":"10.1109\/CVPRW63382.2024.00241"},{"key":"12_CR12","unstructured":"Hendrycks, D., Dietterich, T.: Benchmarking neural network robustness to common corruptions and perturbations. arXiv preprint arXiv:1903.12261 (2019)"},{"key":"12_CR13","doi-asserted-by":"crossref","unstructured":"Jin, Y., Dou, Q., Chen, H., Yu, L., Qin, J., Fu, C., Heng, P.: SV-RCNet: workflow recognition from surgical videos using recurrent convolutional network. IEEE Trans. Med. Imag. 37(5) (2018)","DOI":"10.1109\/TMI.2017.2787657"},{"key":"12_CR14","doi-asserted-by":"crossref","unstructured":"Kazanzides, P., Chen, Z., Deguet, A., Fischer, G.S., Taylor, R.H., DiMaio, S.P.: An open-source research kit for the da Vinci\u00ae Surgical System. In: Proc. ICRA, pp. 6434\u20136439. 
IEEE (2014)","DOI":"10.1109\/ICRA.2014.6907809"},{"key":"12_CR15","unstructured":"Killeen, B.D., et al.: Stand in surgeon\u2019s shoes: virtual reality cross-training to enhance teamwork in surgery. In: Int. J. CARS, pp. 1\u201310 (2024)"},{"key":"12_CR16","doi-asserted-by":"crossref","unstructured":"Kleinbeck, C., Zhang, H., Killeen, B.D., Roth, D., Unberath, M.: Neural digital twins: reconstructing complex medical environments for spatial planning in virtual reality. Int. J. CARS 19, 1\u201312 (2024)","DOI":"10.1007\/s11548-024-03143-w"},{"key":"12_CR17","doi-asserted-by":"crossref","unstructured":"Liu, Y., et al.: SKiT: a fast key information video transformer for online surgical phase recognition. In: Proc. ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01927"},{"key":"12_CR18","doi-asserted-by":"crossref","unstructured":"Maier-Hein, L., et al.: Surgical data science-from concepts toward clinical translation. Med. Image Anal. 76, 102306 (2022)","DOI":"10.1016\/j.media.2021.102306"},{"key":"12_CR19","doi-asserted-by":"crossref","unstructured":"Oh, K.H., et al.: Comprehensive robotic cholecystectomy dataset (CRCD): integrating kinematics, pedal signals, and endoscopic videos. In: Proc ISMR (2024)","DOI":"10.1109\/ISMR63436.2024.10585836"},{"key":"12_CR20","unstructured":"Ravi, N., et\u00a0al.: SAM 2: segment anything in images and videos. arXiv preprint arXiv:2408.00714 (2024)"},{"key":"12_CR21","doi-asserted-by":"crossref","unstructured":"Shen, Y., Ding, H., Shao, X., Unberath, M.: Performance and non-adversarial robustness of the segment anything model 2 in surgical video segmentation. arXiv preprint arXiv:2408.04098 (2024)","DOI":"10.1117\/12.3047383"},{"key":"12_CR22","doi-asserted-by":"crossref","unstructured":"Shu, H., et al.: Twin-s: a digital twin for skull base surgery. Int. J. 
CARS 18(6), 1077\u20131084 (2023)","DOI":"10.1007\/s11548-023-02863-9"},{"key":"12_CR23","doi-asserted-by":"crossref","unstructured":"Twinanda, A.P., Shehata, S., Mutter, D., Marescaux, J., de Mathelin, M., Padoy, N.: EndoNet: a deep architecture for recognition tasks on laparoscopic videos. IEEE Trans. Med. Imag. 36(1) (2017)","DOI":"10.1109\/TMI.2016.2593957"},{"key":"12_CR24","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: AutoLaparo: a new dataset of integrated multi-tasks for image-guided surgical automation in laparoscopic hysterectomy. In: Proc. MICCAI (2022)","DOI":"10.1007\/978-3-031-16449-1_46"},{"key":"12_CR25","doi-asserted-by":"crossref","unstructured":"Yang, L., Kang, B., Huang, Z., Xu, X., Feng, J., Zhao, H.: Depth anything: unleashing the power of large-scale unlabeled data. In: Proc. CVPR, pp. 10371\u201310381 (2024)","DOI":"10.1109\/CVPR52733.2024.00987"},{"key":"12_CR26","doi-asserted-by":"crossref","unstructured":"Yang, S., Luo, L., Wang, Q., Chen, H.: SurgFormer: surgical transformer with hierarchical temporal attention for surgical phase recognition. In: Proc. MICCAI, pp. 606\u2013616. Springer (2024)","DOI":"10.1007\/978-3-031-72089-5_57"},{"key":"12_CR27","doi-asserted-by":"crossref","unstructured":"Yi, F., Jiang, T.: Hard frame detection and online mapping for surgical phase recognition. In: Proc. 
MICCAI (2019)","DOI":"10.1007\/978-3-030-32254-0_50"}],"container-title":["Lecture Notes in Computer Science","Digital Twin for Healthcare"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-07694-6_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,29]],"date-time":"2025-09-29T18:05:07Z","timestamp":1759169107000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-07694-6_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,30]]},"ISBN":["9783032076939","9783032076946"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-07694-6_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,30]]},"assertion":[{"value":"30 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"DT4H","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Digital Twin for Healthcare","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"dt4h2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/digitaltwinforhealthcare.com\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}