{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:24:15Z","timestamp":1767338655112,"version":"3.40.3"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031250651"},{"type":"electronic","value":"9783031250668"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-25066-8_32","type":"book-chapter","created":{"date-parts":[[2023,2,17]],"date-time":"2023-02-17T08:18:05Z","timestamp":1676621885000},"page":"556-568","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Surgical Workflow Recognition: From Analysis of\u00a0Challenges to\u00a0Architectural Study"],"prefix":"10.1007","author":[{"given":"Tobias","family":"Czempiel","sequence":"first","affiliation":[]},{"given":"Aidean","family":"Sharghi","sequence":"additional","affiliation":[]},{"given":"Magdalini","family":"Paschali","sequence":"additional","affiliation":[]},{"given":"Nassir","family":"Navab","sequence":"additional","affiliation":[]},{"given":"Omid","family":"Mohareri","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,2,18]]},"reference":[{"key":"32_CR1","unstructured":"Bahdanau, D., Cho, K.H., Bengio, Y.: Neural machine translation by jointly 
learning to align and translate. In: 3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings, pp. 1\u201315 (2015)"},{"key":"32_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"122","DOI":"10.1007\/978-3-030-87735-4_12","volume-title":"Uncertainty for Safe Utilization of Machine Learning in Medical Imaging, and Perinatal Imaging, Placental and Preterm Image Analysis","author":"C Berger","year":"2021","unstructured":"Berger, C., Paschali, M., Glocker, B., Kamnitsas, K.: Confidence-based out-of-distribution detection: a comparative study and\u00a0analysis. In: Sudre, C.H., et al. (eds.) UNSURE\/PIPPI -2021. LNCS, vol. 12959, pp. 122\u2013132. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87735-4_12"},{"key":"32_CR3","doi-asserted-by":"crossref","unstructured":"Berlet, M., et al.: Surgical reporting for laparoscopic cholecystectomy based on phase annotation by a convolutional neural network (CNN) and the phenomenon of phase flickering: a proof of concept. Int. J. Comput. Assist. Radiol. Surg. 17, 1991\u20131999 (2022)","DOI":"10.1007\/s11548-022-02680-6"},{"key":"32_CR4","unstructured":"Bodenstedt, S., et al.: Unsupervised temporal context learning using convolutional neural networks for laparoscopic workflow analysis (February 2017). http:\/\/arxiv.org\/abs\/1702.03684"},{"key":"32_CR5","doi-asserted-by":"publisher","unstructured":"Bodenstedt, S., et al.: Prediction of laparoscopic procedure duration using unlabeled, multimodal sensor data. Int. J. Comput. Assist. Radiol. Surg. 14(6), 1089\u20131095 (2019). https:\/\/doi.org\/10.1007\/s11548-019-01966-6","DOI":"10.1007\/s11548-019-01966-6"},{"key":"32_CR6","doi-asserted-by":"publisher","unstructured":"Cubuk, E.D., Zoph, B., Shlens, J., Le, Q.V.: Randaugment: practical automated data augmentation with a reduced search space. 
In: IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops 2020-June, 3008\u20133017 (2020). https:\/\/doi.org\/10.1109\/CVPRW50498.2020.00359","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"32_CR7","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"343","DOI":"10.1007\/978-3-030-59716-0_33","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2020","author":"T Czempiel","year":"2020","unstructured":"Czempiel, T., et al.: TeCNO: surgical phase recognition with multi-stage temporal convolutional networks. In: Martel, A.L., Abolmaesumi, P., Stoyanov, D., Mateus, D., Zuluaga, M.A., Zhou, S.K., Racoceanu, D., Joskowicz, L. (eds.) MICCAI 2020. LNCS, vol. 12263, pp. 343\u2013352. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-59716-0_33"},{"key":"32_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"604","DOI":"10.1007\/978-3-030-87202-1_58","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"T Czempiel","year":"2021","unstructured":"Czempiel, T., Paschali, M., Ostler, D., Kim, S.T., Busam, B., Navab, N.: OperA: attention-regularized transformers for surgical phase recognition. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12904, pp. 604\u2013614. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87202-1_58"},{"key":"32_CR9","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"32_CR10","doi-asserted-by":"publisher","unstructured":"Farha, Y.A., Gall, J.: MS-TCN: multi-stage Temporal Convolutional Network for Action Segmentation. 
In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). vol. 2019-June, pp. 3570\u20133579. IEEE (June 2019). https:\/\/doi.org\/10.1109\/CVPR.2019.00369, https:\/\/ieeexplore.ieee.org\/document\/8953830","DOI":"10.1109\/CVPR.2019.00369"},{"key":"32_CR11","doi-asserted-by":"publisher","unstructured":"Fathi, A., Ren, X., Rehg, J.M.: Learning to recognize objects in egocentric activities. In: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pp. 3281\u20133288 (2011). https:\/\/doi.org\/10.1109\/CVPR.2011.5995444","DOI":"10.1109\/CVPR.2011.5995444"},{"key":"32_CR12","doi-asserted-by":"publisher","unstructured":"Feichtenhofer, C.: X3D: expanding architectures for efficient video recognition. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 200\u2013210. IEEE (June 2020). https:\/\/doi.org\/10.1109\/CVPR42600.2020.00028","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"32_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"593","DOI":"10.1007\/978-3-030-87202-1_57","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"X Gao","year":"2021","unstructured":"Gao, X., Jin, Y., Long, Y., Dou, Q., Heng, P.-A.: Trans-SVNet: accurate phase recognition from surgical videos via\u00a0hybrid embedding aggregation transformer. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12904, pp. 593\u2013603. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87202-1_57"},{"key":"32_CR14","doi-asserted-by":"crossref","unstructured":"Garrow, C.R., et al.: Machine learning for surgical phase recognition: a systematic review. Ann. Surg. 273(4), 684\u2013693 (2021). 
https:\/\/journals.lww.com\/annalsofsurgery\/pages\/default.aspx, https:\/\/pubmed.ncbi.nlm.nih.gov\/33201088\/","DOI":"10.1097\/SLA.0000000000004425"},{"key":"32_CR15","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 770\u2013778. IEEE (June 2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90, https:\/\/image-net.org\/challenges\/LSVRC\/2015\/, https:\/\/ieeexplore.ieee.org\/document\/7780459","DOI":"10.1109\/CVPR.2016.90"},{"key":"32_CR16","doi-asserted-by":"publisher","unstructured":"Huaulm\u00e9, A., Jannin, P., Reche, F., Faucheron, J.L., Moreau-Gaudry, A., Voros, S.: Offline identification of surgical deviations in laparoscopic rectopexy. Artif. Intell. Med. 104(2019) (2020). https:\/\/doi.org\/10.1016\/j.artmed.2020.101837","DOI":"10.1016\/j.artmed.2020.101837"},{"key":"32_CR17","doi-asserted-by":"crossref","unstructured":"Idrees, H., et al.: The THUMOS challenge on action recognition for videos \u201cin the wild\u201d. Comput. Vis. Image Underst. 155, 1\u201323 (2017)","DOI":"10.1016\/j.cviu.2016.10.018"},{"issue":"5","key":"32_CR18","doi-asserted-by":"publisher","first-page":"1114","DOI":"10.1109\/TMI.2017.2787657","volume":"37","author":"Y Jin","year":"2018","unstructured":"Jin, Y., Dou, Q., Chen, H., Yu, L., Qin, J., Fu, C.W., Heng, P.A.: SV-RCNet: workflow recognition from surgical videos using recurrent convolutional network. IEEE Trans. Med. Imaging 37(5), 1114\u20131126 (2018). https:\/\/doi.org\/10.1109\/TMI.2017.2787657","journal-title":"IEEE Trans. Med. Imaging"},{"key":"32_CR19","doi-asserted-by":"publisher","unstructured":"Jin, Y., et al.: Multi-task recurrent convolutional network with correlation loss for surgical video analysis. Med. Image Anal. 59 (2020). 
https:\/\/doi.org\/10.1016\/j.media.2019.101572","DOI":"10.1016\/j.media.2019.101572"},{"issue":"6","key":"32_CR20","doi-asserted-by":"publisher","first-page":"881","DOI":"10.1007\/s11548-016-1379-2","volume":"11","author":"D Kati\u0107","year":"2016","unstructured":"Kati\u0107, D., et al.: Bridging the gap between formal and experience-based knowledge for context-aware laparoscopy. Int. J. Comput. Assist. Radiol. Surg. 11(6), 881\u2013888 (2016). https:\/\/doi.org\/10.1007\/s11548-016-1379-2","journal-title":"Int. J. Comput. Assist. Radiol. Surg."},{"key":"32_CR21","unstructured":"Kay, W., et al.: The Kinetics Human Action Video Dataset (May 2017). http:\/\/arxiv.org\/abs\/1705.06950"},{"key":"32_CR22","doi-asserted-by":"publisher","unstructured":"Kuehne, H., Arslan, A., Serre, T.: The language of actions: Recovering the syntax and semantics of goal-directed human activities. In: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pp. 780\u2013787 (2014). https:\/\/doi.org\/10.1109\/CVPR.2014.105","DOI":"10.1109\/CVPR.2014.105"},{"key":"32_CR23","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 9992\u201310002 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"32_CR24","doi-asserted-by":"crossref","unstructured":"Padoy, N., Blum, T., Ahmadi, S.A., Feussner, H., Berger, M.O., Navab, N.: Statistical modeling and recognition of surgical workflow. Med. Image Anal. 
16(3), 632\u2013641 (2012)","DOI":"10.1016\/j.media.2010.10.001"},{"key":"32_CR25","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"493","DOI":"10.1007\/978-3-030-00928-1_56","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2018","author":"M Paschali","year":"2018","unstructured":"Paschali, M., Conjeti, S., Navarro, F., Navab, N.: Generalizability vs. robustness: investigating medical imaging networks using adversarial examples. In: Frangi, A.F., Schnabel, J.A., Davatzikos, C., Alberola-L\u00f3pez, C., Fichtinger, G. (eds.) MICCAI 2018. LNCS, vol. 11070, pp. 493\u2013501. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00928-1_56"},{"key":"32_CR26","unstructured":"Reinke, A., et al.: Metrics reloaded-a new recommendation framework for biomedical image analysis validation. In: Medical Imaging with Deep Learning (2022)"},{"key":"32_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"385","DOI":"10.1007\/978-3-030-59716-0_37","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2020","author":"A Sharghi","year":"2020","unstructured":"Sharghi, A., Haugerud, H., Oh, D., Mohareri, O.: Automatic operating room surgical activity recognition for robot-assisted surgery. In: Martel, A.L., et al. (eds.) MICCAI 2020. LNCS, vol. 12263, pp. 385\u2013395. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-59716-0_37"},{"key":"32_CR28","unstructured":"Smaira, L., Carreira, J., Noland, E., Clancy, E., Wu, A., Zisserman, A.: A Short Note on the Kinetics-700-2020 Human Action Dataset. arXiv (i) (2020). http:\/\/arxiv.org\/abs\/2010.10864"},{"key":"32_CR29","unstructured":"Srivastav, V., Issenhuth, T., Kadkhodamohammadi, A., de Mathelin, M., Gangi, A., Padoy, N.: MVOR: a multi-view RGB-D operating room dataset for 2D and 3D human pose estimation. In: MICCAI-LABELS, pp. 1\u201310 (2018). 
http:\/\/arxiv.org\/abs\/1808.08180"},{"key":"32_CR30","doi-asserted-by":"publisher","unstructured":"Twinanda, A.P., Shehata, S., Mutter, D., Marescaux, J., De Mathelin, M., Padoy, N.: EndoNet: a Deep Architecture for Recognition Tasks on Laparoscopic Videos. IEEE Trans. Med. Imaging 36, 86\u201397 (2017). https:\/\/doi.org\/10.1109\/TMI.2016.2593957","DOI":"10.1109\/TMI.2016.2593957"},{"key":"32_CR31","unstructured":"Twinanda, A.P., Padoy, N., Troccaz, M.J., Hager, G.: Vision-based Approaches for surgical activity recognition using laparoscopic and RGBD Videos. Thesis (7357) (2017), https:\/\/theses.hal.science\/tel-01557522\/document"},{"key":"32_CR32","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Proceedings of Advances in Neural Information Processing Systems 2017-December (NIPS), pp. 5999\u20136009 (2017)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-25066-8_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T13:23:43Z","timestamp":1709817823000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-25066-8_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031250651","9783031250668"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-25066-8_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"18 February 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference 
Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on 
Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}