{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T17:28:47Z","timestamp":1770917327491,"version":"3.50.1"},"publisher-location":"Cham","reference-count":36,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031250651","type":"print"},{"value":"9783031250668","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-25066-8_22","type":"book-chapter","created":{"date-parts":[[2023,2,17]],"date-time":"2023-02-17T08:18:05Z","timestamp":1676621885000},"page":"406-421","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Bounded Future MS-TCN++ for\u00a0Surgical Gesture Recognition"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0800-5936","authenticated-orcid":false,"given":"Adam","family":"Goldbraikh","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0719-1327","authenticated-orcid":false,"given":"Netanell","family":"Avisdris","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9139-8082","authenticated-orcid":false,"given":"Carla M.","family":"Pugh","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1469-5985","authenticated-orcid":false,"given":"Shlomi","family":"Laufer","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,2,18]]},"reference":[{"issue":"1","key":"22_CR1","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/s10107-003-0436-0","volume":"97","author":"S Albers","year":"2003","unstructured":"Albers, S.: Online algorithms: a survey. Math. Program. 97(1), 3\u201326 (2003)","journal-title":"Math. Program."},{"issue":"CSCW1","key":"22_CR2","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3449214","volume":"5","author":"I Avellino","year":"2021","unstructured":"Avellino, I., Nozari, S., Canlorbe, G., Jansen, Y.: Surgical video summarization: multifarious uses, summarization process and ad-hoc coordination. Proc. ACM Hum.-Comput. Interact. 5(CSCW1), 1\u201323 (2021)","journal-title":"Proc. ACM Hum.-Comput. Interact."},{"key":"22_CR3","doi-asserted-by":"publisher","first-page":"1497","DOI":"10.1007\/s11548-022-02691-3","volume":"17","author":"K Basiev","year":"2022","unstructured":"Basiev, K., Goldbraikh, A., Pugh, C.M., Laufer, S.: Open surgery tool classification and hand utilization using a multi-camera system. Int. J. Comput. Assisted Radiol. Surg. 17, 1497\u20131505 (2022)","journal-title":"Int. J. Comput. Assisted Radiol. Surg."},{"key":"22_CR4","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo Vadis, action recognition? A new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
6299\u20136308 (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"22_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"343","DOI":"10.1007\/978-3-030-59716-0_33","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2020","author":"T Czempiel","year":"2020","unstructured":"Czempiel, T., et al.: TeCNO: surgical phase recognition with multi-stage temporal convolutional networks. In: Martel, A.L., et al. (eds.) MICCAI 2020. LNCS, vol. 12263, pp. 343\u2013352. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-59716-0_33"},{"key":"22_CR6","doi-asserted-by":"crossref","unstructured":"Donahue, J., et al.: Long-term recurrent convolutional networks for visual recognition and description. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2625\u20132634 (2015)","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"22_CR7","doi-asserted-by":"publisher","first-page":"1325","DOI":"10.1111\/2041-210X.12584","volume":"7","author":"O Friard","year":"2016","unstructured":"Friard, O., Gamba, M.: Boris: a free, versatile open-source event-logging software for video\/audio coding and live observations. Methods Ecol. Evol. 7, 1325\u20131330 (2016). https:\/\/doi.org\/10.1111\/2041-210X.12584","journal-title":"Methods Ecol. Evol."},{"key":"22_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"467","DOI":"10.1007\/978-3-030-32254-0_52","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"I Funke","year":"2019","unstructured":"Funke, I., Bodenstedt, S., Oehme, F., von Bechtolsheim, F., Weitz, J., Speidel, S.: Using 3D convolutional neural networks to learn spatiotemporal features for automatic surgical gesture recognition in video. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11768, pp. 467\u2013475. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32254-0_52"},{"issue":"7","key":"22_CR9","doi-asserted-by":"publisher","first-page":"1217","DOI":"10.1007\/s11548-019-01995-1","volume":"14","author":"I Funke","year":"2019","unstructured":"Funke, I., Mees, S.T., Weitz, J., Speidel, S.: Video-based surgical skill assessment using 3D convolutional neural networks. Int. J. Comput. Assist. Radiol. Surg. 14(7), 1217\u20131225 (2019)","journal-title":"Int. J. Comput. Assist. Radiol. Surg."},{"issue":"3","key":"22_CR10","doi-asserted-by":"publisher","first-page":"437","DOI":"10.1007\/s11548-022-02559-6","volume":"17","author":"A Goldbraikh","year":"2022","unstructured":"Goldbraikh, A., D\u2019Angelo, A.L., Pugh, C.M., Laufer, S.: Video-based fully automatic assessment of open surgery suturing skills. Int. J. Comput. Assist. Radiol. Surg. 17(3), 437\u2013448 (2022)","journal-title":"Int. J. Comput. Assist. Radiol. Surg."},{"key":"22_CR11","doi-asserted-by":"publisher","first-page":"965","DOI":"10.1007\/s11548-022-02615-1","volume":"17","author":"A Goldbraikh","year":"2022","unstructured":"Goldbraikh, A., Volk, T., Pugh, C.M., Laufer, S.: Using open surgery simulation kinematic data for tool and gesture recognition. Int. J. Comput. Assisted Radiol. Surg. 17, 965\u2013979 (2022)","journal-title":"Int. J. Comput. Assisted Radiol. 
Surg."},{"key":"22_CR12","doi-asserted-by":"publisher","first-page":"280","DOI":"10.1016\/j.patrec.2020.03.016","volume":"133","author":"C Huang","year":"2020","unstructured":"Huang, C., et al.: Sample imbalance disease classification model based on association rule feature selection. Pattern Recogn. Lett. 133, 280\u2013286 (2020)","journal-title":"Pattern Recogn. Lett."},{"key":"22_CR13","unstructured":"Hutter, F., Hoos, H., Leyton-Brown, K.: An efficient approach for assessing hyperparameter importance. In: International Conference on Machine Learning, pp. 754\u2013762. PMLR (2014)"},{"key":"22_CR14","doi-asserted-by":"crossref","unstructured":"Jacob, M.G., Li, Y.T., Wachs, J.P.: A gesture driven robotic scrub nurse. In: 2011 IEEE International Conference on Systems, Man, and Cybernetics, pp. 2039\u20132044. IEEE (2011)","DOI":"10.1109\/ICSMC.2011.6083972"},{"issue":"1","key":"22_CR15","doi-asserted-by":"publisher","first-page":"122","DOI":"10.1097\/SLA.0000000000002863","volume":"271","author":"JJ Jung","year":"2020","unstructured":"Jung, J.J., J\u00fcni, P., Lebovic, G., Grantcharov, T.: First-year analysis of the operating room black box study. Ann. Surg. 271(1), 122\u2013127 (2020)","journal-title":"Ann. Surg."},{"key":"22_CR16","doi-asserted-by":"crossref","unstructured":"Lea, C., Flynn, M.D., Vidal, R., Reiter, A., Hager, G.D.: Temporal convolutional networks for action segmentation and detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 156\u2013165 (2017)","DOI":"10.1109\/CVPR.2017.113"},{"key":"22_CR17","doi-asserted-by":"crossref","unstructured":"Lea, C., Vidal, R., Hager, G.D.: Learning convolutional action primitives for fine-grained action recognition. In: 2016 IEEE International Conference on Robotics and Automation (ICRA), pp. 1642\u20131649. IEEE (2016)","DOI":"10.1109\/ICRA.2016.7487305"},{"key":"22_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"47","DOI":"10.1007\/978-3-319-49409-8_7","volume-title":"Computer Vision \u2013 ECCV 2016 Workshops","author":"C Lea","year":"2016","unstructured":"Lea, C., Vidal, R., Reiter, A., Hager, G.D.: Temporal convolutional networks: a unified approach to action segmentation. In: Hua, G., J\u00e9gou, H. (eds.) ECCV 2016. LNCS, vol. 9915, pp. 47\u201354. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-49409-8_7"},{"key":"22_CR19","doi-asserted-by":"publisher","unstructured":"Li, S.J., AbuFarha, Y., Liu, Y., Cheng, M.M., Gall, J.: MS-TCN++: multi-stage temporal convolutional network for action segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 1 (2020). https:\/\/doi.org\/10.1109\/TPAMI.2020.3021756","DOI":"10.1109\/TPAMI.2020.3021756"},{"issue":"2","key":"22_CR20","doi-asserted-by":"publisher","first-page":"521","DOI":"10.1007\/s11042-009-0353-1","volume":"46","author":"M Lux","year":"2010","unstructured":"Lux, M., Marques, O., Sch\u00f6ffmann, K., B\u00f6sz\u00f6rmenyi, L., Lajtai, G.: A novel tool for summarization of arthroscopic videos. Multimed. Tools Appl. 46(2), 521\u2013544 (2010)","journal-title":"Multimed. Tools Appl."},{"key":"22_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2021.102306","volume":"76","author":"L Maier-Hein","year":"2022","unstructured":"Maier-Hein, L., et al.: Surgical data science-from concepts toward clinical translation. Med. Image Anal. 76, 102306 (2022)","journal-title":"Med. 
Image Anal."},{"issue":"9","key":"22_CR22","doi-asserted-by":"publisher","first-page":"691","DOI":"10.1038\/s41551-017-0132-7","volume":"1","author":"L Maier-Hein","year":"2017","unstructured":"Maier-Hein, L., et al.: Surgical data science for next-generation interventions. Nat. Biomed. Eng. 1(9), 691\u2013696 (2017)","journal-title":"Nat. Biomed. Eng."},{"issue":"1","key":"22_CR23","doi-asserted-by":"publisher","first-page":"e93","DOI":"10.1097\/SLA.0000000000004736","volume":"274","author":"P Mascagni","year":"2021","unstructured":"Mascagni, P., et al.: A computer vision platform to automatically locate critical events in surgical videos: documenting safety in laparoscopic cholecystectomy. Ann. Surg. 274(1), e93\u2013e95 (2021)","journal-title":"Ann. Surg."},{"issue":"5","key":"22_CR24","doi-asserted-by":"publisher","first-page":"955","DOI":"10.1097\/SLA.0000000000004351","volume":"275","author":"P Mascagni","year":"2022","unstructured":"Mascagni, P., et al.: Artificial intelligence for surgical safety: automatic assessment of the critical view of safety in laparoscopic cholecystectomy using deep learning. Ann. Surg. 275(5), 955\u2013961 (2022)","journal-title":"Ann. Surg."},{"key":"22_CR25","doi-asserted-by":"crossref","unstructured":"Neimark, D., Bar, O., Zohar, M., Asselmann, D.: Video transformer network. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3163\u20133172 (2021)","DOI":"10.1109\/ICCVW54120.2021.00355"},{"issue":"2","key":"22_CR26","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1080\/13645706.2019.1584116","volume":"28","author":"N Padoy","year":"2019","unstructured":"Padoy, N.: Machine and deep learning for workflow recognition during surgery. Minim. Invasive Ther. Allied Technol. 28(2), 82\u201390 (2019)","journal-title":"Minim. Invasive Ther. Allied Technol."},{"key":"22_CR27","doi-asserted-by":"crossref","unstructured":"Pandey, A., Wang, D.: TCNN: temporal convolutional neural network for real-time speech enhancement in the time domain. In: 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), ICASSP 2019, pp. 6875\u20136879. IEEE (2019)","DOI":"10.1109\/ICASSP.2019.8683634"},{"key":"22_CR28","doi-asserted-by":"crossref","unstructured":"Peddinti, V., Povey, D., Khudanpur, S.: A time delay neural network architecture for efficient modeling of long temporal contexts. In: Sixteenth Annual Conference of the International Speech Communication Association (2015)","DOI":"10.21437\/Interspeech.2015-647"},{"issue":"7","key":"22_CR29","doi-asserted-by":"publisher","first-page":"1111","DOI":"10.1007\/s11548-021-02388-z","volume":"16","author":"S Ramesh","year":"2021","unstructured":"Ramesh, S., et al.: Multi-task temporal convolutional networks for joint recognition of surgical phases and steps in gastric bypass procedures. Int. J. Comput. Assist. Radiol. Surg. 16(7), 1111\u20131119 (2021). https:\/\/doi.org\/10.1007\/s11548-021-02388-z","journal-title":"Int. J. Comput. Assist. Radiol. Surg."},{"issue":"3","key":"22_CR30","doi-asserted-by":"publisher","first-page":"333","DOI":"10.1007\/s43154-021-00055-4","volume":"2","author":"X Sun","year":"2021","unstructured":"Sun, X., Okamoto, J., Masamune, K., Muragaki, Y.: Robotic technology in operating rooms: a review. Curr. Robot. Rep. 2(3), 333\u2013341 (2021)","journal-title":"Curr. Robot. Rep."},{"key":"22_CR31","unstructured":"Tan, M., Le, Q.: EfficientNetV2: smaller models and faster training. In: International Conference on Machine Learning, pp. 
10096\u201310106. PMLR (2021)"},{"issue":"4","key":"22_CR32","doi-asserted-by":"publisher","first-page":"1069","DOI":"10.1109\/TMI.2018.2878055","volume":"38","author":"AP Twinanda","year":"2018","unstructured":"Twinanda, A.P., Yengera, G., Mutter, D., Marescaux, J., Padoy, N.: RSDNet: learning to predict remaining surgery duration from laparoscopic videos without manual annotations. IEEE Trans. Med. Imaging 38(4), 1069\u20131078 (2018)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"22_CR33","doi-asserted-by":"publisher","first-page":"1155","DOI":"10.1109\/ACCESS.2017.2778011","volume":"6","author":"A Ullah","year":"2017","unstructured":"Ullah, A., Ahmad, J., Muhammad, K., Sajjad, M., Baik, S.W.: Action recognition in video sequences using deep bi-directional LSTM with CNN features. IEEE Access 6, 1155\u20131166 (2017)","journal-title":"IEEE Access"},{"key":"22_CR34","unstructured":"Yi, F., Wen, H., Jiang, T.: ASFormer: transformer for action segmentation. In: The British Machine Vision Conference (BMVC) (2021)"},{"key":"22_CR35","doi-asserted-by":"crossref","unstructured":"Yue-Hei Ng, J., Hausknecht, M., Vijayanarasimhan, S., Vinyals, O., Monga, R., Toderici, G.: Beyond short snippets: deep networks for video classification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4694\u20134702 (2015)","DOI":"10.1109\/CVPR.2015.7299101"},{"key":"22_CR36","unstructured":"Zhang, B., Ghanem, A., Simes, A., Choi, H., Yoo, A., Min, A.: Swnet: surgical workflow recognition with deep convolutional network. In: Medical Imaging with Deep Learning, pp. 855\u2013869. PMLR (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-25066-8_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T13:22:08Z","timestamp":1709817728000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-25066-8_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031250651","9783031250668"],"references-count":36,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-25066-8_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"18 February 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 
2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}