{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,10]],"date-time":"2025-09-10T23:11:19Z","timestamp":1757545879184,"version":"3.40.3"},"publisher-location":"Cham","reference-count":56,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031198328"},{"type":"electronic","value":"9783031198335"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_13","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"213-229","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Delta Distillation for\u00a0Efficient Video Processing"],"prefix":"10.1007","author":[{"given":"Amirhossein","family":"Habibian","sequence":"first","affiliation":[]},{"given":"Haitam","family":"Ben Yahia","sequence":"additional","affiliation":[]},{"given":"Davide","family":"Abati","sequence":"additional","affiliation":[]},{"given":"Efstratios","family":"Gavves","sequence":"additional","affiliation":[]},{"given":"Fatih","family":"Porikli","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"13_CR1","doi-asserted-by":"crossref","unstructured":"Chai, Y.: Patchwork: a patch-wise attention network for efficient object detection and segmentation in video streams. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00351"},{"key":"13_CR2","unstructured":"Chen, W., Gong, X., Liu, X., Zhang, Q., Li, Y., Wang, Z.: FasterSeg: searching for faster real-time semantic segmentation. In: ICLR (2020)"},{"key":"13_CR3","doi-asserted-by":"crossref","unstructured":"Chen, Y., Cao, Y., Hu, H., Wang, L.: Memory enhanced global-local aggregation for video object detection. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01035"},{"key":"13_CR4","doi-asserted-by":"crossref","unstructured":"Cordts, M., et al.: The cityscapes dataset for semantic urban scene understanding. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.350"},{"key":"13_CR5","doi-asserted-by":"crossref","unstructured":"Dai, X., et al.: General instance distillation for object detection. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00775"},{"key":"13_CR6","unstructured":"Denil, M., Shakibi, B., Dinh, L., Ranzato, M., de Freitas, N.: Predicting parameters in deep learning. In: NeurIPS (2013)"},{"key":"13_CR7","doi-asserted-by":"crossref","unstructured":"Gou, J., Yu, B., Maybank, S.J., Tao, D.: Knowledge distillation: a survey. In: IJCV (2021)","DOI":"10.1007\/s11263-021-01453-z"},{"key":"13_CR8","doi-asserted-by":"crossref","unstructured":"Guo, Q., et al.: Online knowledge distillation via collaborative learning. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01103"},{"key":"13_CR9","unstructured":"Gupta, S., Agrawal, A., Gopalakrishnan, K., Narayanan, P.: Deep learning with limited numerical precision. 
In: ICML (2015)"},{"key":"13_CR10","doi-asserted-by":"crossref","unstructured":"Habibian, A., Abati, D., Cohen, T.S., Bejnordi, B.E.: Skip-convolutions for efficient video processing. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00272"},{"key":"13_CR11","doi-asserted-by":"crossref","unstructured":"He, Y., Zhang, X., Sun, J.: Channel pruning for accelerating very deep neural networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.155"},{"key":"13_CR12","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"key":"13_CR13","unstructured":"Hong, Y., Pan, H., Sun, W., Jia, Y., et al.: Deep dual-resolution networks for real-time and accurate semantic segmentation of road scenes. arXiv preprint arXiv:2101.06085 (2021)"},{"key":"13_CR14","doi-asserted-by":"crossref","unstructured":"Hu, P., Caba, F., Wang, O., Lin, Z., Sclaroff, S., Perazzi, F.: Temporally distributed networks for fast video semantic segmentation. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00884"},{"key":"13_CR15","unstructured":"Hu, P., et al.: Real-time semantic segmentation with fast attention. In: ICRA (2020)"},{"key":"13_CR16","doi-asserted-by":"crossref","unstructured":"Jacob, B., et al.: Quantization and training of neural networks for efficient integer-arithmetic-only inference. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00286"},{"key":"13_CR17","doi-asserted-by":"crossref","unstructured":"Jaderberg, M., Vedaldi, A., Zisserman, A.: Speeding up convolutional neural networks with low rank expansions. In: BMVC (2014)","DOI":"10.5244\/C.28.88"},{"key":"13_CR18","doi-asserted-by":"crossref","unstructured":"Jain, S., Wang, X., Gonzalez, J.E.: Accel: a corrective fusion network for efficient semantic segmentation on video. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00907"},{"key":"13_CR19","unstructured":"Jang, E., Gu, S., Poole, B.: Categorical reparameterization with gumbel-softmax. In: ICLR (2017)"},{"key":"13_CR20","unstructured":"Krishnamoorthi, R.: Quantizing deep convolutional networks for efficient inference: a whitepaper. arXiv preprint arXiv:1806.08342 (2018)"},{"key":"13_CR21","unstructured":"Lan, X., Zhu, X., Gong, S., et al.: Knowledge distillation by on-the-fly native ensemble. In: NeurIPS (2018)"},{"key":"13_CR22","unstructured":"Lei, C., Xing, Y., Chen, Q.: Blind video temporal consistency via deep video prior. In: NeurIPS (2020)"},{"key":"13_CR23","unstructured":"Li, H., Kadav, A., Durdanovic, I., Samet, H., Graf, H.P.: Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710 (2017)"},{"key":"13_CR24","doi-asserted-by":"crossref","unstructured":"Li, Y., Shi, J., Lin, D.: Low-latency video semantic segmentation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00628"},{"key":"13_CR25","unstructured":"Liu, M., Zhu, M.: Mobile video object detection with temporally-aware feature maps. In: CVPR (2018)"},{"key":"13_CR26","unstructured":"Liu, M., Zhu, M., White, M., Li, Y., Kalenichenko, D.: Looking fast and slow: memory-guided mobile video object detection. arXiv preprint arXiv:1903.10172 (2019)"},{"key":"13_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"352","DOI":"10.1007\/978-3-030-58607-2_21","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Liu","year":"2020","unstructured":"Liu, Y., Shen, C., Yu, C., Wang, J.: Efficient semantic video segmentation with per-frame inference. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. 
LNCS, vol. 12355, pp. 352\u2013368. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58607-2_21"},{"key":"13_CR28","unstructured":"Maddison, C.J., Mnih, A., Teh, Y.W.: The concrete distribution: a continuous relaxation of discrete random variables. In: ICLR (2017)"},{"key":"13_CR29","unstructured":"Mao, H., Zhu, S., Han, S., Dally, W.J.: PatchNet-short-range template matching for efficient video processing. arXiv preprint arXiv:2103.07371 (2021)"},{"key":"13_CR30","doi-asserted-by":"crossref","unstructured":"Moons, B., et al.: Distilling optimal neural networks: rapid search in diverse spaces. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01201"},{"key":"13_CR31","doi-asserted-by":"crossref","unstructured":"Nagel, M., van Baalen, M., Blankevoort, T., Welling, M.: Data-free quantization through weight equalization and bias correction. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00141"},{"key":"13_CR32","doi-asserted-by":"crossref","unstructured":"Orsic, M., Kreso, I., Bevandic, P., Segvic, S.: In defense of pre-trained imagenet architectures for real-time semantic segmentation of road-driving images. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01289"},{"key":"13_CR33","unstructured":"Rebol, M., Kn\u00f6belreiter, P.: Frame-to-frame consistent semantic segmentation. In: Joint Austrian Computer Vision And Robotics Workshop (ACVRW) (2020)"},{"key":"13_CR34","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: NeurIPS (2015)"},{"key":"13_CR35","doi-asserted-by":"crossref","unstructured":"Romera, E., Alvarez, J.M., Bergasa, L.M., Arroyo, R.: ERFNet: efficient residual factorized convnet for real-time semantic segmentation. IEEE Trans. Intell. Transp. Syst. (2017)","DOI":"10.1109\/IVS.2017.7995966"},{"key":"13_CR36","unstructured":"Romero, A., Ballas, N., Kahou, S.E., Chassang, A., Gatta, C., Bengio, Y.: FitNets: hints for thin deep nets. In: ICLR (2015)"},{"key":"13_CR37","doi-asserted-by":"crossref","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. In: IJCV (2015)","DOI":"10.1007\/s11263-015-0816-y"},{"key":"13_CR38","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: MobileNetv 2: inverted residuals and linear bottlenecks. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"13_CR39","doi-asserted-by":"crossref","unstructured":"Shrivastava, A., Gupta, A., Girshick, R.: Training region-based object detectors with online hard example mining. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.89"},{"key":"13_CR40","doi-asserted-by":"crossref","unstructured":"Sibechi, R., Booij, O., Baka, N., Bloem, P.: Exploiting temporality for semi-supervised video segmentation. In: ICCV Workshops (2019)","DOI":"10.1109\/ICCVW.2019.00122"},{"key":"13_CR41","unstructured":"Tan, M., Le, Q.: EfficientNet: rethinking model scaling for convolutional neural networks. In: ICML (2019)"},{"key":"13_CR42","doi-asserted-by":"crossref","unstructured":"Tan, M., Pang, R., Le, Q.V.: EfficientDET: scalable and efficient object detection. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01079"},{"key":"13_CR43","unstructured":"Tao, A., Sapra, K., Catanzaro, B.: Hierarchical multi-scale attention for semantic segmentation. arXiv preprint arXiv:2005.10821 (2020)"},{"key":"13_CR44","unstructured":"Wang, J., et al.: Deep high-resolution representation learning for visual recognition. 
TPAMI (2019)"},{"key":"13_CR45","doi-asserted-by":"crossref","unstructured":"Wang, T., Yuan, L., Zhang, X., Feng, J.: Distilling object detectors with fine-grained feature imitation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00507"},{"key":"13_CR46","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: LedNet: a lightweight encoder-decoder network for real-time semantic segmentation. In: ICIP (2019)","DOI":"10.1109\/ICIP.2019.8803154"},{"key":"13_CR47","doi-asserted-by":"crossref","unstructured":"Wu, G., Gong, S.: Peer collaborative learning for online knowledge distillation. In: AAAI (2021)","DOI":"10.1609\/aaai.v35i12.17234"},{"key":"13_CR48","doi-asserted-by":"crossref","unstructured":"Yu, C., Gao, C., Wang, J., Yu, G., Shen, C., Sang, N.: BiseNet v2: bilateral network with guided aggregation for real-time semantic segmentation. In: IJCV (2021)","DOI":"10.1007\/s11263-021-01515-2"},{"key":"13_CR49","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"334","DOI":"10.1007\/978-3-030-01261-8_20","volume-title":"Computer Vision \u2013 ECCV 2018","author":"C Yu","year":"2018","unstructured":"Yu, C., Wang, J., Peng, C., Gao, C., Yu, G., Sang, N.: BiSeNet: bilateral segmentation network for real-time semantic segmentation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11217, pp. 334\u2013349. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01261-8_20"},{"key":"13_CR50","doi-asserted-by":"crossref","unstructured":"Zhang, X., Zou, J., He, K., Sun, J.: Accelerating very deep convolutional networks for classification and detection. TPAMI (2016)","DOI":"10.1109\/TPAMI.2015.2502579"},{"key":"13_CR51","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Xiang, T., Hospedales, T.M., Lu, H.: Deep mutual learning. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00454"},{"key":"13_CR52","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"418","DOI":"10.1007\/978-3-030-01219-9_25","volume-title":"Computer Vision \u2013 ECCV 2018","author":"H Zhao","year":"2018","unstructured":"Zhao, H., Qi, X., Shen, X., Shi, J., Jia, J.: ICNet for real-time semantic segmentation on high-resolution images. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11207, pp. 418\u2013434. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01219-9_25"},{"key":"13_CR53","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., Jia, J.: Pyramid scene parsing network. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.660"},{"key":"13_CR54","doi-asserted-by":"crossref","unstructured":"Zhu, X., Dai, J., Zhu, X., Wei, Y., Yuan, L.: Towards high performance video object detection for mobiles. arXiv preprint arXiv:1804.05830 (2018)","DOI":"10.1109\/CVPR.2018.00753"},{"key":"13_CR55","doi-asserted-by":"crossref","unstructured":"Zhu, X., Wang, Y., Dai, J., Yuan, L., Wei, Y.: Flow-guided feature aggregation for video object detection. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.52"},{"key":"13_CR56","doi-asserted-by":"crossref","unstructured":"Zhu, X., Xiong, Y., Dai, J., Yuan, L., Wei, Y.: Deep feature flow for video recognition. 
In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.441"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_13","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T15:34:57Z","timestamp":1673278497000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_13"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":56,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_13","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}