{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,16]],"date-time":"2026-04-16T16:38:38Z","timestamp":1776357518045,"version":"3.51.2"},"publisher-location":"Cham","reference-count":42,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031919787","type":"print"},{"value":"9783031919794","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91979-4_22","type":"book-chapter","created":{"date-parts":[[2025,5,31]],"date-time":"2025-05-31T19:07:18Z","timestamp":1748718438000},"page":"295-311","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["MCUBench: A Benchmark of\u00a0Tiny Object Detectors on\u00a0MCUs"],"prefix":"10.1007","author":[{"given":"Sudhakar","family":"Sah","sequence":"first","affiliation":[]},{"given":"Darshan 
C.","family":"Ganji","sequence":"additional","affiliation":[]},{"given":"Matteo","family":"Grimaldi","sequence":"additional","affiliation":[]},{"given":"Ravish","family":"Kumar","sequence":"additional","affiliation":[]},{"given":"Alexander","family":"Hoffman","sequence":"additional","affiliation":[]},{"given":"Honnesh","family":"Rohmetra","sequence":"additional","affiliation":[]},{"given":"Ehsan","family":"Saboori","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"22_CR1","unstructured":"Agarap, A.F.: Deep learning using rectified linear units (ReLU). arXiv:1803.08375v2 (2019)"},{"key":"22_CR2","unstructured":"Banbury, C., et al.: MLPerf tiny benchmark (2021). https:\/\/arxiv.org\/abs\/2106.07597"},{"key":"22_CR3","unstructured":"Bochkovskiy, A., Wang, C.Y., Liao, H.Y.M.: YOLOv4: Optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934 (2020)"},{"key":"22_CR4","doi-asserted-by":"crossref","unstructured":"Diwan, T., Anirudh, G., Tembhurne, J.V.: Object detection using YOLO: challenges, architectural successors, datasets and applications. Multimedia Tools Appl. 82(6), 9243\u20139275 (2023)","DOI":"10.1007\/s11042-022-13644-y"},{"key":"22_CR5","unstructured":"Everingham, M., Van\u00a0Gool, L., Williams, C.K.I., Winn, J., Zisserman, A.: The PASCAL Visual Object Classes Challenge 2012 (VOC2012) Results (2012). http:\/\/www.pascal-network.org\/challenges\/VOC\/voc2012\/workshop\/index.html"},{"issue":"2","key":"22_CR6","doi-asserted-by":"publisher","first-page":"16","DOI":"10.3390\/cryptography6020016","volume":"6","author":"H Feng","year":"2022","unstructured":"Feng, H., Mu, G., Zhong, S., Zhang, P., Yuan, T.: Benchmark analysis of YOLO performance on edge intelligence devices. Cryptography 6(2), 16 (2022)","journal-title":"Cryptography"},{"key":"22_CR7","unstructured":"Gal-On, S., Levy, M.: Exploring coremark a benchmark maximizing simplicity and efficacy. (2012). 
https:\/\/www.eembc.org\/techlit\/articles\/coremark-whitepaper.pdf"},{"key":"22_CR8","unstructured":"Ge, Z., Liu, S., Wang, F., Li, Z., Sun, J.: YOLOx: Exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430 (2021)"},{"key":"22_CR9","unstructured":"Hyodo, K.: Tools to convert ONNX files (NCHW) to TensorFlow format (NHWC) (2024). https:\/\/github.com\/PINTO0309\/onnx2tf. Accessed 16 Jul 2024"},{"key":"22_CR10","doi-asserted-by":"publisher","unstructured":"Jocher, G.: YOLOv5 by Ultralytics (2020). https:\/\/doi.org\/10.5281\/zenodo.3908559, https:\/\/github.com\/ultralytics\/yolov5","DOI":"10.5281\/zenodo.3908559"},{"key":"22_CR11","unstructured":"Jocher, G., Chaurasia, A., Qiu, J.: YOLO by Ultralytics (2023). https:\/\/github.com\/ultralytics\/ultralytics"},{"key":"22_CR12","doi-asserted-by":"crossref","unstructured":"Kaur, P., Khehra, B.S., Mavi, E.B.S.: Data augmentation for object detection: a review. In: 2021 IEEE International Midwest Symposium on Circuits and Systems (MWSCAS), pp. 537\u2013543. IEEE (2021)","DOI":"10.1109\/MWSCAS47672.2021.9531849"},{"key":"22_CR13","unstructured":"Labs, S.: Performance benchmark of YOLO v5, v7 and v8. https:\/\/www.stereolabs.com\/blog\/performance-of-yolo-v5-v7-and-v8\/ (2023)"},{"issue":"3","key":"22_CR14","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1109\/MPRV.2017.2940968","volume":"16","author":"ND Lane","year":"2017","unstructured":"Lane, N.D., Bhattacharya, S., Mathur, A., Georgiev, P., Forlivesi, C., Kawsar, F.: Squeezing deep learning into mobile and embedded devices. IEEE Pervasive Comput. 16(3), 82\u201388 (2017)","journal-title":"IEEE Pervasive Comput."},{"key":"22_CR15","doi-asserted-by":"crossref","unstructured":"Lazarevich, I., Grimaldi, M., Kumar, R., Mitra, S., Khan, S., Sah, S.: YOLOBench: Benchmarking efficient object detectors on embedded systems (2023). 
https:\/\/arxiv.org\/abs\/2307.13901","DOI":"10.1109\/ICCVW60793.2023.00126"},{"key":"22_CR16","unstructured":"Li, C., et al.: YOLOv6 v3. 0: A full-scale reloading. arXiv preprint arXiv:2301.05586 (2023)"},{"key":"22_CR17","first-page":"21002","volume":"33","author":"X Li","year":"2020","unstructured":"Li, X., et al.: Generalized focal loss: learning qualified and distributed bounding boxes for dense object detection. Adv. Neural. Inf. Process. Syst. 33, 21002\u201321012 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"22_CR18","unstructured":"Lin, T.Y., et al.: Microsoft COCO: Common objects in context (2014). http:\/\/arxiv.org\/abs\/1405.0312, cite arxiv:1405.0312Comment: 1) updated annotation pipeline description and figures; 2) added new section describing datasets splits; 3) updated author list"},{"key":"22_CR19","unstructured":"Lite, T.: TensorFlow lite for mobile and edge (2024). https:\/\/www.tensorflow.org\/lite. Accessed 16 Jul 2024"},{"key":"22_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1007\/978-3-319-46448-0_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"W Liu","year":"2016","unstructured":"Liu, W., et al.: SSD: single shot MultiBox detector. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 21\u201337. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_2"},{"key":"22_CR21","unstructured":"Liu, W., et al.: SSD: single shot MultiBox detector. CoRR abs\/1512.02325 (2015). http:\/\/arxiv.org\/abs\/1512.02325"},{"key":"22_CR22","unstructured":"Lv, W., et al.: DETRs beat YOLOs on real-time object detection. arXiv preprint arXiv:2304.08069 (2023)"},{"key":"22_CR23","unstructured":"ONNX: Open neural network exchange (2024). https:\/\/onnx.ai\/. Accessed 16 Jul 2024"},{"key":"22_CR24","unstructured":"Rath, S., Gupta, V.: Performance comparison of yolo object detection models - an intensive study. 
https:\/\/learnopencv.com\/performance-comparison-of-yolo-models\/ (2022)"},{"key":"22_CR25","unstructured":"Reddi, V.J., et al.: MLPERF inference benchmark (2020). https:\/\/arxiv.org\/abs\/1911.02549"},{"key":"22_CR26","doi-asserted-by":"crossref","unstructured":"Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: unified, real-time object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 779\u2013788 (2016)","DOI":"10.1109\/CVPR.2016.91"},{"key":"22_CR27","unstructured":"Redmon, J., Farhadi, A.: YOLO9000: better, faster, stronger. CoRR abs\/1612.08242 (2016). http:\/\/arxiv.org\/abs\/1612.08242"},{"key":"22_CR28","unstructured":"Redmon, J., Farhadi, A.: YOLOv3: An incremental improvement. arXiv preprint arXiv:1804.02767 (2018)"},{"key":"22_CR29","doi-asserted-by":"crossref","unstructured":"Stefan\u00a0Elfwing, Eiji\u00a0Uchibe, K.D.: Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. arXiv:1702.03118v3 (2017)","DOI":"10.1016\/j.neunet.2017.12.012"},{"key":"22_CR30","unstructured":"STMicroelectronics: Discovery kit for IoT node with STM32U5 series (2024). https:\/\/www.st.com\/en\/evaluation-tools\/b-u585i-iot02a.html. Accessed 16 Jul 2024"},{"key":"22_CR31","unstructured":"STMicroelectronics: Discovery kit with STM32H573IIK3Q MCU (2024). https:\/\/www.st.com\/en\/evaluation-tools\/stm32h573i-dk.html. Accessed 16 Jul 2024"},{"key":"22_CR32","unstructured":"STMicroelectronics: Discovery kit with STM32L4R9AI MCU (2024). https:\/\/www.st.com\/en\/evaluation-tools\/32l4r9idiscovery.html. Accessed 16 Jul 2024"},{"key":"22_CR33","unstructured":"STMicroelectronics: St edge AI developer cloud (2024). https:\/\/stm32ai.st.com\/st-edge-ai-developer-cloud\/. Accessed 16 Jul 2024"},{"key":"22_CR34","unstructured":"STMicroelectronics: ST X-CUBE-AI (2024). https:\/\/www.st.com\/en\/embedded-software\/x-cube-ai.html. 
Accessed 16 Jul 2024"},{"key":"22_CR35","unstructured":"STMicroelectronics: STM32 NUCLEO (2024). https:\/\/www.st.com\/en\/evaluation-tools\/nucleo-h743zi.html. Accessed 16 Jul 2024"},{"key":"22_CR36","unstructured":"STMicroelectronics: STM32F469I discovery kit (2024). https:\/\/www.st.com\/en\/evaluation-tools\/32f469idiscovery.html. Accessed 16 Jul 2024"},{"key":"22_CR37","unstructured":"STMicroelectronics: STM32F769I discovery kit (2024). https:\/\/www.st.com\/en\/evaluation-tools\/32f769idiscovery.html. Accessed 16 Jul 2024"},{"key":"22_CR38","unstructured":"STMicroelectronics: STM32H747I discovery kit (2024). https:\/\/www.st.com\/en\/evaluation-tools\/stm32h747i-disco.html. Accessed 16 Jul 2024"},{"key":"22_CR39","unstructured":"Terven, J., Cordova-Esparza, D.: A comprehensive review of YOLO: From YOLOv1 and beyond. arXiv preprint arXiv:2304.00501 (2023)"},{"key":"22_CR40","unstructured":"Torelli, P., Bangale, M.: Measuring inference performance of machine-learning frameworks on edge-class devices with the MLMark benchmark. https:\/\/www.eembc.org\/techlit\/articles\/MLMARK-WHITEPAPER-FINAL-1.pdf"},{"key":"22_CR41","doi-asserted-by":"crossref","unstructured":"Wang, C.Y., Bochkovskiy, A., Liao, H.Y.M.: YOLOv7: trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7464\u20137475 (2023)","DOI":"10.1109\/CVPR52729.2023.00721"},{"key":"22_CR42","doi-asserted-by":"crossref","unstructured":"Zhu, J., Feng, H., Zhong, S., Yuan, T.: Performance analysis of real-time object detection on Jetson device. In: 2022 IEEE\/ACIS 22nd International Conference on Computer and Information Science (ICIS), pp. 156\u2013161. 
IEEE (2022)","DOI":"10.1109\/ICIS54925.2022.9882480"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91979-4_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,31]],"date-time":"2025-05-31T19:07:30Z","timestamp":1748718450000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91979-4_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031919787","9783031919794"],"references-count":42,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91979-4_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}