{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T16:10:17Z","timestamp":1774541417859,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":40,"publisher":"ACM","license":[{"start":{"date-parts":[[2018,10,15]],"date-time":"2018-10-15T00:00:00Z","timestamp":1539561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["1617627, 1565604, 1632051"],"award-info":[{"award-number":["1617627, 1565604, 1632051"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2018,10,15]]},"DOI":"10.1145\/3241539.3241559","type":"proceedings-article","created":{"date-parts":[[2018,10,16]],"date-time":"2018-10-16T13:04:36Z","timestamp":1539695076000},"page":"115-127","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":226,"title":["NestDNN"],"prefix":"10.1145","author":[{"given":"Biyi","family":"Fang","sequence":"first","affiliation":[{"name":"Michigan State University, East Lansing, MI, USA"}]},{"given":"Xiao","family":"Zeng","sequence":"additional","affiliation":[{"name":"Michigan State University, East Lansing, MI, USA"}]},{"given":"Mi","family":"Zhang","sequence":"additional","affiliation":[{"name":"Michigan State University, East Lansing, MI, USA"}]}],"member":"320","published-online":{"date-parts":[[2018,10,15]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"2016. Monsoon Power Monitor. https:\/\/www.msoon.com\/ LabEquipment\/PowerMonitor\/.  2016. Monsoon Power Monitor. https:\/\/www.msoon.com\/ LabEquipment\/PowerMonitor\/."},{"key":"e_1_3_2_1_2_1","unstructured":"2016. This Powerful Wearable Is a Life-Changer for the Blind. https:\/\/blogs.nvidia.com\/blog\/2016\/10\/27\/ wearable-device-for-blind-visually-impaired\/.  2016. This Powerful Wearable Is a Life-Changer for the Blind. https:\/\/blogs.nvidia.com\/blog\/2016\/10\/27\/ wearable-device-for-blind-visually-impaired\/."},{"key":"e_1_3_2_1_3_1","unstructured":"2017. An On-device Deep Neural Network for Face Detection. https: \/\/machinelearning.apple.com\/2017\/11\/16\/face-detection.html.  2017. An On-device Deep Neural Network for Face Detection. https: \/\/machinelearning.apple.com\/2017\/11\/16\/face-detection.html."},{"key":"e_1_3_2_1_4_1","unstructured":"2018. Amazon DeepLens. https:\/\/aws.amazon.com\/deeplens\/.  2018. Amazon DeepLens. https:\/\/aws.amazon.com\/deeplens\/."},{"key":"e_1_3_2_1_5_1","unstructured":"2018. Google Clips. https:\/\/store.google.com\/us\/product\/google_clips.  2018. Google Clips. https:\/\/store.google.com\/us\/product\/google_clips."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/2307849.2307862"},{"key":"e_1_3_2_1_7_1","volume-title":"Binarized neural networks: Training deep neural networks with weights and activations constrained to+ 1 or-1. arXiv preprint arXiv:1602.02830","author":"Courbariaux Matthieu","year":"2016"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3131672.3131693"},{"key":"e_1_3_2_1_9_1","unstructured":"Yiwen Guo Anbang Yao and Yurong Chen. 2016. Dynamic network surgery for efficient dnns. In NIPS. 1379--1387.   Yiwen Guo Anbang Yao and Yurong Chen. 2016. Dynamic network surgery for efficient dnns. In NIPS. 1379--1387."},{"key":"e_1_3_2_1_10_1","volume-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149","author":"Han Song","year":"2015"},{"key":"e_1_3_2_1_11_1","unstructured":"Song Han Jeff Pool John Tran and William Dally. 2015. Learning both weights and connections for efficient neural network. In NIPS. 1135--1143.   Song Han Jeff Pool John Tran and William Dally. 2015. Learning both weights and connections for efficient neural network. In NIPS. 1135--1143."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/2906388.2906396"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_14_1","volume-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861","author":"Howard Andrew G","year":"2017"},{"key":"e_1_3_2_1_15_1","volume-title":"IEEE conference on computer vision and pattern recognition","volume":"1","author":"Huang Gao"},{"key":"e_1_3_2_1_16_1","volume-title":"What makes ImageNet good for transfer learning? arXiv:1608.08614","author":"Huh Minyoung","year":"2016"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3081333.3081360"},{"key":"e_1_3_2_1_19_1","unstructured":"Alex Krizhevsky Ilya Sutskever and Geoffrey E Hinton. 2012. Imagenet classification with deep convolutional neural networks. In NIPS. 1097-- 1105.   Alex Krizhevsky Ilya Sutskever and Geoffrey E Hinton. 2012. Imagenet classification with deep convolutional neural networks. In NIPS. 1097-- 1105."},{"key":"e_1_3_2_1_20_1","volume-title":"DeepX: A software accelerator for low-power deep learning inference on mobile devices","author":"Lane Nicholas D"},{"key":"e_1_3_2_1_21_1","volume-title":"Deep learning. Nature 521, 7553","author":"LeCun Yann","year":"2015"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2015.7301352"},{"key":"e_1_3_2_1_23_1","volume-title":"Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710","author":"Li Hao","year":"2016"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/2742647.2742663"},{"key":"e_1_3_2_1_25_1","unstructured":"Ji Lin Yongming Rao Jiwen Lu and Jie Zhou. 2017. Runtime neural pruning. In NIPS. 2181--2191.  Ji Lin Yongming Rao Jiwen Lu and Jie Zhou. 2017. Runtime neural pruning. In NIPS. 2181--2191."},{"key":"e_1_3_2_1_26_1","volume-title":"Thinet: A filter level pruning method for deep neural network compression. arXiv preprint arXiv:1707.06342","author":"Luo Jian-Hao","year":"2017"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3081333.3081359"},{"key":"e_1_3_2_1_28_1","volume-title":"Pruning convolutional neural networks for resource efficient transfer learning. arXiv preprint arXiv:1611.06440","author":"Molchanov Pavlo","year":"2016"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3081333.3081347"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.222"},{"key":"e_1_3_2_1_31_1","volume-title":"A survey of unmanned aerial vehicles (UAV) for traffic surveillance. Department of computer science and engineering","author":"Puri Anuj"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_1_33_1","volume-title":"Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556","author":"Simonyan Karen","year":"2014"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.02.016"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2017.2761740"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.220"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1145\/3081333.3081336"},{"key":"e_1_3_2_1_39_1","unstructured":"Haoyu Zhang Ganesh Ananthanarayanan Peter Bodik Matthai Philipose Paramvir Bahl and Michael J Freedman. 2017. Live Video Analytics at Scale with Approximation and Delay-Tolerance.. In NSDI.   Haoyu Zhang Ganesh Ananthanarayanan Peter Bodik Matthai Philipose Paramvir Bahl and Michael J Freedman. 2017. Live Video Analytics at Scale with Approximation and Delay-Tolerance.. In NSDI."},{"key":"e_1_3_2_1_40_1","volume-title":"Places: A 10 million Image Database for Scene Recognition","author":"Zhou Bolei","year":"2017"},{"key":"e_1_3_2_1_41_1","unstructured":"Bolei Zhou Agata Lapedriza Jianxiong Xiao Antonio Torralba and Aude Oliva. 2014. Learning deep features for scene recognition using places database. In NIPS. 487--495.   Bolei Zhou Agata Lapedriza Jianxiong Xiao Antonio Torralba and Aude Oliva. 2014. Learning deep features for scene recognition using places database. In NIPS. 487--495."}],"event":{"name":"MobiCom '18: The 24th Annual International Conference on Mobile Computing and Networking","location":"New Delhi India","acronym":"MobiCom '18","sponsor":["SIGMOBILE ACM Special Interest Group on Mobility of Systems, Users, Data and Computing"]},"container-title":["Proceedings of the 24th Annual International Conference on Mobile Computing and Networking"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3241539.3241559","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3241539.3241559","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3241539.3241559","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T00:57:37Z","timestamp":1750208257000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3241539.3241559"}},"subtitle":["Resource-Aware Multi-Tenant On-Device Deep Learning for Continuous Mobile Vision"],"short-title":[],"issued":{"date-parts":[[2018,10,15]]},"references-count":40,"alternative-id":["10.1145\/3241539.3241559","10.1145\/3241539"],"URL":"https:\/\/doi.org\/10.1145\/3241539.3241559","relation":{},"subject":[],"published":{"date-parts":[[2018,10,15]]},"assertion":[{"value":"2018-10-15","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}