{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T15:44:16Z","timestamp":1775144656392,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":53,"publisher":"ACM","funder":[{"name":"Toyota Motor North America InfoTech Labs"},{"DOI":"10.13039\/100006754","name":"Army Research Laboratory","doi-asserted-by":"publisher","award":["W911NF-23-2-0224"],"award-info":[{"award-number":["W911NF-23-2-0224"]}],"id":[{"id":"10.13039\/100006754","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,12,3]]},"DOI":"10.1145\/3769102.3770622","type":"proceedings-article","created":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T16:00:41Z","timestamp":1764777641000},"page":"1-16","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["PlatformX: An End-to-End Transferable Platform for Energy-Efficient Neural Architecture Search"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-9396-5383","authenticated-orcid":false,"given":"Xiaolong","family":"Tu","sequence":"first","affiliation":[{"name":"Computer Science, Georgia State University, Atlanta, GA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4162-1423","authenticated-orcid":false,"given":"Dawei","family":"Chen","sequence":"additional","affiliation":[{"name":"InfoTech Labs, Toyota Motor North America R&amp;D, Mountain View, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8291-5025","authenticated-orcid":false,"given":"Kyungtae","family":"Han","sequence":"additional","affiliation":[{"name":"InfoTech Labs, Toyota Motor North America R&amp;D, Mountain View, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5487-5273","authenticated-orcid":false,"given":"Onur","family":"Altintas","sequence":"additional","affiliation":[{"name":"InfoTech Labs, Toyota Motor North America R&amp;D, Mountain View, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8732-6200","authenticated-orcid":false,"given":"Haoxin","family":"Wang","sequence":"additional","affiliation":[{"name":"Computer Science, Georgia State University, Atlanta, GA, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,12,3]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"IEEE Conference on Computer Vision and Pattern Recognition","author":"Wu Bichen","year":"2019","unstructured":"Bichen Wu, Xiaoliang Dai, et al. FBNet: Hardware-aware efficient convnet design via differentiable neural architecture search. In IEEE Conference on Computer Vision and Pattern Recognition, 2019."},{"key":"e_1_3_2_1_2_1","volume-title":"International Conference on Learning Representations","author":"Dong Xuanyi","year":"2020","unstructured":"Xuanyi Dong and Yi Yang. NAS-Bench-201: Extending the scope of reproducible neural architecture search. In International Conference on Learning Representations, 2020."},{"key":"e_1_3_2_1_3_1","volume-title":"International Conference on Learning Representations","author":"Hongxu","year":"2021","unstructured":"Hongxu Li et al. HW-NASBench: Hardware-aware neural architecture search benchmark. In International Conference on Learning Representations, 2021."},{"key":"e_1_3_2_1_4_1","volume-title":"International Conference on Machine Learning","author":"Tan Mingxing","year":"2019","unstructured":"Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, 2019."},{"key":"e_1_3_2_1_5_1","volume-title":"Advances in Neural Information Processing Systems","author":"Shishuo","year":"2020","unstructured":"Shishuo Gu et al. Brp-nas: Prediction-based nas via bias-reduced pruning. In Advances in Neural Information Processing Systems, 2020."},{"key":"e_1_3_2_1_6_1","first-page":"93","volume-title":"2023 IEEE\/ACM Symposium on Edge Computing (SEC)","author":"Tu Xiaolong","unstructured":"Xiaolong Tu, Anik Mallik, Dawei Chen, Kyungtae Han, Onur Altintas, Haoxin Wang, and Jiang Xie. Unveiling energy efficiency in deep learning: Measurement, prediction, and scoring across edge devices. In 2023 IEEE\/ACM Symposium on Edge Computing (SEC), pages 80\u201393. IEEE, 2023."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/TGCN.2020.3041666"},{"key":"e_1_3_2_1_8_1","volume-title":"IEEE International Conference on Acoustics, Speech and Signal Processing","author":"Haibo","year":"2022","unstructured":"Haibo Yang et al. Knas: A training-free neural architecture search for on-device speech recognition. In IEEE International Conference on Acoustics, Speech and Signal Processing, 2022."},{"key":"e_1_3_2_1_9_1","volume-title":"ACM\/IEEE International Symposium on Low Power Electronics and Design","author":"Canziani Marco","year":"2019","unstructured":"Marco Canziani, Ivan Hubara, and Elias Jarlebring. TEA-DNN: The quest for time-energy-accuracy co-optimised deep neural networks. In ACM\/IEEE International Symposium on Low Power Electronics and Design, 2019."},{"key":"e_1_3_2_1_10_1","volume-title":"International Conference on Machine Learning","author":"Hieu","year":"2018","unstructured":"Hieu Pham et al. Efficient neural architecture search via parameter sharing. In International Conference on Machine Learning, 2018."},{"key":"e_1_3_2_1_11_1","volume-title":"IEEE Conference on Computer Vision and Pattern Recognition","author":"Barret","year":"2018","unstructured":"Barret Zoph et al. Learning transferable architectures for scalable image recognition. In IEEE Conference on Computer Vision and Pattern Recognition, 2018."},{"key":"e_1_3_2_1_12_1","first-page":"4789","volume-title":"Proc. AAAI Conference on Artificial Intelligence","author":"Real Esteban","year":"2019","unstructured":"Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V Le. Regularized evolution for image classifier architecture search. In Proc. AAAI Conference on Artificial Intelligence, pages 4780\u20134789, 2019."},{"key":"e_1_3_2_1_13_1","volume-title":"International Conference on Machine Learning","author":"Chu Hang","year":"2021","unstructured":"Hang Chu, Zizheng Pan, et al. Tunas: Simplifying the search space of weight-sharing neural architecture search. In International Conference on Machine Learning, 2021."},{"key":"e_1_3_2_1_14_1","volume-title":"International Conference on Machine Learning","author":"Mellor Joe","year":"2021","unstructured":"Joe Mellor, Jack Turner, Amos Storkey, and Elliot J. Crowley. Neural architecture search without training. In International Conference on Machine Learning, 2021."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00293"},{"key":"e_1_3_2_1_16_1","volume-title":"International Conference on Learning Representations","author":"Cai Han","year":"2019","unstructured":"Han Cai, Ligeng Zhu, and Song Han. Proxylessnas: Direct neural architecture search on target task and hardware. In International Conference on Learning Representations, 2019."},{"key":"e_1_3_2_1_17_1","first-page":"1477","volume-title":"21st USENIX Symposium on Networked Systems Design and Implementation (NSDI 24)","author":"Feng Chengquan","year":"2024","unstructured":"Chengquan Feng, Li Lyna Zhang, Yuanchi Liu, Jiahang Xu, Chengruidong Zhang, Zhiyuan Wang, Ting Cao, Mao Yang, and Haisheng Tan. {LitePred}: Transferable and scalable latency prediction for {Hardware-Aware} neural architecture search. In 21st USENIX Symposium on Networked Systems Design and Implementation (NSDI 24), pages 1463\u20131477, 2024."},{"key":"e_1_3_2_1_18_1","volume-title":"International Conference on Learning Representations","author":"Cai Han","year":"2020","unstructured":"Han Cai, Chuang Gan, and Song Han. Once for all: Train one network and specialize it for efficient deployment. In International Conference on Learning Representations, 2020."},{"key":"e_1_3_2_1_19_1","volume-title":"Advances in Neural Information Processing Systems","author":"Hanxiao","year":"2021","unstructured":"Hanxiao Liu et al. Pi-nas: Predictive iterative neural architecture search. In Advances in Neural Information Processing Systems, 2021."},{"key":"e_1_3_2_1_20_1","volume-title":"CE-NAS: An end-to-end carbon-efficient neural architecture search framework. arXiv preprint arXiv:2406.01414","author":"Zhao Yiyang","year":"2024","unstructured":"Yiyang Zhao, Yunzhuo Liu, Bo Jiang, and Tian Guo. CE-NAS: An end-to-end carbon-efficient neural architecture search framework. arXiv preprint arXiv:2406.01414, 2024."},{"key":"e_1_3_2_1_21_1","volume-title":"Deepen2023: Energy datasets for edge artificial intelligence. arXiv preprint arXiv:2312.00103","author":"Tu Xiaolong","year":"2023","unstructured":"Xiaolong Tu, Anik Mallik, Haoxin Wang, and Jiang Xie. Deepen2023: Energy datasets for edge artificial intelligence. arXiv preprint arXiv:2312.00103, 2023."},{"key":"e_1_3_2_1_22_1","first-page":"12","volume-title":"Proceedings of the 26th International Workshop on Mobile Computing Systems and Applications","author":"Tu Xiaolong","year":"2025","unstructured":"Xiaolong Tu, Dawei Chen, Kyungtae Han, Onur Altintas, and Haoxin Wang. Greenauto: An automated platform for sustainable ai model design on edge devices. In Proceedings of the 26th International Workshop on Mobile Computing Systems and Applications, pages 7\u201312, 2025."},{"key":"e_1_3_2_1_23_1","volume-title":"Aienergy: An energy benchmark for ai-empowered mobile and iot devices","author":"Tu Xiaolong","year":"2025","unstructured":"Xiaolong Tu, Anik Mallik, Haoxin Wang, and Jiang Xie. Aienergy: An energy benchmark for ai-empowered mobile and iot devices. 2025."},{"key":"e_1_3_2_1_24_1","volume-title":"ACM International Conference on Future Energy Systems","author":"Boubouh Karim","year":"2023","unstructured":"Karim Boubouh and Robert Basmadjian. Powerprofiler: Monitoring energy consumption of machine learning algorithms on android mobile devices. In ACM International Conference on Future Energy Systems, 2023."},{"key":"e_1_3_2_1_25_1","volume-title":"IEEE","author":"Mallik Anik","year":"2023","unstructured":"Anik Mallik, Haoxin Wang, Jiang Xie, Dawei Chen, and Kyungtae Han. Epam: A predictive energy model for mobile ai. In Icc 2023-ieee international conference on communications, pages 954\u2013959. IEEE, 2023."},{"key":"e_1_3_2_1_26_1","volume-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861","author":"Howard Andrew G","year":"2017","unstructured":"Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017."},{"key":"e_1_3_2_1_27_1","volume-title":"Proxylessnas: Direct neural architecture search on target task and hardware. arXiv preprint arXiv:1812.00332","author":"Cai Han","year":"2018","unstructured":"Han Cai, Ligeng Zhu, and Song Han. Proxylessnas: Direct neural architecture search on target task and hardware. arXiv preprint arXiv:1812.00332, 2018."},{"key":"e_1_3_2_1_28_1","first-page":"300","volume-title":"Proceedings of the European conference on computer vision (ECCV)","author":"Yang Tien-Ju","year":"2018","unstructured":"Tien-Ju Yang, Andrew Howard, Bo Chen, Xiao Zhang, Alec Go, Mark Sandler, Vivienne Sze, and Hartwig Adam. Netadapt: Platform-aware neural network adaptation for mobile applications. In Proceedings of the European conference on computer vision (ECCV), pages 285\u2013300, 2018."},{"key":"e_1_3_2_1_29_1","volume-title":"Advances in Neural Information Processing Systems","author":"Lin Ji","year":"2020","unstructured":"Ji Lin, Wei-Ming Chen, Yujun Lin, John Cohn, Chuang Gan, and Song Han. Mcunet: Tiny deep learning on iot devices. In Advances in Neural Information Processing Systems, 2020."},{"key":"e_1_3_2_1_30_1","first-page":"6","volume-title":"2019 IEEE\/ACM International Symposium on Low Power Electronics and Design (ISLPED)","author":"Cai Lile","unstructured":"Lile Cai, Anne-Maelle Barneche, Arthur Herbout, Chuan Sheng Foo, Jie Lin, Vijay Ramaseshan Chandrasekhar, and Mohamed M Sabry Aly. Tea-dnn: the quest for time-energy-accuracy co-optimized deep neural networks. In 2019 IEEE\/ACM International Symposium on Low Power Electronics and Design (ISLPED), pages 1\u20136. IEEE, 2019."},{"key":"e_1_3_2_1_31_1","volume-title":"Brp-nas: Prediction-based nas using gcns. Advances in neural information processing systems, 33:10480\u201310490","author":"Dudziak Lukasz","year":"2020","unstructured":"Lukasz Dudziak, Thomas Chau, Mohamed Abdelfattah, Royson Lee, Hyeji Kim, and Nicholas Lane. Brp-nas: Prediction-based nas using gcns. Advances in neural information processing systems, 33:10480\u201310490, 2020."},{"key":"e_1_3_2_1_32_1","first-page":"1388","volume-title":"Proc. IEEE INFOCOM","author":"Wang Haoxin","year":"2020","unstructured":"Haoxin Wang and Jiang Xie. User preference based energy-aware mobile ar system with edge computing. In Proc. IEEE INFOCOM, pages 1379\u20131388, 2020."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2022.3179943"},{"key":"e_1_3_2_1_34_1","first-page":"6","volume-title":"2017 IEEE International Conference on Communications (ICC)","author":"Wang Haoxin","unstructured":"Haoxin Wang, Jiang Xie, and Tao Han. V-handoff: A practical energy efficient handoff for 802.11 infrastructure networks. In 2017 IEEE International Conference on Communications (ICC), pages 1\u20136. IEEE, 2017."},{"key":"e_1_3_2_1_35_1","volume-title":"Proc. ICLR","author":"Dong Xuanyi","year":"2020","unstructured":"Xuanyi Dong and Yi Yang. NAS-Bench-201: Extending the scope of reproducible neural architecture search. In Proc. ICLR, 2020."},{"key":"e_1_3_2_1_36_1","first-page":"7598","volume-title":"Proc. International conference on machine learning","author":"Mellor Joe","unstructured":"Joe Mellor, Jack Turner, Amos Storkey, and Elliot J Crowley. Neural architecture search without training. In Proc. International conference on machine learning, pages 7588\u20137598. PMLR, 2021."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1080\/23311916.2018.1502242"},{"key":"e_1_3_2_1_38_1","first-page":"7","volume-title":"Journal of Membrane Computing","author":"Kang Shida","year":"2024","unstructured":"Shida Kang, Kaiwen Li, and Rui Wang. A survey on pareto front learning for multi-objective optimization. Journal of Membrane Computing, pages 1\u20137, 2024."},{"key":"e_1_3_2_1_39_1","volume-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky Alex","year":"2009","unstructured":"Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009."},{"key":"e_1_3_2_1_40_1","unstructured":"TFLite Benchmark. https:\/\/ai.google.dev\/edge\/litert\/models\/measurement. Accessed on Oct. 2024."},{"key":"e_1_3_2_1_41_1","unstructured":"Monsoon. https:\/\/www.msoon.com\/high-voltage-power-monitor. Accessed on Oct. 2024."},{"key":"e_1_3_2_1_42_1","volume-title":"Darts: Differentiable architecture search. arXiv preprint arXiv:1806.09055","author":"Liu Hanxiao","year":"2018","unstructured":"Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. arXiv preprint arXiv:1806.09055, 2018."},{"key":"e_1_3_2_1_43_1","first-page":"7","volume-title":"2019 IEEE\/ACM International Conference on Computer-Aided Design (ICCAD)","author":"Gong Chengyue","unstructured":"Chengyue Gong, Zixuan Jiang, Dilin Wang, Yibo Lin, Qiang Liu, and David Z Pan. Mixed precision neural architecture search for energy efficient deep learning. In 2019 IEEE\/ACM International Conference on Computer-Aided Design (ICCAD), pages 1\u20137. IEEE, 2019."},{"key":"e_1_3_2_1_44_1","volume-title":"ICLR","author":"Hongxu","year":"2021","unstructured":"Hongxu Li et al. Hw-nasbench: Hardware-aware neural architecture search benchmark. In ICLR, 2021."},{"key":"e_1_3_2_1_45_1","volume-title":"Ce-nas: An end-to-end carbon-efficient neural architecture search framework. arXiv preprint arXiv:2406.01414","author":"Zhao Yiyang","year":"2024","unstructured":"Yiyang Zhao, Yunzhuo Liu, Bo Jiang, and Tian Guo. Ce-nas: An end-to-end carbon-efficient neural architecture search framework. arXiv preprint arXiv:2406.01414, 2024."},{"key":"e_1_3_2_1_46_1","unstructured":"pyJoules. https:\/\/github.com\/powerapi-ng\/pyJoules\/. Accessed on Oct. 2024."},{"key":"e_1_3_2_1_47_1","unstructured":"PowerAPI. https:\/\/powerapi.org\/. Accessed on Oct. 2024."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"e_1_3_2_1_49_1","volume-title":"Light-weight, general-purpose, and mobile-friendly vision transformer. arxiv","author":"Mehta Sachin","year":"2021","unstructured":"Sachin Mehta and Mohammad Rastegari. Mobilevit: Light-weight, general-purpose, and mobile-friendly vision transformer. arxiv 2021. arXiv preprint arXiv:2110.02178, 2021."},{"key":"e_1_3_2_1_50_1","volume-title":"et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023."},{"issue":"3","key":"e_1_3_2_1_51_1","first-page":"3","article-title":"Phi-2: The surprising power of small language models","volume":"1","author":"Javaheripi Mojan","year":"2023","unstructured":"Mojan Javaheripi, S\u00e9bastien Bubeck, Marah Abdin, Jyoti Aneja, Sebastien Bubeck, Caio C\u00e9sar Teodoro Mendes, Weizhu Chen, Allie Del Giorno, Ronen Eldan, Sivakanth Gopi, et al. Phi-2: The surprising power of small language models. Microsoft Research Blog, 1(3):3, 2023.","journal-title":"Microsoft Research Blog"},{"key":"e_1_3_2_1_52_1","first-page":"594","volume-title":"13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18)","author":"Chen Tianqi","year":"2018","unstructured":"Tianqi Chen, Thierry Moreau, Ziheng Jiang, Lianmin Zheng, Eddie Yan, Haichen Shen, Meghan Cowan, Leyuan Wang, Yuwei Hu, Luis Ceze, et al. {TVM}: An automated {End-to-End} optimizing compiler for deep learning. In 13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18), pages 578\u2013594, 2018."},{"key":"e_1_3_2_1_53_1","volume-title":"Carboncp: Carbon-aware dnn partitioning with conformal prediction for sustainable edge intelligence. arXiv preprint arXiv:2404.16970","author":"Ke Hongyu","year":"2024","unstructured":"Hongyu Ke, Wanxin Jin, and Haoxin Wang. Carboncp: Carbon-aware dnn partitioning with conformal prediction for sustainable edge intelligence. arXiv preprint arXiv:2404.16970, 2024."}],"event":{"name":"SEC '25: Tenth ACM\/IEEE Symposium on Edge Computing","location":"the Hilton Arlington National Landing Arlington VA USA","acronym":"SEC '25","sponsor":["SIGMOBILE ACM Special Interest Group on Mobility of Systems, Users, Data and Computing","IEEE Computer Society"]},"container-title":["Proceedings of the Tenth ACM\/IEEE Symposium on Edge Computing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3769102.3770622","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T16:05:30Z","timestamp":1764777930000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3769102.3770622"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,3]]},"references-count":53,"alternative-id":["10.1145\/3769102.3770622","10.1145\/3769102"],"URL":"https:\/\/doi.org\/10.1145\/3769102.3770622","relation":{},"subject":[],"published":{"date-parts":[[2025,12,3]]},"assertion":[{"value":"2025-12-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}