{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T20:18:29Z","timestamp":1764793109787,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":68,"publisher":"ACM","funder":[{"name":"National Key R&D Program of China","award":["2023YFB4502400"],"award-info":[{"award-number":["2023YFB4502400"]}]},{"name":"China NSF grant","award":["62322206, 62025204, 62132018, U2268204, 62272307, 62372296"],"award-info":[{"award-number":["62322206, 62025204, 62132018, U2268204, 62272307, 62372296"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3736823","type":"proceedings-article","created":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T13:30:13Z","timestamp":1754055013000},"page":"673-684","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["A Two-Stage Data Selection Framework for Data-Efficient Model Training on Edge Devices"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0333-6418","authenticated-orcid":false,"given":"Chen","family":"Gong","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-7523-8334","authenticated-orcid":false,"given":"Rui","family":"Xing","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5094-5331","authenticated-orcid":false,"given":"Zhenzhe","family":"Zheng","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0965-9058","authenticated-orcid":false,"given":"Fan","family":"Wu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, 
China"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"2019. Siri - Apple. https:\/\/www.apple.com\/siri\/\/."},{"key":"e_1_3_2_2_2_1","unstructured":"2024. Google Lens - Search What You See. https:\/\/lens.google\/."},{"key":"e_1_3_2_2_3_1","unstructured":"2024. Microsoft SwiftKey Keyboard. https:\/\/www.microsoft.com\/en-us\/swiftkey."},{"key":"e_1_3_2_2_4_1","volume-title":"Annual Conference on Machine Learning and Systems (MLSys). 374-388","author":"Bonawitz Kallista A.","year":"2019","unstructured":"Kallista A. Bonawitz, Hubert Eichner, Wolfgang Grieskamp, Dzmitry Huba, Alex Ingerman, Vladimir Ivanov, Chlo\u00e9 Kiddon, Stefano Mazzocchi, Brendan McMahan, Timon Van Overveldt, David Petrou, Daniel Ramage, and Jason Roselander. 2019. Towards Federated Learning at Scale: System Design. In Annual Conference on Machine Learning and Systems (MLSys). 374-388."},{"key":"e_1_3_2_2_5_1","first-page":"441","article-title":"CaSMoS: A Framework for Learning Candidate Selection Models over Structured Queries and Documents","author":"Borisyuk Fedor","year":"2016","unstructured":"Fedor Borisyuk, Krishnaram Kenthapadi, David Stein, and Bo Zhao. 2016. CaSMoS: A Framework for Learning Candidate Selection Models over Structured Queries and Documents. In SIGKDD. 441-450.","journal-title":"SIGKDD."},{"key":"e_1_3_2_2_6_1","first-page":"31","article-title":"Towards ubiquitous learning: A first measurement of on-device training performance","author":"Cai Dongqi","year":"2021","unstructured":"Dongqi Cai, Qipeng Wang, Yuanqiang Liu, Yunxin Liu, Shangguang Wang, and Mengwei Xu. 2021. Towards ubiquitous learning: A first measurement of on-device training performance. In EMDL. 
31-36.","journal-title":"EMDL."},{"key":"e_1_3_2_2_7_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Coleman Cody","year":"2020","unstructured":"Cody Coleman, Christopher Yeh, Stephen Mussmann, Baharan Mirzasoleiman, Peter Bailis, Percy Liang, Jure Leskovec, and Matei Zaharia. 2020. Selection via Proxy: Efficient Data Selection for Deep Learning. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/2959100.2959190"},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/PerComWorkshops53856.2022.9767442"},{"key":"e_1_3_2_2_10_1","unstructured":"Apple Developer. 2023. Maximum build file sizes. https:\/\/developer.apple.com\/help\/app-store-connect\/reference\/maximum-build-file-sizes\/."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3186183"},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3186183"},{"key":"e_1_3_2_2_13_1","volume-title":"GIO: Gradient Information Optimization for Training Dataset Selection. In International Conference on Learning Representations (ICLR).","author":"Everaert Dante","year":"2024","unstructured":"Dante Everaert and Christopher Potts. 2024. GIO: Gradient Information Optimization for Training Dataset Selection. In International Conference on Learning Representations (ICLR)."},{"volume-title":"Data Shapley: Equitable Valuation of Data for Machine Learning. In International Conference on Machine Learning (ICML). 2242-2251","author":"Ghorbani Amirata","key":"e_1_3_2_2_14_1","unstructured":"Amirata Ghorbani and James Y. Zou. 2019. Data Shapley: Equitable Valuation of Data for Machine Learning. In International Conference on Machine Learning (ICML). 
2242-2251."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3498361.3539765"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2024.3365534"},{"key":"e_1_3_2_2_17_1","volume-title":"Delta: A Cloud-assisted Data Enrichment Framework for On-Device Continual Learning. In International Conference on Mobile Computing and Networking (MobiCom). 1408-1423","author":"Gong Chen","year":"2024","unstructured":"Chen Gong, Zhenzhe Zheng, Fan Wu, Xiaofeng Jia, and Guihai Chen. 2024. Delta: A Cloud-assisted Data Enrichment Framework for On-Device Continual Learning. In International Conference on Mobile Computing and Networking (MobiCom). 1408-1423."},{"key":"e_1_3_2_2_18_1","volume-title":"Online Data Selection for Federated Learning with Limited Storage. In ACM The Web Conference (WWW). 3044-3055","author":"Gong Chen","year":"2023","unstructured":"Chen Gong, Zhenzhe Zheng, Fan Wu, Yunfeng Shao, Bingshuai Li, and Guihai Chen. 2023. To Store or Not? Online Data Selection for Federated Learning with Limited Storage. In ACM The Web Conference (WWW). 3044-3055."},{"key":"e_1_3_2_2_19_1","unstructured":"Google. [n. d.]. Android Developers: APK Expansion Files. https:\/\/developer.android.com\/google\/play\/expansion-files."},{"key":"e_1_3_2_2_20_1","volume-title":"large minibatch sgd: Training imagenet in 1 hour. arXiv:1706.02677","author":"Goyal Priya","year":"2017","unstructured":"Priya Goyal, Piotr Doll\u00e1r, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. 2017. Accurate, large minibatch sgd: Training imagenet in 1 hour. arXiv:1706.02677 (2017)."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_2_22_1","volume-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications. 
arXiv preprint arXiv:1704.04861","author":"Howard Andrew G","year":"2017","unstructured":"Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. 2017. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861 (2017)."},{"key":"e_1_3_2_2_23_1","volume-title":"ElasticTrainer: Speeding Up On-Device Training with Runtime Elastic Tensor Selection. In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 56-69","author":"Huang Kai","year":"2023","unstructured":"Kai Huang, Boyuan Yang, and Wei Gao. 2023. ElasticTrainer: Speeding Up On-Device Training with Runtime Elastic Tensor Selection. In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 56-69."},{"key":"e_1_3_2_2_24_1","unstructured":"HUAWEI. 2023. HUAWEI WiFi AX3 Pro. https:\/\/consumer.huawei.com\/en\/routers\/ax3-pro\/specs\/."},{"key":"e_1_3_2_2_25_1","volume-title":"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and < 0.5 MB model size. arXiv:1602.07360","author":"Iandola Forrest N","year":"2016","unstructured":"Forrest N Iandola, Song Han, Matthew W Moskewicz, Khalid Ashraf, William J Dally, and Kurt Keutzer. 2016. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and < 0.5 MB model size. arXiv:1602.07360 (2016)."},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3498361.3538932"},{"key":"e_1_3_2_2_27_1","unstructured":"Angelos Katharopoulos and Fran\u00e7ois Fleuret. 2017. Biased Importance Sampling for Deep Neural Network Training. (2017). arXiv:1706.00043"},{"key":"e_1_3_2_2_28_1","volume-title":"Not All Samples Are Created Equal: Deep Learning with Importance Sampling. In International Conference on Machine Learning (ICML). 2530-2539","author":"Katharopoulos Angelos","year":"2018","unstructured":"Angelos Katharopoulos and Fran\u00e7ois Fleuret. 2018. 
Not All Samples Are Created Equal: Deep Learning with Importance Sampling. In International Conference on Machine Learning (ICML). 2530-2539."},{"key":"e_1_3_2_2_29_1","unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning multiple layers of features from tiny images. (2009)."},{"key":"e_1_3_2_2_30_1","unstructured":"Alex Krizhevsky Ilya Sutskever and Geoffrey E Hinton. 2012. ImageNet Classification with Deep Convolutional Neural Networks. In NeurIPS."},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3495243.3517017"},{"key":"e_1_3_2_2_32_1","first-page":"1271","article-title":"Camel","author":"Li Yiming","year":"2022","unstructured":"Yiming Li, Yanyan Shen, and Lei Chen. 2022. Camel: Managing Data for Efficient Stream Learning. In SIGMOD. 1271-1285.","journal-title":"Managing Data for Efficient Stream Learning. In SIGMOD."},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/3432208"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3696410.3714796"},{"key":"e_1_3_2_2_35_1","unstructured":"Brendan McMahan Eider Moore Daniel Ramage Seth Hampson and Blaise Ag\u00fcera y Arcas. [n. d.]. Communication-Efficient Learning of Deep Networks from Decentralized Data. In Artificial Intelligence and Statistics (AISTATS)."},{"key":"e_1_3_2_2_36_1","volume-title":"Coresets for Data-efficient Training of Machine Learning Models. In International Conference on Machine Learning (ICML). 6950-6960","author":"Mirzasoleiman Baharan","year":"2020","unstructured":"Baharan Mirzasoleiman, Jeff A. Bilmes, and Jure Leskovec. 2020. Coresets for Data-efficient Training of Machine Learning Models. In International Conference on Machine Learning (ICML). 6950-6960."},{"key":"e_1_3_2_2_37_1","unstructured":"NVIDIA. 2023. Jetson Nano Developer Kit. https:\/\/developer.nvidia.com\/embedded\/jetson-nano-developer-kit."},{"key":"e_1_3_2_2_38_1","unstructured":"Official Journal of the European Union. 2021. 
General data protection regulation. https:\/\/gdpr-info.eu\/."},{"key":"e_1_3_2_2_39_1","unstructured":"OpenAI. 2023. ChatGPT General FAQ. https:\/\/help.openai.com\/en\/articles\/6783457-chatgpt-general-faq."},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.1145\/3458864.3467681"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM48880.2022.9796748"},{"key":"e_1_3_2_2_42_1","volume-title":"Approximation theory of the MLP model in neural networks. Acta numerica 8","author":"Pinkus Allan","year":"1999","unstructured":"Allan Pinkus. 1999. Approximation theory of the MLP model in neural networks. Acta numerica 8 (1999), 143-195."},{"key":"e_1_3_2_2_43_1","volume-title":"International Conference on Machine Learning (ICML). 17848-17869","author":"Pooladzandi Omead","year":"2022","unstructured":"Omead Pooladzandi, David Davini, and Baharan Mirzasoleiman. 2022. Adaptive second order coresets for data-efficient machine learning. In International Conference on Machine Learning (ICML). 17848-17869."},{"volume-title":"IEEE \/ CVF Computer Vision and Pattern Recognition Conference (CVPR). 5533-5542","author":"Rebuffi Sylvestre-Alvise","key":"e_1_3_2_2_44_1","unstructured":"Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H. Lampert. 2017. iCaRL: Incremental Classifier and Representation Learning. In IEEE \/ CVF Computer Vision and Pattern Recognition Conference (CVPR). 5533-5542."},{"key":"e_1_3_2_2_45_1","volume-title":"A stochastic approximation method. The Annals of Mathematical Statistics","author":"Robbins Herbert","year":"1951","unstructured":"Herbert Robbins and Sutton Monro. 1951. A stochastic approximation method. The Annals of Mathematical Statistics (1951), 400-407."},{"key":"e_1_3_2_2_46_1","unstructured":"Burr Settles. 2009. Active learning literature survey. (2009)."},{"key":"e_1_3_2_2_47_1","unstructured":"Vatsal Shah Xiaoxia Wu and Sujay Sanghavi. [n. d.]. 
Choosing the Sample with Lowest Loss makes SGD Robust. In Artificial Intelligence and Statistics (AISTATS)."},{"key":"e_1_3_2_2_48_1","volume-title":"Mastering the game of Go without human knowledge. Nature 550, 7676","author":"Silver David","year":"2017","unstructured":"David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, Yutian Chen, Timothy P. Lillicrap, Fan Hui, Laurent Sifre, George van den Driessche, Thore Graepel, and Demis Hassabis. 2017. Mastering the game of Go without human knowledge. Nature 550, 7676 (2017), 354-359."},{"key":"e_1_3_2_2_49_1","first-page":"1","article-title":"Characterizing the performance of accelerated Jetson edge devices for training deep learning models","volume":"6","author":"Sai Anuroop Kesanapalli Prashanthi SK","year":"2022","unstructured":"Prashanthi SK, Sai Anuroop Kesanapalli, and Yogesh Simmhan. 2022. Characterizing the performance of accelerated Jetson edge devices for training deep learning models. SIGMETRICS 6, 3 (2022), 1-26.","journal-title":"SIGMETRICS"},{"volume-title":"Increase the Batch Size. In International Conference on Learning Representations (ICLR).","author":"Smith Samuel L.","key":"e_1_3_2_2_50_1","unstructured":"Samuel L. Smith, Pieter-Jan Kindermans, Chris Ying, and Quoc V. Le. 2018. Don't Decay the Learning Rate, Increase the Batch Size. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_2_51_1","volume-title":"Revisiting Unreasonable Effectiveness of Data in Deep Learning Era. In International Conference on Computer Vision (ICCV). 843-852","author":"Sun Chen","year":"2017","unstructured":"Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. 2017. Revisiting Unreasonable Effectiveness of Data in Deep Learning Era. In International Conference on Computer Vision (ICCV). 
843-852."},{"key":"e_1_3_2_2_52_1","volume-title":"and Wei Dong","author":"Sun Tong","year":"2025","unstructured":"Tong Sun, Bowen Jiang, Hailong Lin, Borui Li, Yixiao Teng, Yi Gao, and Wei Dong. 2025. TensorShield: Safeguarding On-Device Inference by Shielding Critical DNN Tensors with TEE. arXiv:2505.22735 [cs.CR] https:\/\/arxiv.org\/abs\/2505.22735"},{"key":"e_1_3_2_2_53_1","volume-title":"FedSS: Federated learning with smart selection of clients. arXiv preprint arXiv:2207.04569","author":"Tahir Ammar","year":"2022","unstructured":"Ammar Tahir, Yongzhou Chen, and Prashanti Nilayam. 2022. FedSS: Federated learning with smart selection of clients. arXiv preprint arXiv:2207.04569 (2022)."},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM48880.2022.9796929"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447993.3448625"},{"key":"e_1_3_2_2_56_1","volume-title":"ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 450-463","author":"Xu Mengwei","year":"2022","unstructured":"Qipeng Wang, Mengwei Xu, Chao Jin, Xinran Dong, Jinliang Yuan, Xin Jin, Gang Huang, Yunxin Liu, and Xuanzhe Liu. 2022. Melon: breaking the memory wall for resource-efficient on-device machine learning. In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 450-463."},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155284"},{"key":"e_1_3_2_2_58_1","volume-title":"Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209","author":"Warden Pete","year":"2018","unstructured":"Pete Warden. 2018. Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209 (2018)."},{"key":"e_1_3_2_2_59_1","volume-title":"NN-Stretch: Automatic Neural Network Branching for Parallel Inference on Heterogeneous Multi-Processors. 
In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 70-83","author":"Wei Jianyu","year":"2023","unstructured":"Jianyu Wei, Ting Cao, Shijie Cao, Shiqi Jiang, Shaowei Fu, Mao Yang, Yanyong Zhang, and Yunxin Liu. 2023. NN-Stretch: Automatic Neural Network Branching for Parallel Inference on Heterogeneous Multi-Processors. In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 70-83."},{"key":"e_1_3_2_2_60_1","volume-title":"Annual International Conference on Mobile Computing and Networking (MobiCom). 214-227","author":"Xu Daliang","year":"2022","unstructured":"Daliang Xu, Mengwei Xu, Qipeng Wang, Shangguang Wang, Yun Ma, Kang Huang, Gang Huang, Xin Jin, and Xuanzhe Liu. 2022. Mandheling: mixed-precision on-device DNN training with DSP offloading. In Annual International Conference on Mobile Computing and Networking (MobiCom). 214-227."},{"key":"e_1_3_2_2_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313591"},{"key":"e_1_3_2_2_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287075"},{"key":"e_1_3_2_2_63_1","doi-asserted-by":"publisher","DOI":"10.1145\/3494981"},{"key":"e_1_3_2_2_64_1","volume-title":"Boosting DNN Cold Inference on Edge Devices. In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 516-529","author":"Yi Rongjie","year":"2023","unstructured":"Rongjie Yi, Ting Cao, Ao Zhou, Xiao Ma, Shangguang Wang, and Mengwei Xu. 2023. Boosting DNN Cold Inference on Edge Devices. In ACM International Conference on Mobile Systems, Applications, and Services (MobiSys). 516-529."},{"key":"e_1_3_2_2_65_1","volume-title":"Online Coreset Selection for Rehearsal-based Continual Learning. In International Conference on Learning Representations (ICLR).","author":"Yoon Jaehong","year":"2022","unstructured":"Jaehong Yoon, Divyam Madaan, Eunho Yang, and Sung Ju Hwang. 2022. Online Coreset Selection for Rehearsal-based Continual Learning. 
In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_2_66_1","first-page":"29","article-title":"Mercury: Efficient on-device distributed dnn training via stochastic importance sampling","author":"Zeng Xiao","year":"2021","unstructured":"Xiao Zeng, Ming Yan, and Mi Zhang. 2021. Mercury: Efficient on-device distributed dnn training via stochastic importance sampling. In Sensys. 29-41.","journal-title":"Sensys."},{"key":"e_1_3_2_2_67_1","volume-title":"Stochastic Optimization with Importance Sampling for Regularized Loss Minimization. In International Conference on Machine Learning (ICML). 1-9.","author":"Zhao Peilin","year":"2015","unstructured":"Peilin Zhao and Tong Zhang. 2015. Stochastic Optimization with Importance Sampling for Regularized Loss Minimization. In International Conference on Machine Learning (ICML). 1-9."},{"key":"e_1_3_2_2_68_1","doi-asserted-by":"publisher","DOI":"10.1145\/3666025.3699355"}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Toronto ON Canada","acronym":"KDD '25"},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining 
V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3736823","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T14:30:36Z","timestamp":1755354636000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3736823"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":68,"alternative-id":["10.1145\/3711896.3736823","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3736823","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}