{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T18:08:23Z","timestamp":1776103703500,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":72,"publisher":"ACM","funder":[{"name":"111 Center","award":["D25008"],"award-info":[{"award-number":["D25008"]}]},{"name":"Shenzhen Science and Technology Foundation","award":["ZDSYS20190902092853047"],"award-info":[{"award-number":["ZDSYS20190902092853047"]}]},{"name":"the Project of DEGP","award":["2024GCZX003, 2023KCXTD042"],"award-info":[{"award-number":["2024GCZX003, 2023KCXTD042"]}]},{"name":"Guangdong Provincial Key Lab of Integrated Communication, Sensing and Computation for Ubiquitous Internet of Things","award":["2023B1212010007"],"award-info":[{"award-number":["2023B1212010007"]}]},{"name":"China NSFC Grant","award":["62472366"],"award-info":[{"award-number":["62472366"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3737226","type":"proceedings-article","created":{"date-parts":[[2025,8,3]],"date-time":"2025-08-03T21:07:39Z","timestamp":1754255259000},"page":"4511-4521","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["LLM4HAR: Generalizable On-device Human Activity Recognition with Pretrained LLMs"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3682-4290","authenticated-orcid":false,"given":"Zhiqing","family":"Hong","sequence":"first","affiliation":[{"name":"Rutgers University, Piscataway, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-3730-4457","authenticated-orcid":false,"given":"Yiwei","family":"Song","sequence":"additional","affiliation":[{"name":"JD Logistics, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-3819-287X","authenticated-orcid":false,"given":"Zelong","family":"Li","sequence":"additional","affiliation":[{"name":"JD Logistics, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-7092-7502","authenticated-orcid":false,"given":"Anlan","family":"Yu","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-1758-2870","authenticated-orcid":false,"given":"Shuxin","family":"Zhong","sequence":"additional","affiliation":[{"name":"HKUST (GZ), Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1226-341X","authenticated-orcid":false,"given":"Yi","family":"Ding","sequence":"additional","affiliation":[{"name":"UT Dallas, Richardson, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6062-2619","authenticated-orcid":false,"given":"Tian","family":"He","sequence":"additional","affiliation":[{"name":"JD Logistics, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9307-8736","authenticated-orcid":false,"given":"Desheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Rutgers University, Piscataway, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"Baichuan. 2023. Baichuan 2: Open Large-scale Language Models. arXiv preprint arXiv:2309.10305(2023). https:\/\/arxiv.org\/abs\/2309.10305"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3494994"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3570361.3613270"},{"key":"e_1_3_2_2_4_1","unstructured":"Ching Chang Wen-Chih Peng and Tien-Fu Chen. 2023. Llm4ts: Two-stage fine-tuning for time-series forecasting with pre-trained llms. arXiv preprint arXiv:2308.08469(2023)."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/3380985"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447744"},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3557915.3561023"},{"key":"e_1_3_2_2_8_1","volume-title":"Tsmixer: An all-mlp architecture for time series forecasting. arXiv preprint arXiv:2303.06053(2023).","author":"Chen Si-An","year":"2023","unstructured":"Si-An Chen, Chun-Liang Li, Nate Yoder, Sercan O Arik, and Tomas Pfister. 2023. Tsmixer: An all-mlp architecture for time series forecasting. arXiv preprint arXiv:2303.06053(2023)."},{"key":"e_1_3_2_2_9_1","unstructured":"Junyoung Chung Caglar Gulcehre KyungHyun Cho and Yoshua Bengio. 2014. Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling. arXiv:1412.3555 [cs.NE]"},{"key":"e_1_3_2_2_10_1","unstructured":"DeepSeek-AI. 2024. DeepSeek-V3 Technical Report. arXiv:2412.19437 [cs.CL] https:\/\/arxiv.org\/abs\/2412.19437"},{"key":"e_1_3_2_2_11_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783(2024)."},{"key":"e_1_3_2_2_12_1","unstructured":"Siqi Fan Xin Jiang Xiang Li Xuying Meng Peng Han Shuo Shang Aixin Sun Yequan Wang and Zhongyuan Wang. 2024. Not all Layers of LLMs are Necessary during Inference. arXiv:2403.02181 [cs.CL]"},{"key":"e_1_3_2_2_13_1","unstructured":"Nate Gruver Marc Finzi Shikai Qiu and Andrew Gordon Wilson. 2023. Large language models are zero-shot time series forecasters. arXiv preprint arXiv:2310.07820(2023)."},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534574"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3369813"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3659597"},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3557915.3560944"},{"key":"e_1_3_2_2_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2024.3411562"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3659596"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3557915.3560999"},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i5.25743"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351244"},{"key":"e_1_3_2_2_23_1","volume-title":"HARGPT: Are LLMs Zero-Shot Human Activity Recognizers? arXiv:2403.02727 [cs.CL] https:\/\/arxiv.org\/abs\/2403.02727","author":"Ji Sijie","year":"2024","unstructured":"Sijie Ji, Xinzhe Zheng, and Chenshu Wu. 2024. HARGPT: Are LLMs Zero-Shot Human Activity Recognizers? arXiv:2403.02727 [cs.CL] https:\/\/arxiv.org\/abs\/2403.02727"},{"key":"e_1_3_2_2_24_1","volume-title":"Time-llm: Time series forecasting by reprogramming large language models. arXiv preprint arXiv:2310.01728(2023).","author":"Jin Ming","year":"2023","unstructured":"Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, et al., 2023a. Time-llm: Time series forecasting by reprogramming large language models. arXiv preprint arXiv:2310.01728(2023)."},{"key":"e_1_3_2_2_25_1","unstructured":"Ming Jin Qingsong Wen Yuxuan Liang Chaoli Zhang Siqiao Xue Xue Wang James Zhang Yi Wang Haifeng Chen Xiaoli Li et al. 2023b. Large models for time series and spatio-temporal data: A survey and outlook. arXiv preprint arXiv:2310.10196(2023)."},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19827-4_36"},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00948"},{"key":"e_1_3_2_2_28_1","unstructured":"Yubin Kim Xuhai Xu Daniel McDuff Cynthia Breazeal and Hae Won Park. 2024. Health-LLM: Large Language Models for Health Prediction via Wearable Sensor Data. arXiv:2401.06866 [cs.CL]"},{"key":"e_1_3_2_2_29_1","unstructured":"Jun Li Che Liu Sibo Cheng Rossella Arcucci and Shenda Hong. 2023. Frozen Language Model Helps ECG Zero-Shot Learning. arXiv preprint arXiv:2303.12311(2023)."},{"key":"e_1_3_2_2_30_1","unstructured":"Zechen Li Shohreh Deldari Linyao Chen Hao Xue and Flora D Salim. 2024. SensorLLM: Aligning Large Language Models with Motion Sensors for Human Activity Recognition. arXiv preprint arXiv:2410.10624(2024)."},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3631429"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534589"},{"key":"e_1_3_2_2_33_1","unstructured":"Shuming Ma Hongyu Wang Lingxiao Ma Lei Wang Wenhui Wang Shaohan Huang Li Dong Ruiping Wang Jilong Xue and Furu Wei. 2024. The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits. arXiv:2402.17764 [cs.CL]"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3302505.3310068"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.883"},{"key":"e_1_3_2_2_36_1","volume-title":"International Conference on Learning Representations.","author":"Nie Yuqi","year":"2023","unstructured":"Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. 2023. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i13.17416"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1145\/3552434"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599360"},{"key":"e_1_3_2_2_40_1","unstructured":"Alec Radford Jeff Wu Rewon Child David Luan Dario Amodei and Ilya Sutskever. 2019. Language Models are Unsupervised Multitask Learners. (2019)."},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2015.07.085"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539027"},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403332"},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3328932"},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2022.101429"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.3390\/s140610146"},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/2809695.2809718"},{"key":"e_1_3_2_2_48_1","volume-title":"TEST: Text Prototype Aligned Embedding to Activate LLM's Ability for Time Series. arXiv preprint arXiv:2308.08241(2023).","author":"Sun Chenxi","year":"2023","unstructured":"Chenxi Sun, Yaliang Li, Hongyan Li, and Shenda Hong. 2023. TEST: Text Prototype Aligned Embedding to Activate LLM's Ability for Time Series. arXiv preprint arXiv:2308.08241(2023)."},{"key":"e_1_3_2_2_49_1","unstructured":"Qwen Team. 2023. Qwen Technical Report. arXiv preprint arXiv:2309.16609(2023)."},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01933"},{"key":"e_1_3_2_2_51_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971(2023).","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al., 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971(2023)."},{"key":"e_1_3_2_2_52_1","volume-title":"Attention is all you need. Advances in neural information processing systems","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, Vol. 30 (2017)."},{"key":"e_1_3_2_2_53_1","volume-title":"Generalizing to unseen domains: A survey on domain generalization","author":"Wang Jindong","year":"2022","unstructured":"Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip Yu. 2022. Generalizing to unseen domains: A survey on domain generalization. IEEE Transactions on Knowledge and Data Engineering(2022)."},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3265689.3265705"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539084"},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"publisher","DOI":"10.1145\/3666025.3699349"},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.1145\/3400066"},{"key":"e_1_3_2_2_58_1","doi-asserted-by":"publisher","DOI":"10.1145\/3411836"},{"key":"e_1_3_2_2_59_1","volume-title":"Penetrative AI: Making LLMs Comprehend the Physical World. arXiv:2310.09605 [cs.AI]","author":"Xu Huatao","year":"2024","unstructured":"Huatao Xu, Liying Han, Qirui Yang, Mo Li, and Mani Srivastava. 2024b. Penetrative AI: Making LLMs Comprehend the Physical World. arXiv:2310.09605 [cs.AI]"},{"key":"e_1_3_2_2_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3570361.3613299"},{"key":"e_1_3_2_2_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485730.3485937"},{"key":"e_1_3_2_2_62_1","volume-title":"When Large Language Model Agents Meet 6G Networks: Perception, Grounding, and Alignment. arXiv:2401","author":"Xu Minrui","unstructured":"Minrui Xu, Niyato Dusit, Jiawen Kang, Zehui Xiong, Shiwen Mao, Zhu Han, Dong In Kim, and Khaled B. Letaief. 2024a. When Large Language Model Agents Meet 6G Networks: Perception, Grounding, and Alignment. arXiv:2401.07764 [cs.AI]"},{"key":"e_1_3_2_2_63_1","doi-asserted-by":"publisher","DOI":"10.1145\/3569485"},{"key":"e_1_3_2_2_64_1","doi-asserted-by":"publisher","DOI":"10.1145\/3570361.3613302"},{"key":"e_1_3_2_2_65_1","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3614802"},{"key":"e_1_3_2_2_66_1","unstructured":"Jingfeng Yang Hongye Jin Ruixiang Tang Xiaotian Han Qizhang Feng Haoming Jiang Bing Yin and Xia Hu. 2023a. Harnessing the Power of LLMs in Practice: A Survey on ChatGPT and Beyond. arXiv:2304.13712 [cs.CL]"},{"key":"e_1_3_2_2_67_1","doi-asserted-by":"publisher","DOI":"10.1145\/3038912.3052577"},{"key":"e_1_3_2_2_68_1","doi-asserted-by":"publisher","DOI":"10.1109\/PerCom64205.2025.00022"},{"key":"e_1_3_2_2_69_1","unstructured":"Xinli Yu Zheng Chen Yuan Ling Shujing Dong Zongyi Liu and Yanbin Lu. 2023. Temporal Data Meets LLM-Explainable Financial Time Series Forecasting. arXiv preprint arXiv:2306.11025(2023)."},{"key":"e_1_3_2_2_70_1","volume-title":"SIFT meets CNN: A decade survey of instance retrieval","author":"Zheng Liang","year":"2017","unstructured":"Liang Zheng, Yi Yang, and Qi Tian. 2017. SIFT meets CNN: A decade survey of instance retrieval. IEEE transactions on pattern analysis and machine intelligence, Vol. 40, 5 (2017), 1224-1244."},{"key":"e_1_3_2_2_71_1","unstructured":"Tian Zhou Peisong Niu Xue Wang Liang Sun and Rong Jin. 2023. One Fits All: Power General Time Series Analysis by Pretrained LM. In NeurIPS."},{"key":"e_1_3_2_2_72_1","volume-title":"MAGNETO: Edge AI for Human Activity Recognition - Privacy and Personalization. arXiv:2402.07180 [cs.LG]","author":"Zuo Jingwei","year":"2024","unstructured":"Jingwei Zuo, George Arvanitakis, Mthandazo Ndhlovu, and Hakim Hacid. 2024. MAGNETO: Edge AI for Human Activity Recognition - Privacy and Personalization. arXiv:2402.07180 [cs.LG]"}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Toronto ON Canada","acronym":"KDD '25","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3737226","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T14:38:14Z","timestamp":1755355094000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3737226"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":72,"alternative-id":["10.1145\/3711896.3737226","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3737226","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}