{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T04:15:55Z","timestamp":1765340155483,"version":"3.46.0"},"publisher-location":"New York, NY, USA","reference-count":51,"publisher":"ACM","funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["Grant No. 62441227"],"award-info":[{"award-number":["Grant No. 62441227"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["Grant No. 2023YFB3106500"],"award-info":[{"award-number":["Grant No. 2023YFB3106500"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004543","name":"China Scholarship Council","doi-asserted-by":"publisher","award":["Grant No. 202406230318"],"award-info":[{"award-number":["Grant No. 
202406230318"]}],"id":[{"id":"10.13039\/501100004543","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3754587","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T06:47:18Z","timestamp":1761374838000},"page":"5375-5384","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["FedDEAP: Adaptive Dual-Prompt Tuning for Multi-Domain Federated Learning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-2624-2856","authenticated-orcid":false,"given":"Yubin","family":"Zheng","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4778-1134","authenticated-orcid":false,"given":"Pak Hei","family":"Yeung","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4962-2604","authenticated-orcid":false,"given":"Jing","family":"Xia","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-6978-1935","authenticated-orcid":false,"given":"Tianjie","family":"Ju","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6607-1280","authenticated-orcid":false,"given":"Peng","family":"Tang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6428-1655","authenticated-orcid":false,"given":"Weidong","family":"Qiu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7944-1658","authenticated-orcid":false,"given":"Jagath C.","family":"Rajapakse","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_1_2_1","volume-title":"Personalized federated learning with theoretical guarantees: A model-agnostic meta-learning approach. Advances in neural information processing systems","author":"Fallah Alireza","year":"2020","unstructured":"Alireza Fallah, Aryan Mokhtari, and Asuman Ozdaglar. 2020. Personalized federated learning with theoretical guarantees: A model-agnostic meta-learning approach. Advances in neural information processing systems, Vol. 33 (2020), 3557-3568."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.5555\/2354409.2355024"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01480"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i11.21446"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2023.3302410"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_8_1","volume-title":"International conference on machine learning. PMLR, 2790-2799","author":"Houlsby Neil","year":"2019","unstructured":"Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In International conference on machine learning. 
PMLR, 2790-2799."},{"key":"e_1_3_2_1_9_1","first-page":"3","article-title":"Lora: Low-rank adaptation of large language models","volume":"1","author":"Hu Edward J","year":"2022","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al., 2022. Lora: Low-rank adaptation of large language models. ICLR, Vol. 1, 2 (2022), 3.","journal-title":"ICLR"},{"key":"e_1_3_2_1_10_1","volume-title":"Communication-efficient on-device machine learning: Federated distillation and augmentation under non-iid private data. arXiv preprint arXiv:1811.11479","author":"Jeong Eunjeong","year":"2018","unstructured":"Eunjeong Jeong, Seungeun Oh, Hyesung Kim, Jihong Park, Mehdi Bennis, and Seong-Lyun Kim. 2018. Communication-efficient on-device machine learning: Federated distillation and augmentation under non-iid private data. arXiv preprint arXiv:1811.11479 (2018)."},{"key":"e_1_3_2_1_11_1","volume-title":"International conference on machine learning. PMLR, 5132-5143","author":"Karimireddy Sai Praneeth","year":"2020","unstructured":"Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. 2020. Scaffold: Stochastic controlled averaging for federated learning. In International conference on machine learning. PMLR, 5132-5143."},{"key":"e_1_3_2_1_12_1","volume-title":"The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691","author":"Lester Brian","year":"2021","unstructured":"Brian Lester, Rami Al-Rfou, and Noah Constant. 2021. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691 (2021)."},{"key":"e_1_3_2_1_13_1","volume-title":"Fedmd: Heterogenous federated learning via model distillation. arXiv preprint arXiv:1910.03581","author":"Li Daliang","year":"2019","unstructured":"Daliang Li and Junpu Wang. 2019. Fedmd: Heterogenous federated learning via model distillation. 
arXiv preprint arXiv:1910.03581 (2019)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.591"},{"key":"e_1_3_2_1_15_1","volume-title":"International conference on machine learning. PMLR, 12888-12900","author":"Li Junnan","year":"2022","unstructured":"Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning. PMLR, 12888-12900."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01057"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2019.06.011"},{"key":"e_1_3_2_1_18_1","first-page":"429","article-title":"Federated optimization in heterogeneous networks","volume":"2","author":"Li Tian","year":"2020","unstructured":"Tian Li, Anit Kumar Sahu, Manzil Zaheer, Maziar Sanjabi, Ameet Talwalkar, and Virginia Smith. 2020b. Federated optimization in heterogeneous networks. Proceedings of Machine learning and systems, Vol. 2 (2020), 429-450.","journal-title":"Proceedings of Machine learning and systems"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2020.101765"},{"volume-title":"Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)","author":"Li Xiang Lisa","key":"e_1_3_2_1_20_1","unstructured":"Xiang Lisa Li and Percy Liang. 2021. Prefix-Tuning: Optimizing Continuous Prompts for Generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). 
Association for Computational Linguistics."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3081560"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00490"},{"key":"e_1_3_2_1_23_1","volume-title":"Think locally, act globally: Federated learning with local and global representations. arXiv preprint arXiv:2001.01523","author":"Liang Paul Pu","year":"2020","unstructured":"Paul Pu Liang, Terrance Liu, Liu Ziyin, Nicholas B Allen, Randy P Auerbach, David Brent, Ruslan Salakhutdinov, and Louis-Philippe Morency. 2020. Think locally, act globally: Federated learning with local and global representations. arXiv preprint arXiv:2001.01523 (2020)."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00107"},{"key":"e_1_3_2_1_25_1","first-page":"1","article-title":"Fate: An industrial grade platform for collaborative learning with data protection","volume":"22","author":"Liu Yang","year":"2021","unstructured":"Yang Liu, Tao Fan, Tianjian Chen, Qian Xu, and Qiang Yang. 2021b. Fate: An industrial grade platform for collaborative learning with data protection. Journal of Machine Learning Research, Vol. 22, 226 (2021), 1-6.","journal-title":"Journal of Machine Learning Research"},{"volume-title":"FEDCLIP: FAST GENERALIZATION AND PERSONALIZATION FOR CLIP IN FEDERATED LEARNING. In ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models.","author":"Lu Wang","key":"e_1_3_2_1_26_1","unstructured":"Wang Lu, HU Xixu, Jindong Wang, and Xing Xie. [n.d.]. FEDCLIP: FAST GENERALIZATION AND PERSONALIZATION FOR CLIP IN FEDERATED LEARNING. In ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models."},{"key":"e_1_3_2_1_27_1","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data. 
In Artificial intelligence and statistics","author":"McMahan Brendan","year":"2017","unstructured":"Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. 2017. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics. PMLR, 1273-1282.","journal-title":"PMLR"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.2015509117"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00149"},{"key":"e_1_3_2_1_30_1","volume-title":"International conference on machine learning. PmLR, 8748-8763","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al., 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. PmLR, 8748-8763."},{"key":"e_1_3_2_1_31_1","volume-title":"FedPIA-Permuting and Integrating Adapters leveraging Wasserstein Barycenters for Finetuning Foundation Models in Multi-Modal Federated Learning. arXiv preprint arXiv:2412.14424","author":"Saha Pramit","year":"2024","unstructured":"Pramit Saha, Divyanshu Mishra, Felix Wagner, Konstantinos Kamnitsas, and J Alison Noble. 2024. FedPIA-Permuting and Integrating Adapters leveraging Wasserstein Barycenters for Finetuning Foundation Models in Multi-Modal Federated Learning. arXiv preprint arXiv:2412.14424 (2024)."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICME55011.2023.00254"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i13.29434"},{"key":"e_1_3_2_1_35_1","volume-title":"Personalized federated learning with moreau envelopes. 
Advances in neural information processing systems","author":"Dinh Canh T","year":"2020","unstructured":"Canh T Dinh, Nguyen Tran, and Josh Nguyen. 2020. Personalized federated learning with moreau envelopes. Advances in neural information processing systems, Vol. 33 (2020), 21394-21405."},{"key":"e_1_3_2_1_36_1","volume-title":"Federated learning from pre-trained models: A contrastive learning approach. Advances in neural information processing systems","author":"Tan Yue","year":"2022","unstructured":"Yue Tan, Guodong Long, Jie Ma, Lu Liu, Tianyi Zhou, and Jing Jiang. 2022. Federated learning from pre-trained models: A contrastive learning approach. Advances in neural information processing systems, Vol. 35 (2022), 19332-19344."},{"key":"e_1_3_2_1_37_1","article-title":"Visualizing data using t-SNE","volume":"9","author":"der Maaten Laurens Van","year":"2008","unstructured":"Laurens Van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-SNE. Journal of machine learning research, Vol. 9, 11 (2008).","journal-title":"Journal of machine learning research"},{"key":"e_1_3_2_1_38_1","volume-title":"Tackling the objective inconsistency problem in heterogeneous federated optimization. Advances in neural information processing systems","author":"Wang Jianyu","year":"2020","unstructured":"Jianyu Wang, Qinghua Liu, Hao Liang, Gauri Joshi, and H Vincent Poor. 2020. Tackling the objective inconsistency problem in heterogeneous federated optimization. Advances in neural information processing systems, Vol. 33 (2020), 7611-7623."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.256"},{"key":"e_1_3_2_1_40_1","volume-title":"Communication-efficient federated learning via knowledge distillation. Nature communications","author":"Wu Chuhan","year":"2022","unstructured":"Chuhan Wu, Fangzhao Wu, Lingjuan Lyu, Yongfeng Huang, and Xing Xie. 2022. Communication-efficient federated learning via knowledge distillation. 
Nature communications, Vol. 13, 1 (2022), 2032."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72390-2_50"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-87199-4_35"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"crossref","unstructured":"Dong Yang Ziyue Xu Wenqi Li Andriy Myronenko Holger R Roth Stephanie Harmon Sheng Xu Baris Turkbey Evrim Turkbey Xiaosong Wang et al. 2021. Federated semi-supervised learning for COVID region segmentation in chest CT using multi-national data from China Italy Japan. Medical image analysis Vol. 70 (2021) 101992.","DOI":"10.1016\/j.media.2021.101992"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i15.29568"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681490"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20217"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01151"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/BIBM62325.2024.10822798"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"e_1_3_2_1_51_1","volume-title":"International conference on machine learning. PMLR, 12878-12889","author":"Zhu Zhuangdi","year":"2021","unstructured":"Zhuangdi Zhu, Junyuan Hong, and Jiayu Zhou. 2021. Data-free knowledge distillation for heterogeneous federated learning. In International conference on machine learning. PMLR, 12878-12889. 
"}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Dublin Ireland","acronym":"MM '25"},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3754587","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T04:13:43Z","timestamp":1765340023000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3754587"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":51,"alternative-id":["10.1145\/3746027.3754587","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3754587","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}