{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T01:13:19Z","timestamp":1755825199333,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":41,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"RGC GRF","award":["2151185"],"award-info":[{"award-number":["2151185"]}]},{"name":"CUHK","award":["14222922"],"award-info":[{"award-number":["14222922"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1145\/3589335.3651933","type":"proceedings-article","created":{"date-parts":[[2024,5,12]],"date-time":"2024-05-12T18:41:21Z","timestamp":1715539281000},"page":"1558-1567","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["FedHLT: Efficient Federated Low-Rank Adaption with Hierarchical Language Tree for Multilingual Modeling"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1889-583X","authenticated-orcid":false,"given":"Zhihan","family":"Guo","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4185-8663","authenticated-orcid":false,"given":"Yifei","family":"Zhang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1835-5411","authenticated-orcid":false,"given":"Zhuo","family":"Zhang","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Shenzhen, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5550-6461","authenticated-orcid":false,"given":"Zenglin","family":"Xu","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8106-6447","authenticated-orcid":false,"given":"Irwin","family":"King","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong SAR, China"}]}],"member":"320","published-online":{"date-parts":[[2024,5,13]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.568"},{"key":"e_1_3_2_2_2_1","volume-title":"Nearest neighbour fewshot learning for cross-lingual classification. arXiv preprint arXiv:2109.02221","author":"Bari M Saiful","year":"2021","unstructured":"M Saiful Bari, Batool Haider, and Saab Mansour. 2021. Nearest neighbour fewshot learning for cross-lingual classification. arXiv preprint arXiv:2109.02221 (2021)."},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.1"},{"key":"e_1_3_2_2_4_1","volume-title":"Win-Win: A Privacy-Preserving Federated Framework for Dual-Target Cross-Domain Recommendation. In Thirty-Seventh AAAI Conference on Artificial Intelligence, AAAI","author":"Chen Gaode","year":"2023","unstructured":"Gaode Chen, Xinghua Zhang, Yijun Su, Yantong Lai, Ji Xiang, Junbo Zhang, and Yu Zheng. 2023. Win-Win: A Privacy-Preserving Federated Framework for Dual-Target Cross-Domain Recommendation. In Thirty-Seventh AAAI Conference on Artificial Intelligence, AAAI 2023, Brian Williams, Yiling Chen, and Jennifer Neville (Eds.). AAAI Press."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.loresmt-1.5"},{"key":"e_1_3_2_2_6_1","volume-title":"Unsupervised Cross-lingual Representation Learning at Scale. 
CoRR","author":"Conneau Alexis","year":"2019","unstructured":"Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Unsupervised Cross-lingual Representation Learning at Scale. CoRR (2019)."},{"key":"e_1_3_2_2_7_1","first-page":"19","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), Jill Burstein, Christy Doran, and Thamar Solorio (Eds.). Association for Computational Linguistics, Minneapolis, Minnesota, 4171--4186. https:\/\/doi.org\/10.18653\/v1\/N19-1423"},{"key":"e_1_3_2_2_8_1","unstructured":"Ning Ding Yujia Qin Guang Yang Fuchao Wei Zonghan Yang Yusheng Su Shengding Hu Yulin Chen Chi-Min Chan Weize Chen et al. 2022. Delta tuning: A comprehensive study of parameter efficient methods for pre-trained language models. arXiv preprint arXiv:2203.06904 (2022)."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.5555\/3546258.3546365"},{"key":"e_1_3_2_2_10_1","volume-title":"FedHGN: A Federated Framework for Heterogeneous Graph Neural Networks. arXiv preprint arXiv:2305.09729","author":"Fu Xinyu","year":"2023","unstructured":"Xinyu Fu and Irwin King. 2023. FedHGN: A Federated Framework for Heterogeneous Graph Neural Networks. 
arXiv preprint arXiv:2305.09729 (2023)."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.eacl-main.237"},{"key":"e_1_3_2_2_12_1","volume-title":"Federated Learning Based Multilingual Emoji Prediction In Clean and Attack Scenarios. arXiv preprint arXiv:2304.01005","author":"Gamal Karim","year":"2023","unstructured":"Karim Gamal, Ahmed Gaber, and Hossam Amer. 2023. Federated Learning Based Multilingual Emoji Prediction In Clean and Attack Scenarios. arXiv preprint arXiv:2304.01005 (2023)."},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acllong"},{"key":"e_1_3_2_2_14_1","volume-title":"International Conference on Machine Learning. PMLR, 2790--2799","author":"Houlsby Neil","year":"2019","unstructured":"Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In International Conference on Machine Learning. PMLR, 2790--2799."},{"key":"e_1_3_2_2_15_1","volume-title":"LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=nZeVKeeFYf9","author":"Hu Edward J","year":"2022","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=nZeVKeeFYf9"},{"key":"e_1_3_2_2_16_1","volume-title":"Lidong Bing, and Soujanya Poria.","author":"Hu Zhiqiang","year":"2023","unstructured":"Zhiqiang Hu, Yihuai Lan, Lei Wang, Wanyu Xu, Ee-Peng Lim, Roy Ka-Wei Lee, Lidong Bing, and Soujanya Poria. 2023. LLM-Adapters: An Adapter Family for Parameter-Efficient Fine-Tuning of Large Language Models. 
arXiv preprint arXiv:2304.01933 (2023)."},{"key":"e_1_3_2_2_17_1","volume-title":"Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al.","author":"Kairouz Peter","year":"2021","unstructured":"Peter Kairouz, H Brendan McMahan, Brendan Avent, Aur\u00e9lien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. 2021. Advances and open problems in federated learning. Foundations and Trends\u00ae in Machine Learning 14, 1--2 (2021), 1--210."},{"key":"e_1_3_2_2_18_1","volume-title":"Client-Customized Adaptation for Parameter-Efficient Federated Learning. In Findings of the Association for Computational Linguistics: ACL","author":"Kim Yeachan","year":"2023","unstructured":"Yeachan Kim, Junho Kim, Wing-Lam Mok, Jun-Hyung Park, and SangKeun Lee. 2023. Client-Customized Adaptation for Parameter-Efficient Federated Learning. In Findings of the Association for Computational Linguistics: ACL 2023."},{"key":"e_1_3_2_2_19_1","volume-title":"Ananda Theertha Suresh, and Dave Bacon","author":"Kone\u010dn\u00fd Jakub","year":"2016","unstructured":"Jakub Kone\u010dn\u00fd, H. Brendan McMahan, Felix X. Yu, Peter Richt\u00e1rik, Ananda Theertha Suresh, and Dave Bacon. 2016. Federated Learning: Strategies for Improving Communication Efficiency. CoRR abs\/1610.05492 (2016). arXiv:1610.05492 http:\/\/arxiv.org\/abs\/1610.05492"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.350"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-naacl.13"},{"key":"e_1_3_2_2_23_1","volume-title":"Communication Efficient Federated Learning for Multilingual Neural Machine Translation with Adapter. 
In Findings of the Association for Computational Linguistics: ACL 2023","author":"Liu Yi","year":"2023","unstructured":"Yi Liu, Xiaohan Bi, Lei Li, Sishuo Chen, Wenkai Yang, and Xu Sun. 2023. Communication Efficient Federated Learning for Multilingual Neural Machine Translation with Adapter. In Findings of the Association for Computational Linguistics: ACL 2023. Association for Computational Linguistics, Toronto, Canada, 5315--5328. https:\/\/aclanthology.org\/2023.findings-acl.327"},{"key":"e_1_3_2_2_24_1","volume-title":"Proceedings of the Conference on Health, Inference, and Learning (Proceedings of Machine Learning Research","volume":"162","author":"Manoel Andrea","year":"2023","unstructured":"Andrea Manoel, Mirian del Carmen Hipolito Garcia, Tal Baumel, Shize Su, Jialei Chen, Robert Sim, Dan Miller, Danny Karmon, and Dimitrios Dimitriadis. 2023. Federated Multilingual Models for Medical Transcript Analysis. In Proceedings of the Conference on Health, Inference, and Learning (Proceedings of Machine Learning Research, Vol. 209), Bobak J. Mortazavi, Tasmie Sarker, Andrew Beam, and Joyce C. Ho (Eds.). PMLR, 147--162. https:\/\/proceedings.mlr.press\/v209\/manoel23a.html"},{"key":"e_1_3_2_2_25_1","volume-title":"Proceedings of the 20th International Conference on Artificial Intelligence and Statistics (Proceedings of Machine Learning Research","volume":"1282","author":"McMahan Brendan","year":"2017","unstructured":"Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. 2017. Communication-Efficient Learning of Deep Networks from Decentralized Data. In Proceedings of the 20th International Conference on Artificial Intelligence and Statistics (Proceedings of Machine Learning Research, Vol. 54), Aarti Singh and Jerry Zhu (Eds.). PMLR, 1273--1282. https:\/\/proceedings. 
mlr.press\/v54\/mcmahan17a.html"},{"key":"e_1_3_2_2_26_1","volume-title":"Ethnologue: Languages of the world.","author":"Paul Lewis M","year":"2009","unstructured":"Lewis M Paul, Gary F Simons, Charles D Fennig, et al. 2009. Ethnologue: Languages of the world. Dallas, TX: SIL International. Available online at www. ethnologue. com\/. Retrieved June 19 (2009), 2011."},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-6319"},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i7.20785"},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-tutorials.5"},{"key":"e_1_3_2_2_30_1","volume-title":"a distilled version of BERT: smaller, faster, cheaper and lighter. ArXiv abs\/1910.01108","author":"Sanh Victor","year":"2019","unstructured":"Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. ArXiv abs\/1910.01108 (2019)."},{"key":"e_1_3_2_2_31_1","first-page":"12991","article-title":"Lst: Ladder side-tuning for parameter and memory efficient transfer learning","volume":"35","author":"Sung Yi-Lin","year":"2022","unstructured":"Yi-Lin Sung, Jaemin Cho, and Mohit Bansal. 2022. Lst: Ladder side-tuning for parameter and memory efficient transfer learning. Advances in Neural Information Processing Systems 35 (2022), 12991--13005.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i8.26197"},{"key":"e_1_3_2_2_33_1","volume-title":"FedKC: Federated Knowledge Composition for Multilingual Natural Language Understanding. In The ACM Web Conference","author":"Wang Haoyu","year":"2022","unstructured":"Haoyu Wang, Handong Zhao, Yaqing Wang, Tong Yu, Jiuxiang Gu, and Jing Gao. 2022. FedKC: Federated Knowledge Composition for Multilingual Natural Language Understanding. 
In The ACM Web Conference 2022."},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.101"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.58"},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-industry.60"},{"key":"e_1_3_2_2_37_1","volume-title":"FedLab: A Flexible Federated Learning Framework. Journal of Machine Learning Research","author":"Zeng Dun","year":"2023","unstructured":"Dun Zeng, Siqi Liang, Xiangjing Hu, Hui Wang, and Zenglin Xu. 2023. FedLab: A Flexible Federated Learning Framework. Journal of Machine Learning Research (2023)."},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acllong"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3543873.3587681"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.193"},{"key":"e_1_3_2_2_41_1","volume-title":"FedPETuning: When Federated Learning Meets the Parameter-Efficient Tuning Methods of Pre-trained Language Models. In Findings of the Association for Computational Linguistics: ACL","author":"Zhang Zhuo","year":"2023","unstructured":"Zhuo Zhang, Yuanhang Yang, Yong Dai, Qifan Wang, Yue Yu, Lizhen Qu, and Zenglin Xu. 2023. FedPETuning: When Federated Learning Meets the Parameter-Efficient Tuning Methods of Pre-trained Language Models. 
In Findings of the Association for Computational Linguistics: ACL 2023."}],"event":{"name":"WWW '24: The ACM Web Conference 2024","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Singapore Singapore","acronym":"WWW '24"},"container-title":["Companion Proceedings of the ACM Web Conference 2024"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589335.3651933","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3589335.3651933","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:28:02Z","timestamp":1755822482000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589335.3651933"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":41,"alternative-id":["10.1145\/3589335.3651933","10.1145\/3589335"],"URL":"https:\/\/doi.org\/10.1145\/3589335.3651933","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]},"assertion":[{"value":"2024-05-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}