{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,27]],"date-time":"2026-04-27T14:35:52Z","timestamp":1777300552929,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":49,"publisher":"ACM","funder":[{"name":"National Key R&D Program of China","award":["2022YFB2703301"],"award-info":[{"award-number":["2022YFB2703301"]}]},{"name":"Science and Technology Development Program of Two Districts in Xinjiang, China","award":["2024LQ03004"],"award-info":[{"award-number":["2024LQ03004"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,4,13]]},"DOI":"10.1145\/3774904.3792144","type":"proceedings-article","created":{"date-parts":[[2026,4,27]],"date-time":"2026-04-27T13:28:36Z","timestamp":1777296516000},"page":"5087-5098","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["FedSRD: Sparsify-Reconstruct-Decompose for Communication-Efficient Federated Large Language Models Fine-Tuning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-5941-1311","authenticated-orcid":false,"given":"Guochen","family":"Yan","sequence":"first","affiliation":[{"name":"Beijing Key Laboratory of Data Intelligence and Security, Peking University, Beijing, China, National Engineering Research Center for Software Engineering, Peking University, Beijing, China, and School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4777-122X","authenticated-orcid":false,"given":"Luyuan","family":"Xie","sequence":"additional","affiliation":[{"name":"Beijing Key Laboratory of Data Intelligence and Security, Peking University, Beijing, China and School of Software and Microelectronics, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0605-6043","authenticated-orcid":false,"given":"Qingni","family":"Shen","sequence":"additional","affiliation":[{"name":"Beijing Key Laboratory of Data Intelligence and Security, Peking University, Beijing, China and School of Software and Microelectronics, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8279-6908","authenticated-orcid":false,"given":"Yuejian","family":"Fang","sequence":"additional","affiliation":[{"name":"Beijing Key Laboratory of Data Intelligence and Security, Peking University, Beijing, China and School of Software and Microelectronics, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1268-836X","authenticated-orcid":false,"given":"Zhonghai","family":"Wu","sequence":"additional","affiliation":[{"name":"Beijing Key Laboratory of Data Intelligence and Security, Peking University, Beijing, China and National Engineering Research Center for Software Engineering, Peking University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2026,4,12]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Sparse communication for distributed gradient descent. arXiv preprint arXiv:1704.05021","author":"Aji Alham Fikri","year":"2017","unstructured":"Alham Fikri Aji and Kenneth Heafield. 2017. Sparse communication for distributed gradient descent. arXiv preprint arXiv:1704.05021 (2017)."},{"key":"e_1_3_2_1_2_1","unstructured":"Jacob Austin Augustus Odena Maxwell Nye Maarten Bosma Henryk Michalewski David Dohan Ellen Jiang Carrie Cai Michael Terry Quoc Le et al. 2021. 
Program synthesis with large language models. arXiv preprint arXiv:2108.07732 (2021)."},{"key":"e_1_3_2_1_3_1","volume-title":"Federated Fine-tuning of Large Language Models under Heterogeneous Tasks and Client Resources. arXiv preprint arXiv:2402.11505","author":"Bai Jiamu","year":"2024","unstructured":"Jiamu Bai, Daoyuan Chen, Bingchen Qian, Liuyi Yao, and Yaliang Li. 2024. Federated Fine-tuning of Large Language Models under Heterogeneous Tasks and Client Resources. arXiv preprint arXiv:2402.11505 (2024)."},{"key":"e_1_3_2_1_4_1","volume-title":"Code Alpaca: An Instruction-following LLaMA model for code generation. https:\/\/github.com\/sahil280114\/codealpaca.","author":"Chaudhary Sahil","year":"2023","unstructured":"Sahil Chaudhary. 2023. Code Alpaca: An Instruction-following LLaMA model for code generation. https:\/\/github.com\/sahil280114\/codealpaca."},{"key":"e_1_3_2_1_5_1","volume-title":"Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al.","author":"Chen Mark","year":"2021","unstructured":"Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde De Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al., 2021. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374 (2021)."},{"key":"e_1_3_2_1_6_1","unstructured":"Karl Cobbe Vineet Kosaraju Mohammad Bavarian Mark Chen Heewoo Jun Lukasz Kaiser Matthias Plappert Jerry Tworek Jacob Hilton Reiichiro Nakano et al. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168 (2021)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"crossref","unstructured":"Ning Ding Yujia Qin Guang Yang Fuchao Wei Zonghan Yang Yusheng Su Shengding Hu Yulin Chen Chi-Min Chan Weize Chen et al. 2023. Parameter-efficient fine-tuning of large-scale pre-trained language models. Nature machine intelligence Vol. 5 3 (2023) 220-235.","DOI":"10.1038\/s42256-023-00626-4"},{"key":"e_1_3_2_1_8_1","volume-title":"International Conference on Machine Learning. PMLR, 8356-8388","author":"Dorfman Ron","year":"2023","unstructured":"Ron Dorfman, Shay Vargaftik, Yaniv Ben-Itzhak, and Kfir Yehuda Levy. 2023. DoCoFL: Downlink compression for cross-device federated learning. In International Conference on Machine Learning. PMLR, 8356-8388."},{"key":"e_1_3_2_1_9_1","volume-title":"James J Clark, and Mehdi Rezagholizadeh.","author":"Edalati Ali","year":"2025","unstructured":"Ali Edalati, Marzieh Tahaei, Ivan Kobyzev, Vahid Partovi Nia, James J Clark, and Mehdi Rezagholizadeh. 2025. KronA: Parameter-Efficient Tuning with Kronecker Adapter. In Enhancing LLM Performance: Efficacy, Fine-Tuning, and Inference Techniques. Springer, 49-65."},{"key":"e_1_3_2_1_10_1","volume-title":"Lawbench: Benchmarking legal knowledge of large language models. arXiv preprint arXiv:2309.16289","author":"Fei Zhiwei","year":"2023","unstructured":"Zhiwei Fei, Xiaoyu Shen, Dawei Zhu, Fengzhe Zhou, Zhuo Han, Songyang Zhang, Kai Chen, Zongwen Shen, and Jidong Ge. 2023. Lawbench: Benchmarking legal knowledge of large language models. arXiv preprint arXiv:2309.16289 (2023)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1966.1053907"},{"key":"e_1_3_2_1_12_1","volume-title":"Towards a unified view of parameter-efficient transfer learning. arXiv preprint arXiv:2110.04366","author":"He Junxian","year":"2021","unstructured":"Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. 2021. 
Towards a unified view of parameter-efficient transfer learning. arXiv preprint arXiv:2110.04366 (2021)."},{"key":"e_1_3_2_1_13_1","volume-title":"Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874","author":"Hendrycks Dan","year":"2021","unstructured":"Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874 (2021)."},{"key":"e_1_3_2_1_14_1","volume-title":"International conference on machine learning. PMLR, 2790-2799","author":"Houlsby Neil","year":"2019","unstructured":"Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In International conference on machine learning. PMLR, 2790-2799."},{"key":"e_1_3_2_1_15_1","first-page":"3","article-title":"Lora: Low-rank adaptation of large language models","volume":"1","author":"Hu Edward J","year":"2022","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al., 2022. Lora: Low-rank adaptation of large language models. ICLR, Vol. 1, 2 (2022), 3.","journal-title":"ICLR"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.3390\/app11146421"},{"key":"e_1_3_2_1_17_1","volume-title":"Scaling laws for neural language models. arXiv preprint arXiv:2001.08361","author":"Kaplan Jared","year":"2020","unstructured":"Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361 (2020)."},{"key":"e_1_3_2_1_18_1","volume-title":"Ananda Theertha Suresh, and Dave Bacon","author":"Kone\u010dn\u00fd Jakub","year":"2016","unstructured":"Jakub Kone\u010dn\u00fd, H Brendan McMahan, Felix X Yu, Peter Richt\u00e1rik, Ananda Theertha Suresh, and Dave Bacon. 2016. Federated learning: Strategies for improving communication efficiency. arXiv preprint arXiv:1610.05492 (2016)."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671573"},{"key":"e_1_3_2_1_20_1","volume-title":"Dominik Wagner, Haebin Seong, Tobias Bocklet, Juho Lee, and Sung Ju Hwang.","author":"Lee Seanie","year":"2025","unstructured":"Seanie Lee, Sangwoo Park, Dong Bok Lee, Dominik Wagner, Haebin Seong, Tobias Bocklet, Juho Lee, and Sung Ju Hwang. 2025. FedSVD: Adaptive Orthogonalization for Private Federated Learning with LoRA. arXiv preprint arXiv:2505.12805 (2025)."},{"key":"e_1_3_2_1_21_1","volume-title":"The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691","author":"Lester Brian","year":"2021","unstructured":"Brian Lester, Rami Al-Rfou, and Noah Constant. 2021. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691 (2021)."},{"key":"e_1_3_2_1_22_1","first-page":"429","article-title":"Federated optimization in heterogeneous networks","volume":"2","author":"Li Tian","year":"2020","unstructured":"Tian Li, Anit Kumar Sahu, Manzil Zaheer, Maziar Sanjabi, Ameet Talwalkar, and Virginia Smith. 2020. Federated optimization in heterogeneous networks. Proceedings of Machine learning and systems, Vol.
2 (2020), 429-450.","journal-title":"Proceedings of Machine learning and systems"},{"key":"e_1_3_2_1_23_1","volume-title":"Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190","author":"Li Xiang Lisa","year":"2021","unstructured":"Xiang Lisa Li and Percy Liang. 2021. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)."},{"key":"e_1_3_2_1_24_1","volume-title":"Splitting with Importance-aware Updating for Heterogeneous Federated Learning with Large Language Models. In Forty-second International Conference on Machine Learning.","author":"Liao Yangxu","unstructured":"Yangxu Liao, Wenke Huang, Guancheng Wan, Jian Liang, Bin Yang, and Mang Ye. [n.d.]. Splitting with Importance-aware Updating for Heterogeneous Federated Learning with Large Language Models. In Forty-second International Conference on Machine Learning."},{"key":"e_1_3_2_1_25_1","first-page":"39409","article-title":"Dual-personalizing adapter for federated foundation models","volume":"37","author":"Long Guodong","year":"2024","unstructured":"Guodong Long, Tao Shen, Jing Jiang, Michael Blumenstein, et al., 2024. Dual-personalizing adapter for federated foundation models. Advances in Neural Information Processing Systems, Vol. 37 (2024), 39409-39433.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_26_1","unstructured":"Yingfeng Luo Dingyang Lin Junxin Wang Ziqiang Xu Kaiyan Chang Tong Zheng Bei Li Anxiang Ma Tong Xiao Zhengtao Yu et al. 2025. One Size Does Not Fit All: A Distribution-Aware Sparsification for More Precise Model Merging. arXiv preprint arXiv:2508.06163 (2025)."},{"key":"e_1_3_2_1_27_1","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics","author":"McMahan Brendan","year":"2017","unstructured":"Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. 2017. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics. PMLR, 1273-1282.","journal-title":"PMLR"},{"key":"e_1_3_2_1_28_1","first-page":"1","volume-title":"ACM International Conference Proceeding Series","volume":"58","author":"Mundt Thomas","year":"2004","unstructured":"Thomas Mundt. 2004. How much is a byte?: a survey of costs for mobile data transmission. In ACM International Conference Proceeding Series, Vol. 58. 1-6."},{"key":"e_1_3_2_1_29_1","volume-title":"Flora: Enhancing vision-language models with parameter-efficient federated learning. arXiv preprint arXiv:2404.15182","author":"Nguyen Duy Phuong","year":"2024","unstructured":"Duy Phuong Nguyen, J Pablo Munoz, and Ali Jannesari. 2024. Flora: Enhancing vision-language models with parameter-efficient federated learning. arXiv preprint arXiv:2404.15182 (2024)."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"crossref","unstructured":"Long Ouyang Jeffrey Wu Xu Jiang Diogo Almeida Carroll Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Ray et al. 2022. Training language models to follow instructions with human feedback. Advances in neural information processing systems Vol. 35 (2022) 27730-27744.","DOI":"10.52202\/068431-2011"},{"key":"e_1_3_2_1_31_1","volume-title":"Conference on health, inference, and learning. PMLR, 248-260","author":"Pal Ankit","year":"2022","unstructured":"Ankit Pal, Logesh Kumar Umapathi, and Malaikannan Sankarasubbu. 2022. 
Medmcqa: A large-scale multi-subject multi-choice dataset for medical domain question answering. In Conference on health, inference, and learning. PMLR, 248-260."},{"key":"e_1_3_2_1_32_1","volume-title":"Philip HS Torr, and Adel Bibi","author":"Petrov Aleksandar","year":"2023","unstructured":"Aleksandar Petrov, Philip HS Torr, and Adel Bibi. 2023. When do prompting and prefix-tuning work? a theory of capabilities and limitations. arXiv preprint arXiv:2310.19698 (2023)."},{"key":"e_1_3_2_1_33_1","first-page":"8133","article-title":"Rethinking gradient sparsification as total error minimization","volume":"34","author":"Sahu Atal","year":"2021","unstructured":"Atal Sahu, Aritra Dutta, Ahmed M Abdelmoniem, Trambak Banerjee, Marco Canini, and Panos Kalnis. 2021. Rethinking gradient sparsification as total error minimization. Advances in Neural Information Processing Systems, Vol. 34 (2021), 8133-8146.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_34_1","volume-title":"International Workshop on Federated Foundation Models in Conjunction with NeurIPS","author":"Sani Lorenzo","year":"2024","unstructured":"Lorenzo Sani, Alex Iacob, Zeyu Cao, Bill Marino, Yan Gao, Tomas Paulik, Wanru Zhao, William F Shen, Preslav Aleksandrov, Xinchi Qiu, et al., [n.d.]. The Future of Large Language Model Pre-training is Federated. In International Workshop on Federated Foundation Models in Conjunction with NeurIPS 2024."},{"key":"e_1_3_2_1_35_1","volume-title":"Robust and communication-efficient federated learning from non-iid data","author":"Sattler Felix","year":"2019","unstructured":"Felix Sattler, Simon Wiedemann, Klaus-Robert M\u00fcller, and Wojciech Samek. 2019. Robust and communication-efficient federated learning from non-iid data. IEEE transactions on neural networks and learning systems, Vol. 31, 9 (2019), 3400-3413."},{"key":"e_1_3_2_1_36_1","volume-title":"Fed-SB: A silver bullet for extreme communication efficiency and performance in (private) federated lora fine-tuning. arXiv preprint arXiv:2502.15436","author":"Singhal Raghav","year":"2025","unstructured":"Raghav Singhal, Kaustubh Ponkshe, Rohit Vartak, Lav R Varshney, and Praneeth Vepakomma. 2025. Fed-SB: A silver bullet for extreme communication efficiency and performance in (private) federated lora fine-tuning. arXiv preprint arXiv:2502.15436 (2025)."},{"key":"e_1_3_2_1_37_1","volume-title":"Improving lora in privacy-preserving federated learning. arXiv preprint arXiv:2403.12313","author":"Sun Youbang","year":"2024","unstructured":"Youbang Sun, Zitao Li, Yaliang Li, and Bolin Ding. 2024. Improving lora in privacy-preserving federated learning. arXiv preprint arXiv:2403.12313 (2024)."},{"key":"e_1_3_2_1_38_1","volume-title":"Qwen2 technical report. arXiv preprint arXiv:2407.10671","author":"Team Qwen","year":"2024","unstructured":"Qwen Team. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671, Vol. 2 (2024)."},{"key":"e_1_3_2_1_39_1","first-page":"22513","article-title":"Flora: Federated fine-tuning large language models with heterogeneous low-rank adaptations","volume":"37","author":"Wang Ziyao","year":"2024","unstructured":"Ziyao Wang, Zheyu Shen, Yexiao He, Guoheng Sun, Hongyi Wang, Lingjuan Lyu, and Ang Li. 2024. Flora: Federated fine-tuning large language models with heterogeneous low-rank adaptations. Advances in Neural Information Processing Systems, Vol. 
37 (2024), 22513-22533.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_40_1","volume-title":"A survey on federated fine-tuning of large language models. arXiv preprint arXiv:2503.12016","author":"Wu Yebo","year":"2025","unstructured":"Yebo Wu, Chunlin Tian, Jingguang Li, He Sun, Kahou Tam, Zhanting Zhou, Haicheng Liao, Zhijiang Guo, Li Li, and Chengzhong Xu. 2025. A survey on federated fine-tuning of large language models. arXiv preprint arXiv:2503.12016 (2025)."},{"key":"e_1_3_2_1_41_1","volume-title":"Towards federated graph learning in one-shot communication. arXiv preprint arXiv:2411.11304","author":"Yan Guochen","year":"2024","unstructured":"Guochen Yan, Xunkai Li, Luyuan Xie, Wentao Zhang, Qingni Shen, Yuejian Fang, and Zhonghai Wu. 2024. Towards federated graph learning in one-shot communication. arXiv preprint arXiv:2411.11304 (2024)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i20.35497"},{"key":"e_1_3_2_1_43_1","volume-title":"Impart: Importance-aware delta-sparsification for improved model compression and merging in llms. arXiv preprint arXiv:2504.13237","author":"Yang Yan","year":"2025","unstructured":"Yan Yang, Yixia Li, Hongru Wang, Xuetao Wei, Jianqiao Yu, Yun Chen, and Guanhua Chen. 2025. Impart: Importance-aware delta-sparsification for improved model compression and merging in llms. arXiv preprint arXiv:2504.13237 (2025)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671582"},{"key":"e_1_3_2_1_45_1","volume-title":"Forty-first International Conference on Machine Learning.","author":"Yu Le","year":"2024","unstructured":"Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. 2024. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447454"},{"key":"e_1_3_2_1_47_1","volume-title":"Fineval: A chinese financial domain knowledge evaluation benchmark for large language models. arXiv preprint arXiv:2308.09975","author":"Zhang Liwen","year":"2023","unstructured":"Liwen Zhang, Weige Cai, Zhaowei Liu, Zhi Yang, Wei Dai, Yujie Liao, Qianru Qin, Yifei Li, Xingyu Liu, Zhiqiang Liu, et al., 2023. Fineval: A chinese financial domain knowledge evaluation benchmark for large language models. arXiv preprint arXiv:2308.09975 (2023)."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3615285"},{"key":"e_1_3_2_1_49_1","volume-title":"Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364","author":"Zhong Wanjun","year":"2023","unstructured":"Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. 2023. Agieval: A human-centric benchmark for evaluating foundation models. 
arXiv preprint arXiv:2304.06364 (2023)."}],"event":{"name":"WWW '26: The ACM Web Conference 2026","location":"Dubai United Arab Emirates","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the ACM Web Conference 2026"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3774904.3792144","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,27]],"date-time":"2026-04-27T13:38:56Z","timestamp":1777297136000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3774904.3792144"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4,12]]},"references-count":49,"alternative-id":["10.1145\/3774904.3792144","10.1145\/3774904"],"URL":"https:\/\/doi.org\/10.1145\/3774904.3792144","relation":{},"subject":[],"published":{"date-parts":[[2026,4,12]]},"assertion":[{"value":"2026-04-12","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
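
The record above is the standard envelope returned by the public Crossref REST API: the work's metadata sits under the "message" key, with "title" as a list of strings, "author" as a list of objects with "given"/"family" names, and counters such as "references-count" and "is-referenced-by-count". Below is a minimal Python sketch (standard library only) showing how such a record could be fetched and summarized; the field names are taken directly from the record above, and the lookup route is the usual Crossref /works/{doi} endpoint.

```python
import json
import urllib.request

# DOI taken from the record above; /works/{doi} is the standard
# Crossref REST API lookup for a single work.
DOI = "10.1145/3774904.3792144"
URL = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(URL) as resp:
    record = json.load(resp)

# Crossref wraps the work metadata in a "message" envelope,
# alongside "status", "message-type", and "message-version".
work = record["message"]

title = work["title"][0]  # "title" is a list of strings
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)

print(title)
print(f"  authors:    {authors}")
print(f"  DOI:        {work['DOI']}")
print(f"  references: {work.get('references-count', 0)}")
print(f"  cited by:   {work.get('is-referenced-by-count', 0)}")
```

The same route works for any Crossref-registered DOI; for sustained use, Crossref asks clients to identify themselves (e.g., via a mailto query parameter) to be routed to its "polite" pool.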