{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,24]],"date-time":"2025-08-24T00:02:29Z","timestamp":1755993749050,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":27,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,7,24]],"date-time":"2024-07-24T00:00:00Z","timestamp":1721779200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,7,24]]},"DOI":"10.1145\/3688636.3688654","type":"proceedings-article","created":{"date-parts":[[2024,10,11]],"date-time":"2024-10-11T18:26:22Z","timestamp":1728671182000},"page":"108-114","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["WGLora:Efficient fine-tuning method integrating weights and gradient low-rank adaptation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-9553-440X","authenticated-orcid":false,"given":"Qingyun","family":"Lin","sequence":"first","affiliation":[{"name":"Sichuan Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-1946-5855","authenticated-orcid":false,"given":"Wenlin","family":"He","sequence":"additional","affiliation":[{"name":"Sichuan Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-2841-8083","authenticated-orcid":false,"given":"qian","family":"Zhang","sequence":"additional","affiliation":[{"name":"Sichuan Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-6554-9857","authenticated-orcid":false,"given":"Zihan","family":"Peng","sequence":"additional","affiliation":[{"name":"Sichuan Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1936-8506","authenticated-orcid":false,"given":"Zhendong","family":"Wu","sequence":"additional","affiliation":[{"name":"Sichuan Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6793-7074","authenticated-orcid":false,"given":"Lilan","family":"Peng","sequence":"additional","affiliation":[{"name":"Southwest Jiaotong University, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,11]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"A survey of large language models.\u00a0arxiv preprint arxiv:2303.18223","author":"Zhao W. X.","year":"2023","unstructured":"Zhao, W. X., Zhou, K., Li, J., Tang, T., Wang, X., Hou, Y., ... & Wen, J. R. (2023). A survey of large language models.\u00a0arxiv preprint arxiv:2303.18223."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"crossref","unstructured":"Chowdhary KR1442 and K. R. Chowdhary. \"Natural language processing.\"\u00a0Fundamentals of artificial intelligence\u00a0(2020): 603-649.","DOI":"10.1007\/978-81-322-3972-7_19"},{"key":"e_1_3_2_1_3_1","unstructured":"Han Zeyu \"Parameter-efficient fine-tuning for large models: A comprehensive survey.\"\u00a0arxiv preprint arxiv:2403.14608\u00a0(2024)."},{"key":"e_1_3_2_1_4_1","volume-title":"PMLR","author":"Houlsby","year":"2019","unstructured":"Houlsby, Neil, \"Parameter-efficient transfer learning for NLP.\"\u00a0International conference on machine learning. PMLR, 2019."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"crossref","unstructured":"Lester Brian Rami Al-Rfou and Noah Constant. 
\"The power of scale for parameter-efficient prompt tuning.\"\u00a0arxiv preprint arxiv:2104.08691\u00a0(2021).","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"e_1_3_2_1_6_1","volume-title":"Prefix-tuning: Optimizing continuous prompts for generation.\u00a0arxiv preprint arxiv:2101.00190","author":"Li X. L.","year":"2021","unstructured":"Li, X. L., & Liang, P. (2021). Prefix-tuning: Optimizing continuous prompts for generation.\u00a0arxiv preprint arxiv:2101.00190."},{"key":"e_1_3_2_1_7_1","volume-title":"Lipani A. Dept: Decomposed prompt tuning for parameter-efficient fine-tuning[J]. arxiv preprint arxiv:2309.05173","author":"Shi Z","year":"2023","unstructured":"Shi Z, Lipani A. Dept: Decomposed prompt tuning for parameter-efficient fine-tuning[J]. arxiv preprint arxiv:2309.05173, 2023."},{"key":"e_1_3_2_1_8_1","unstructured":"Zaken Elad Ben Shauli Ravfogel and Yoav Goldberg. \"Bitfit: Simple parameter-efficient fine-tuning for transformer-based masked language-models.\"\u00a0arxiv preprint arxiv:2106.10199\u00a0(2021)."},{"key":"e_1_3_2_1_9_1","unstructured":"Mao Yuning \"Unipelt: A unified framework for parameter-efficient language model tuning.\"\u00a0arxiv preprint arxiv:2110.07577\u00a0(2021)."},{"key":"e_1_3_2_1_10_1","volume-title":"Pre-training of deep bidirectional transformers for language understanding.\" arXiv preprint arXiv:1810.04805","author":"Devlin","year":"2018","unstructured":"Devlin, Jacob, \"Bert: Pre-training of deep bidirectional transformers for language understanding.\" arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_11_1","unstructured":"Vaswani Ashish \"Attention is all you need.\"\u00a0Advances in neural information processing systems\u00a030 (2017)."},{"key":"e_1_3_2_1_12_1","volume-title":"Improving language understanding by generative pre-training[J]","author":"Radford A","year":"2018","unstructured":"Radford A, Narasimhan K, Salimans T, Improving language understanding by generative pre-training[J]. 2018."},{"key":"e_1_3_2_1_13_1","volume-title":"Roberta: A robustly optimized bert pretraining approach[J]. arxiv preprint arxiv:1907.11692","author":"Liu Y","year":"2019","unstructured":"Liu Y, Ott M, Goyal N, Roberta: A robustly optimized bert pretraining approach[J]. arxiv preprint arxiv:1907.11692, 2019."},{"key":"e_1_3_2_1_14_1","unstructured":"He Pengcheng \"Deberta: Decoding-enhanced bert with disentangled attention.\"\u00a0arxiv preprint arxiv:2006.03654\u00a0(2020)."},{"key":"e_1_3_2_1_15_1","volume-title":"Kanclerz","author":"Koco","year":"1861","unstructured":"J. Koco \u0301 n, I. Cichecki, O. Kaszyca, M. Kochanek, D. Szyd\u0142o, J. Baran, J. Bielaniewicz, M. Gruza, A. Janz, K. Kanclerz, Chatgpt: Jack of all trades, master of none. Information Fusion, 99:101861, 2023"},{"key":"e_1_3_2_1_16_1","unstructured":"Touvron Hugo \"Llama: Open and efficient foundation language models.\"\u00a0arxiv preprint arxiv:2302.13971\u00a0(2023)."},{"key":"e_1_3_2_1_17_1","unstructured":"Hu Edward J. \"Lora: Low-rank adaptation of large language models.\"\u00a0arxiv preprint arxiv:2106.09685\u00a0(2021)."},{"key":"e_1_3_2_1_18_1","volume-title":"Parameter-efficient fine-tuning for large models: A comprehensive survey[J]. arxiv preprint arxiv:2403.14608","author":"Han Z","year":"2024","unstructured":"Han Z, Gao C, Liu J, Parameter-efficient fine-tuning for large models: A comprehensive survey[J]. 
arxiv preprint arxiv:2403.14608, 2024."},{"key":"e_1_3_2_1_19_1","volume-title":"Openreview","author":"Zhang","year":"2023","unstructured":"Zhang, Qingru, \"Adaptive budget allocation for parameter-efficient fine-tuning.\"\u00a0International Conference on Learning Representations. Openreview, 2023."},{"key":"e_1_3_2_1_20_1","volume-title":"Efficient finetuning of quantized llms.\"\u00a0Advances in Neural Information Processing Systems\u00a036","author":"Dettmers","year":"2024","unstructured":"Dettmers, Tim, \"Qlora: Efficient finetuning of quantized llms.\"\u00a0Advances in Neural Information Processing Systems\u00a036 (2024)."},{"key":"e_1_3_2_1_21_1","unstructured":"Zhang Feiyu \"Increlora: Incremental parameter allocation method for parameter-efficient fine-tuning.\"\u00a0arxiv preprint arxiv:2308.12043\u00a0(2023)."},{"key":"e_1_3_2_1_22_1","volume-title":"A Method for Stochastic Optimization.\" Computer Science","author":"Kingma","year":"2014","unstructured":"Kingma, Diederik , and J. Ba . \"Adam: A Method for Stochastic Optimization.\" Computer Science (2014)."},{"key":"e_1_3_2_1_23_1","volume-title":"PMLR","author":"Shazeer","year":"2018","unstructured":"Shazeer, Noam, and Mitchell Stern. \"Adafactor: Adaptive learning rates with sublinear memory cost.\"\u00a0International Conference on Machine Learning. PMLR, 2018."},{"key":"e_1_3_2_1_24_1","unstructured":"Zhao Jiawei \"Galore: Memory-efficient llm training by gradient low-rank projection.\"\u00a0arxiv preprint arxiv:2403.03507\u00a0(2024)."},{"key":"e_1_3_2_1_25_1","unstructured":"Anil Rohan \"Memory efficient adaptive optimization.\"\u00a0Advances in Neural Information Processing Systems\u00a032 (2019)."},{"key":"e_1_3_2_1_26_1","volume-title":"Weight-decomposed low-rank adaptation.\"\u00a0Arxiv, abs\/2402.09353\u00a05","author":"Liu","year":"2024","unstructured":"yang Liu, Shih, \"Dora: Weight-decomposed low-rank adaptation.\"\u00a0Arxiv, abs\/2402.09353\u00a05 (2024)."},{"key":"e_1_3_2_1_27_1","unstructured":"Li Zhiyuan and Sanjeev Arora. \"An exponential learning rate schedule for deep learning.\"\u00a0arxiv preprint arxiv:1910.07454\u00a0(2019)."}],"event":{"name":"ICCBN 2024: 2024 12th International Conference on Communications and Broadband Networking","acronym":"ICCBN 2024","location":"Nyingchi China"},"container-title":["Proceedings of the 2024 12th International Conference on Communications and Broadband Networking"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3688636.3688654","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3688636.3688654","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T01:44:55Z","timestamp":1755913495000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3688636.3688654"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,24]]},"references-count":27,"alternative-id":["10.1145\/3688636.3688654","10.1145\/3688636"],"URL":"https:\/\/doi.org\/10.1145\/3688636.3688654","relation":{},"subject":[],"published":{"date-parts":[[2024,7,24]]},"assertion":[{"value":"2024-10-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
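
A minimal sketch, for illustration only: the record above is a work object from the public Crossref REST API (endpoint pattern https://api.crossref.org/works/{DOI}). The Python snippet below, assuming network access and the third-party requests package, fetches the same record and walks the fields shown above ("title", "author", "reference"); the field names come straight from the JSON, the rest is glue code.

import requests

DOI = "10.1145/3688636.3688654"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # payload sits under the "message" envelope

# Scalar fields ("title" is a one-element list)
print(work["title"][0])
print(work["page"], work["DOI"])

# Authors: "given"/"family" pairs, optionally with ORCID and affiliation
for a in work.get("author", []):
    name = f'{a.get("given", "")} {a.get("family", "")}'.strip()
    print(name, "-", a.get("ORCID", "no ORCID"))

# References: each entry has a "key" and usually an "unstructured" citation
for ref in work.get("reference", [])[:5]:
    print(ref["key"], "->", ref.get("unstructured", ref.get("DOI", "?")))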