{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T12:26:47Z","timestamp":1743078407792,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":32,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819756681"},{"type":"electronic","value":"9789819756698"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-97-5669-8_30","type":"book-chapter","created":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T17:02:31Z","timestamp":1722618151000},"page":"366-377","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A Task-Efficient Gradient Guide Knowledge Distillation for Pre-train Language Model Compression"],"prefix":"10.1007","author":[{"given":"Xu","family":"Liu","sequence":"first","affiliation":[]},{"given":"Yila","family":"Su","sequence":"additional","affiliation":[]},{"given":"Nier","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,3]]},"reference":[{"key":"30_CR1","doi-asserted-by":"crossref","unstructured":"Dalvi, F., Sajjad, H., Durrani, N., Belinkov, Y.: Analyzing redundancy in pretrained transformer models. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 4908\u20134926. Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.398"},{"key":"30_CR2","doi-asserted-by":"crossref","unstructured":"Durrani, N., Sajjad, H., Dalvi, F., Belinkov, Y.: Analyzing individual neurons in pre-trained language models. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 4865\u20134880. Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.395"},{"key":"30_CR3","unstructured":"Liang, C., Zuo, S., Zhang, Q., He, P., Chen, W., Zhao, T.: Less is more: task-aware layer-wise distillation for language model compression. In: International Conference on Machine Learning, pp. 20852\u201320867. PMLR (2023)"},{"key":"30_CR4","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers) (2019)"},{"key":"30_CR5","unstructured":"Brown, T., et al.: Language models are few-shot learners. In: Advances in Neural Information Processing Systems, vol. 33, pp. 1877\u20131901 (2020)"},{"key":"30_CR6","unstructured":"Liu, Y., et al.: RoBERTa: a robustly optimized BERT pretraining approach. 
arXiv preprint arXiv:1907.11692 (2019)"},{"key":"30_CR7","unstructured":"He, P., Gao, J., Chen, W.: DeBERTav3: improving deBERTa using electra-style pre-training with gradient-disentangled embedding sharing. arXiv preprint arXiv:2111.09543 (2021)"},{"key":"30_CR8","unstructured":"Touvron, H., Lavril, T., Izacard, G., et al.: LLaMA: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023). (touvron2023llama)"},{"key":"30_CR9","unstructured":"Touvron, H., Martin, L., Stone, K., et al.: Llama 2: open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)"},{"issue":"140","key":"30_CR10","first-page":"1","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21(140), 1\u201367 (2020)","journal-title":"J. Mach. Learn. Res."},{"key":"30_CR11","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., et al.: Attention is all you need. In: Guyon, I., et al. (eds.) Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, pp. 5998\u20136008. Curran Associates, Inc. (2017)"},{"key":"30_CR12","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"key":"30_CR13","unstructured":"Wang, L., Li, L., Sun, X.: Gradient knowledge distillation for pre-trained language models. arXiv preprint arXiv:2211.01071 (2022)"},{"key":"30_CR14","unstructured":"Liang, C., Jiang, H., Li, Z., Tang, X., Yin, B., Zhao, T.: HomoDistil: homotopic task-agnostic distillation of pre-trained transformers. arXiv preprint arXiv:2302.09632 (2023)"},{"key":"30_CR15","doi-asserted-by":"crossref","unstructured":"Wang, A., Singh, A., Michael, J., Hill, F., Levy, O., Bowman, S.R.: GLUE: a multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461 (2018)","DOI":"10.18653\/v1\/W18-5446"},{"key":"30_CR16","unstructured":"Liang, K.J., et al.: MixKD: towards efficient distillation of large-scale language models. arXiv preprint arXiv:2011.00593 (2020)"},{"key":"30_CR17","doi-asserted-by":"publisher","unstructured":"Shi, W., Song, Y., Zhou, H., Li, B., Li, L.: Follow your path: a progressive method for knowledge distillation. In: Oliver, N., P\u00e9rez-Cruz, F., Kramer, S., Read, J., Lozano, J.A. (eds.) ECML PKDD 2021. LNCS, vol. 12977, pp. 596\u2013611. Springer, Bilbao (2021). https:\/\/doi.org\/10.1007\/978-3-030-86523-8_36","DOI":"10.1007\/978-3-030-86523-8_36"},{"key":"30_CR18","doi-asserted-by":"crossref","unstructured":"Zhou, W., Xu, C., McAuley, J.: BERT learns to teach: knowledge distillation with meta learning. In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7037\u20137049 (2022)","DOI":"10.18653\/v1\/2022.acl-long.485"},{"key":"30_CR19","doi-asserted-by":"crossref","unstructured":"Ren, Y., Zhong, Z., Shi, X., Zhu, Y., Yuan, C., Li, M.: Tailoring instructions to student\u2019s learning levels boosts knowledge distillation. arXiv preprint arXiv:2305.09651 (2023)","DOI":"10.18653\/v1\/2023.acl-long.111"},{"key":"30_CR20","doi-asserted-by":"crossref","unstructured":"Sun, S., Cheng, Y., Gan, Z., Liu, J.: Patient knowledge distillation for BERT model compression. 
arXiv preprint arXiv:1908.09355 (2019)","DOI":"10.18653\/v1\/D19-1441"},{"key":"30_CR21","doi-asserted-by":"crossref","unstructured":"Xu, C., Zhou, W., Ge, T., Xu, K., McAuley, J., Wei, F.: Beyond preserved accuracy: evaluating loyalty and robustness of BERT compression. In: Moens, M.-F., Huang, X., Specia, L., Yih, S.W.-T. (eds.) Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 10653\u201310659. Association for Computational Linguistics, Online and Punta Cana, Dominican Republic (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.832"},{"key":"30_CR22","doi-asserted-by":"crossref","unstructured":"Ding, S., Xu, H., Koehn, P.: Saliency-driven word alignment interpretation for neural machine translation. In: Bojar, O., et al. (eds.) Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers), pp. 1\u201312. Association for Computational Linguistics, Florence, Italy (2019)","DOI":"10.18653\/v1\/W19-5201"},{"key":"30_CR23","unstructured":"Sun, Y., et al.: ERNIE 3.0: large-scale knowledge enhanced pre-training for language understanding and generation. arXiv preprint arXiv:2107.02137 (2021)"},{"key":"30_CR24","doi-asserted-by":"crossref","unstructured":"Rajpurkar, P., Zhang, J., Lopyrev, K., Liang, P.: SQuAD: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250 (2016)","DOI":"10.18653\/v1\/D16-1264"},{"key":"30_CR25","doi-asserted-by":"crossref","unstructured":"Zhao, B., Cui, Q., Song, R., Qiu, Y., Liang, J.: Decoupled knowledge distillation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11953\u201311962 (2022)","DOI":"10.1109\/CVPR52688.2022.01165"},{"key":"30_CR26","unstructured":"Wang, W., Wei, F., Dong, L., Bao, H., Yang, N., Zhou, M.: MINILM: deep self-attention distillation for task-agnostic compression of pre-trained transformers. In: Advances in Neural Information Processing Systems, vol. 33, pp. 5776\u20135788 (2020)"},{"key":"30_CR27","unstructured":"Sanh, V., Debut, L., Chaumond, J., Wolf, T.: DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108 (2019)"},{"key":"30_CR28","doi-asserted-by":"crossref","unstructured":"Liu, Y., Shen, S., Lapata, M.: Noisy self-knowledge distillation for text summarization. arXiv preprint arXiv:2009.07032 (2020)","DOI":"10.18653\/v1\/2021.naacl-main.56"},{"key":"30_CR29","unstructured":"Xu, C., McAuley, J.: A survey on model compression for natural language processing. arXiv preprint arXiv:2202.07105 (2022)"},{"key":"30_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2023.104700","volume":"135","author":"Y Tang","year":"2023","unstructured":"Tang, Y., Chen, Y., Xie, L.: Self-knowledge distillation based on knowledge transfer from soft to hard examples. Image Vis. Comput. 135, 104700 (2023)","journal-title":"Image Vis. Comput."},{"key":"30_CR31","doi-asserted-by":"publisher","first-page":"1355","DOI":"10.1162\/tacl_a_00431","volume":"9","author":"G Rotman","year":"2021","unstructured":"Rotman, G., Feder, A., Reichart, R.: Model compression for domain adaptation through causal effect estimation. Trans. Assoc. Comput. Linguis. 9, 1355\u20131373 (2021)","journal-title":"Trans. Assoc. Comput. Linguis."},{"key":"30_CR32","doi-asserted-by":"crossref","unstructured":"Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al.: PIQA: reasoning about physical commonsense in natural language. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 
34, pp. 7432\u20137439 (2020)","DOI":"10.1609\/aaai.v34i05.6239"}],"container-title":["Lecture Notes in Computer Science","Advanced Intelligent Computing Technology and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-5669-8_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T17:13:36Z","timestamp":1722618816000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-5669-8_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9789819756681","9789819756698"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-5669-8_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"3 August 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tianjin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 August 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icic2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ic-icc.cn\/2024\/index.htm","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}