{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T03:15:17Z","timestamp":1743045317258,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":25,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819794393"},{"type":"electronic","value":"9789819794409"}],"license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-97-9440-9_19","type":"book-chapter","created":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T17:07:18Z","timestamp":1730394438000},"page":"239-251","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Activate Integrated Controllable Generation with\u00a0Soft Prompt"],"prefix":"10.1007","author":[{"given":"Jingkun","family":"Ma","sequence":"first","affiliation":[]},{"given":"Runzhe","family":"Zhan","sequence":"additional","affiliation":[]},{"given":"Derek F.","family":"Wong","sequence":"additional","affiliation":[]},{"given":"Lidia S.","family":"Chao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,1]]},"reference":[{"key":"19_CR1","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for NLP. In: Chaudhuri, K., Salakhutdinov, R. (eds.) Proceedings of ICML, Proceedings of Machine Learning Research, vol. 97, pp. 
2790\u20132799. PMLR (2019)"},{"key":"19_CR2","doi-asserted-by":"crossref","unstructured":"Hsu, C., et al.: Prompt-learning for cross-lingual relation extraction. In: 2023 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20139. IEEE (2023)","DOI":"10.1109\/IJCNN54540.2023.10192002"},{"issue":"9","key":"19_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3560815","volume":"55","author":"P Liu","year":"2023","unstructured":"Liu, P., Yuan, W., Jinlan, F., Jiang, Z., Hayashi, H., Neubig, G.: Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing. ACM Comput. Surv. 55(9), 1\u201335 (2023)","journal-title":"ACM Comput. Surv."},{"key":"19_CR4","doi-asserted-by":"crossref","unstructured":"Zhou, H., Wang, Z., Wang, H., Chen, D., Mu, W., Zhang, F.: Evaluating the validity of word-level adversarial attacks with large language models. In Ku, L.-W., Martins, A., Srikumar, V. (eds.) Findings of the Association for Computational Linguistics ACL 2024, pp. 4902\u20134922, Bangkok, Thailand and virtual meeting, August 2024. Association for Computational Linguistics","DOI":"10.18653\/v1\/2024.findings-acl.292"},{"key":"19_CR5","unstructured":"Yang, S., et al.: Dialectical alignment: resolving the tension of 3H and security threats of LLMS. arXiv preprint arXiv:2404.00486 (2024)"},{"key":"19_CR6","unstructured":"Chan, A., Ong, Y.S., Pung, B., Zhang, A., Fu, J.: COCON: a self-supervised approach for controlled text generation. In: Proceedings of ICLR. OpenReview.net (2021)"},{"key":"19_CR7","unstructured":"Chen, G., Yao, Y., Wong, D.F., Chao, L.S.: A two-stage prediction-aware contrastive learning framework for multi-intent NLU. arXiv preprint arXiv:2405.02925 (2024)"},{"key":"19_CR8","doi-asserted-by":"crossref","unstructured":"Ma, J., Zhan, R., Wong, D.F., Yu, S.: Human-in-loop classical Chinese poetry generation system. 
In: Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations, pp. 57\u201366. Association for Computational Linguistics (2023)","DOI":"10.18653\/v1\/2023.eacl-demo.8"},{"key":"19_CR9","doi-asserted-by":"crossref","unstructured":"Hu, Z., et al.: Controllable dialogue generation with disentangled multi-grained style specification and attribute consistency reward. IEEE\/ACM Trans. Audio Speech Lang. Process. 31, 188\u2013199 (2022)","DOI":"10.1109\/TASLP.2022.3221002"},{"key":"19_CR10","unstructured":"Keskar, N.S., McCann, B., Varshney, L.R., Xiong, C., Socher, R.: CTRL: a conditional transformer language model for controllable generation. ArXiv preprint, abs\/1909.05858 (2019)"},{"key":"19_CR11","unstructured":"Wu, J., Yang, S., Zhan, R., Yuan, Y., Wong, D.F., Chao, L.S.: A survey on LLM-generated text detection: necessity, methods, and future directions. arXiv preprint arXiv:2310.14724 (2023)"},{"key":"19_CR12","doi-asserted-by":"crossref","unstructured":"Ding, N., et\u00a0al.: Parameter-efficient fine-tuning of large-scale pre-trained language models. Nat. Mach. Intell. 5(3), 1\u201316 (2023)","DOI":"10.1038\/s42256-023-00626-4"},{"key":"19_CR13","unstructured":"He, R., et al.: On the effectiveness of adapter-based tuning for pretrained language model adaptation. In Proceedings of ACL, pp. 2208\u20132222. Association for Computational Linguistics (2021)"},{"key":"19_CR14","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-Tuning: optimizing continuous prompts for generation. In: Proceedings of ACL, pp. 4582\u20134597. Association for Computational Linguistics (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"19_CR15","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. In: Proceedings of EMNLP, pp. 3045\u20133059. 
Association for Computational Linguistics (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"19_CR16","doi-asserted-by":"crossref","unstructured":"Liu, X., et al.: P-Tuning: prompt tuning can be comparable to fine-tuning across scales and tasks. In: Proceedings of ACL, pp. 61\u201368. Association for Computational Linguistics (2022)","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"19_CR17","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. In: Proceedings of ICLR. OpenReview.net (2022)"},{"key":"19_CR18","unstructured":"Zhang, H., Song, H., Li, S., Zhou, M., Song, D.: A survey of controllable text generation using transformer-based pre-trained language models. ArXiv preprint, abs\/2201.05337 (2022)"},{"key":"19_CR19","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 9726\u20139735. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"19_CR20","unstructured":"Hjelm, R.D., et al.: Learning deep representations by mutual information estimation and maximization. In Proceedings of ICLR. OpenReview.net (2019)"},{"key":"19_CR21","doi-asserted-by":"crossref","unstructured":"Guo, Z., et al.: A human-machine collaborative Chinese classical poetry generation system. In: Proceedings of ACL, pp. 25\u201330. Association for Computational Linguistics (2019)","DOI":"10.18653\/v1\/P19-3005"},{"key":"19_CR22","doi-asserted-by":"crossref","unstructured":"Novikova, J., Du\u0161ek, O., Rieser, V.: The E2E dataset: new challenges for end-to-end generation. In: Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pp. 201\u2013206. 
Association for Computational Linguistics (2017)","DOI":"10.18653\/v1\/W17-5525"},{"issue":"8","key":"19_CR23","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"19_CR24","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21, 140:1\u2013140:67 (2020)"},{"key":"19_CR25","doi-asserted-by":"crossref","unstructured":"Lewis, M., et al.: BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: Proceedings of ACL, pp. 7871\u20137880. Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.acl-main.703"}],"container-title":["Lecture Notes in Computer Science","Natural Language Processing and Chinese Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-9440-9_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T17:11:26Z","timestamp":1730394686000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-9440-9_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,1]]},"ISBN":["9789819794393","9789819794409"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-9440-9_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,1]]},"assertion":[{"value":"1 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter 
History"}},{"value":"NLPCC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"CCF International Conference on Natural Language Processing and Chinese Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Hangzhou","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 November 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 November 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"nlpcc2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/tcci.ccf.org.cn\/conference\/2024\/index.php","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}