{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,15]],"date-time":"2025-10-15T00:30:57Z","timestamp":1760488257239,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":12,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,11,28]]},"DOI":"10.1145\/3732437.3732759","type":"proceedings-article","created":{"date-parts":[[2025,10,14]],"date-time":"2025-10-14T10:33:54Z","timestamp":1760438034000},"page":"164-168","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Research on the Human-Style Generation Mechanism of Large Language Models Based on Prompt Tuning \u2014 Optimization and Evaluation of AI Detectors under the SICO Method"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-8971-8257","authenticated-orcid":false,"given":"Jiawei","family":"Ding","sequence":"first","affiliation":[{"name":"School of Information, Shanghai Polytechnic University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-8200-1849","authenticated-orcid":false,"given":"Lingyue","family":"Pan","sequence":"additional","affiliation":[{"name":"Zhuluoshou Technology Co., Ltd., Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8379-7446","authenticated-orcid":false,"given":"Xinrong","family":"Zhu","sequence":"additional","affiliation":[{"name":"School of Information, Sanda University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-5115-4745","authenticated-orcid":false,"given":"Ying","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Information, Sanda University, Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,14]]},"reference":[{"key":"e_1_3_3_1_2_2","unstructured":"Tom\u00a0B Brown. 2020. Language models are few-shot learners. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2005.14165 (2020)."},{"key":"e_1_3_3_1_3_2","unstructured":"Geoffrey Hinton. 2015. Distilling the Knowledge in a Neural Network. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1503.02531 (2015)."},{"key":"e_1_3_3_1_4_2","unstructured":"Xiaomeng Hu and et al.2023. Radar: Robust ai-text detection via adversarial learning. Advances in Neural Information Processing Systems 36 (2023) 15077\u201315095."},{"key":"e_1_3_3_1_5_2","unstructured":"Cameron\u00a0R Jones. 2024. People cannot distinguish GPT-4 from a human in a Turing test. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2405.08007 (2024)."},{"key":"e_1_3_3_1_6_2","doi-asserted-by":"crossref","unstructured":"Brian Lester. 2021. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2104.08691 (2021).","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"e_1_3_3_1_7_2","unstructured":"Xiang\u00a0Lisa Li. 2021. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2101.00190 (2021)."},{"key":"e_1_3_3_1_8_2","unstructured":"Ning Lu and et al.2023. Large language models can be guided to evade ai-generated text detection. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2305.10847 (2023)."},{"key":"e_1_3_3_1_9_2","unstructured":"Irene Solaiman and et al.2019. Release strategies and the social impacts of language models. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1908.09203 (2019)."},{"key":"e_1_3_3_1_10_2","unstructured":"Hugo Touvron and et al.2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2302.13971 (2023)."},{"key":"e_1_3_3_1_11_2","unstructured":"Jason Wei and et al.2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022) 24824\u201324837."},{"key":"e_1_3_3_1_12_2","unstructured":"Junjie Ye and et al.2023. A comprehensive capability analysis of gpt-3 and gpt-3.5 series models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2303.10420 (2023)."},{"key":"e_1_3_3_1_13_2","unstructured":"Rowan Zellers and et al.2019. Defending against neural fake news. Advances in neural information processing systems 32 (2019)."}],"event":{"name":"ICEA 2024: The 2024 International Conference on Intelligent Computing and its Emerging Applicaton","location":"Tokyo Japan","acronym":"ICEA 2024"},"container-title":["Proceedings of the 2024 International Conference on Intelligent Computing and its Emerging Applicaton"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3732437.3732759","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,14]],"date-time":"2025-10-14T10:33:58Z","timestamp":1760438038000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3732437.3732759"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,28]]},"references-count":12,"alternative-id":["10.1145\/3732437.3732759","10.1145\/3732437"],"URL":"https:\/\/doi.org\/10.1145\/3732437.3732759","relation":{},"subject":[],"published":{"date-parts":[[2024,11,28]]},"assertion":[{"value":"2025-10-14","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
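A minimal sketch (not part of the record itself) of how a work record like the one above can be fetched from the public Crossref REST API and a few of its fields read. It assumes only network access and the Python standard library; the endpoint https://api.crossref.org/works/{DOI} is Crossref's documented works route, and the field names match the record shown here.

```python
import json
import urllib.request

# DOI of the record above; Crossref accepts the raw DOI in the URL path.
DOI = "10.1145/3732437.3732759"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    record = json.load(resp)

# Crossref wraps the work metadata in a "message" envelope,
# alongside "status" and "message-type", as in the record above.
work = record["message"]

print(work["title"][0])            # article title (Crossref stores titles as lists)
print(work["DOI"], work["type"])   # "10.1145/3732437.3732759 proceedings-article"
print(work["container-title"][0])  # proceedings title
for author in work["author"]:
    print(author.get("given", ""), author["family"])
print("references:", work["references-count"])
```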