{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T11:51:05Z","timestamp":1770378665041,"version":"3.49.0"},"reference-count":86,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100010418","name":"Information and Communications Technology Planning and Evaluation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100010418","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Korean Government","award":["RS-2024-00439762"],"award-info":[{"award-number":["RS-2024-00439762"]}]},{"DOI":"10.13039\/501100007053","name":"Korea Institute of Police Technology","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100007053","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003600","name":"Korean National Police Agency","doi-asserted-by":"publisher","award":["RS-2025-02304983"],"award-info":[{"award-number":["RS-2025-02304983"]}],"id":[{"id":"10.13039\/501100003600","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans.Inform.Forensic Secur."],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/tifs.2025.3613882","type":"journal-article","created":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T17:38:15Z","timestamp":1759340295000},"page":"1363-1378","source":"Crossref","is-referenced-by-count":0,"title":["Amplifying Training Data Exposure Through Fine-Tuning With Pseudo-Labeled Memberships"],"prefix":"10.1109","volume":"21","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0253-1580","authenticated-orcid":false,"given":"Myunggyo","family":"Oh","sequence":"first","affiliation":[{"name":"KT Research and Development Center, Seoul, South Korea"}]},{"given":"Hong","family":"Eun Ahn","sequence":"additional","affiliation":[{"name":"AI Security and Information Security Laboratory, Yonsei University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3100-2258","authenticated-orcid":false,"given":"Leo","family":"Hyun Park","sequence":"additional","affiliation":[{"name":"AI Security and Information Security Laboratory, Yonsei University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5513-0836","authenticated-orcid":false,"given":"Taekyoung","family":"Kwon","sequence":"additional","affiliation":[{"name":"AI Security and Information Security Laboratory, Yonsei University, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"ref2","article-title":"Better fine-tuning by reducing representational collapse","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Aghajanyan"},{"key":"ref3","article-title":"PaLM 2 technical report","volume-title":"arXiv:2305.10403","author":"Anil","year":"2023"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.484"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207304"},{"key":"ref6","first-page":"7641","article-title":"LAMP: Extracting text from gradients with language model priors","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Dimitrov"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/icwsm.v14i1.7347"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1214\/ss\/1177011077"},{"key":"ref9","first-page":"1467","article-title":"Poisoning attacks against support vector machines","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Biggio"},{"key":"ref10","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. NIPS","author":"Brown"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/SP46214.2022.9833649"},{"key":"ref12","article-title":"Quantifying memorization across neural language models","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Carlini"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-56877-1_7"},{"key":"ref14","first-page":"2633","article-title":"Extracting training data from large language models","volume-title":"Proc. USENIX Secur. Symp.","author":"Carlini"},{"key":"ref15","article-title":"RelaxLoss: Defending membership inference attacks without losing utility","author":"Chen","year":"2022","journal-title":"arXiv:2207.05801"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3658644.3690325"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.122327"},{"key":"ref18","first-page":"4299","article-title":"Deep reinforcement learning from human preferences","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Christiano"},{"key":"ref19","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref20","article-title":"The llama 3 herd of models","author":"Dubey","year":"2024","journal-title":"arXiv:2407.21783"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0251415"},{"key":"ref22","article-title":"The pile: An 800GB dataset of diverse text for language modeling","author":"Gao","year":"2021","journal-title":"arXiv:2101.00027"},{"key":"ref23","article-title":"DeepSeek-r1: Incentivizing reasoning capability in LLMs via reinforcement learning","author":"Guo","year":"2025","journal-title":"arXiv:2501.12948"},{"key":"ref24","article-title":"Revisiting self-training for neural sequence generation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"He"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.161"},{"key":"ref26","article-title":"The curious case of neural text degeneration","author":"Holtzman","year":"2019","journal-title":"arXiv:1904.09751"},{"key":"ref27","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hu"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.164"},{"key":"ref29","article-title":"Measuring forgetting of memorized training examples","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Jagielski"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1259"},{"key":"ref31","volume-title":"KOGPT: Kakaobrain Korean(Hangul) Generative Pre-Trained Transformer","author":"Kim","year":"2021"},{"issue":"2","key":"ref32","first-page":"896","article-title":"Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","volume":"3","author":"Lee"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.577"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.73"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref36","article-title":"SGDR: Stochastic gradient descent with warm restarts","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Loshchilov"},{"key":"ref37","article-title":"RobustFT: Robust supervised fine-tuning for large language models under noisy response","author":"Luo","year":"2024","journal-title":"arXiv:2412.14922"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1137\/0222058"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/2692956.2663188"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.719"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/s0079-7421(08)60536-8"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.570"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.119"},{"key":"ref44","article-title":"DetectGPT: Zero-shot machine-generated text detection using probability curvature","author":"Mitchell","year":"2023","journal-title":"arXiv:2301.11305"},{"key":"ref45","article-title":"Scalable extraction of training data from (production) language models","author":"Nasr","year":"2023","journal-title":"arXiv:2311.17035"},{"key":"ref46","article-title":"GPT-4 technical report","volume-title":"arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref47","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Ouyang"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.5555\/3454287.3455008"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.225"},{"issue":"8","key":"ref50","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref51","first-page":"53728","article-title":"Direct preference optimization: Your language model is secretly a reward model","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Rafailov"},{"issue":"140","key":"ref52","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2019","journal-title":"J. Mach. Learn. Res."},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"ref54","first-page":"551","article-title":"ZeRO-offload: Democratizing billion-scale model training","volume-title":"Proc. USENIX Annu. Tech. Conf. (USENIX ATC)","author":"Ren"},{"key":"ref55","article-title":"Code llama: Open foundation models for code","author":"Rozi\u00e9re","year":"2023","journal-title":"arXiv:2308.12950"},{"key":"ref56","article-title":"Can AI-generated text be reliably detected?","author":"Sankar Sadasivan","year":"2023","journal-title":"arXiv:2303.11156"},{"key":"ref57","article-title":"Language models are greedy reasoners: A systematic formal analysis of chain-of-thought","author":"Saparov","year":"2022","journal-title":"arXiv:2210.01240"},{"key":"ref58","article-title":"Bloom: A 176B-parameter open-access multilingual language model","author":"Workshop","year":"2022","journal-title":"arXiv:2211.05100"},{"key":"ref59","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref60","article-title":"BadGPT: Exploring security vulnerabilities of ChatGPT via backdoor attacks to InstructGPT","author":"Shi","year":"2023","journal-title":"arXiv:2304.12298"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.41"},{"issue":"2","key":"ref62","first-page":"2615","article-title":"Systematic evaluation of privacy risks of machine learning models","volume-title":"Proc. USENIX Secur. Symp.","volume":"1","author":"Song"},{"key":"ref63","first-page":"3008","article-title":"Learning to summarize with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Stiennon"},{"key":"ref64","article-title":"NSML: A machine learning platform that enables you to focus on your models","author":"Sung","year":"2017","journal-title":"arXiv:1712.05902"},{"key":"ref65","first-page":"1433","article-title":"Mitigating membership inference attacks by self-distillation through a novel ensemble architecture","volume-title":"Proc. 31st USENIX Secur. Symp. (USENIX Secur.)","author":"Tang"},{"key":"ref66","first-page":"38274","article-title":"Memorization without overfitting: Analyzing the training dynamics of large language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Tirumala"},{"key":"ref67","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref68","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv:2307.09288"},{"key":"ref69","article-title":"A simple method for commonsense reasoning","author":"Trinh","year":"2018","journal-title":"arXiv:1806.02847"},{"key":"ref70","volume-title":"Does Gpt-2 Know Your Phone Number?","author":"Wallace","year":"2020"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W19-2304"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.52202\/079017-3819"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE48619.2023.00054"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.41"},{"key":"ref76","article-title":"DeepSpeed-chat: Easy, fast and affordable RLHF training of chatGPT-like models at all scales","author":"Yao","year":"2023","journal-title":"arXiv:2308.01320"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1145\/3548606.3560675"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CSF.2018.00027"},{"key":"ref79","article-title":"RRHF: Rank responses to align language models with human feedback without tears","author":"Yuan","year":"2023","journal-title":"arXiv:2304.05302"},{"key":"ref80","first-page":"9051","article-title":"Defending against neural fake news","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Zellers"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00381"},{"key":"ref82","article-title":"OPT: Open pre-trained transformer language models","author":"Zhang","year":"2022","journal-title":"arXiv:2205.01068"},{"key":"ref83","article-title":"SLiC-HF: Sequence likelihood calibration with human feedback","author":"Zhao","year":"2023","journal-title":"arXiv:2305.10425"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1145\/3209978.3210080"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.11"},{"key":"ref86","article-title":"Universal and transferable adversarial attacks on aligned language models","author":"Zou","year":"2023","journal-title":"arXiv:2307.15043"}],"container-title":["IEEE Transactions on Information Forensics and Security"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10206\/11313711\/11185134.pdf?arnumber=11185134","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T20:39:20Z","timestamp":1770323960000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11185134\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":86,"URL":"https:\/\/doi.org\/10.1109\/tifs.2025.3613882","relation":{},"ISSN":["1556-6013","1556-6021"],"issn-type":[{"value":"1556-6013","type":"print"},{"value":"1556-6021","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]}}}