{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T05:39:09Z","timestamp":1767677949389,"version":"3.48.0"},"reference-count":45,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Center for Applied Research in Artificial Intelligence (CARAI) grant funded by Defense Acquisition Program Administration (DAPA) and Agency for Defense Development","award":["UD230017TD"],"award-info":[{"award-number":["UD230017TD"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/access.2025.3645778","type":"journal-article","created":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T18:34:32Z","timestamp":1766082872000},"page":"113-130","source":"Crossref","is-referenced-by-count":0,"title":["Dual Preference Learning for Multi-Agent Reinforcement Learning"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1163-037X","authenticated-orcid":false,"given":"Sehyeok","family":"Kang","sequence":"first","affiliation":[{"name":"Kim Jaechul Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2744-2311","authenticated-orcid":false,"given":"Minu","family":"Kim","sequence":"additional","affiliation":[{"name":"Kim Jaechul Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3989-6708","authenticated-orcid":false,"given":"Jihwan","family":"Oh","sequence":"additional","affiliation":[{"name":"Kim Jaechul Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6675-5113","authenticated-orcid":false,"given":"Se-Young","family":"Yun","sequence":"additional","affiliation":[{"name":"Kim Jaechul Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i5.25733"},{"key":"ref2","first-page":"4299","article-title":"Deep reinforcement learning from human preferences","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"30","author":"Christiano"},{"key":"ref3","article-title":"Eureka: Human-level reward design via coding large language models","author":"Ma","year":"2023","journal-title":"arXiv:2310.12931"},{"key":"ref4","article-title":"Vision-language models are zero-shot reward models for reinforcement learning","author":"Rocamonde","year":"2023","journal-title":"arXiv:2310.12921"},{"key":"ref5","article-title":"Preference transformer: Modeling human preferences using transformers for RL","author":"Kim","year":"2023","journal-title":"arXiv:2303.00957"},{"key":"ref6","article-title":"PEBBLE: Feedback-efficient interactive reinforcement learning via relabeling experience and unsupervised pre-training","author":"Lee","year":"2021","journal-title":"arXiv:2106.05091"},{"issue":"1","key":"ref7","first-page":"4945","article-title":"A survey of preference-based reinforcement learning methods","volume":"18","author":"Wirth","year":"2017","journal-title":"J. Mach. Learn. Res."},{"key":"ref8","article-title":"The StarCraft multi-agent challenge","author":"Samvelyan","year":"2019","journal-title":"arXiv:1902.04043"},{"key":"ref9","first-page":"37567","article-title":"SMACv2: An improved benchmark for cooperative multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Ellis"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5878"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-28929-8"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-41188-6_3"},{"key":"ref13","article-title":"Hierarchical deep multiagent reinforcement learning with temporal abstraction","author":"Tang","year":"2018","journal-title":"arXiv:1809.09332"},{"key":"ref14","first-page":"10041","article-title":"MASER: Multi-agent reinforcement learning with subgoals generated from experience replay buffer","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jeon"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-09996-w"},{"key":"ref16","first-page":"3040","article-title":"Social influence as intrinsic motivation for multi-agent deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jaques"},{"key":"ref17","article-title":"PMIC: Improving multi-agent reinforcement learning with progressive mutual information collaboration","author":"Li","year":"2022","journal-title":"arXiv:2203.08553"},{"key":"ref18","article-title":"Text2Reward: Reward shaping with language models for reinforcement learning","author":"Xie","year":"2023","journal-title":"arXiv:2309.11489"},{"key":"ref19","article-title":"A large language model-driven reward design framework via dynamic feedback for reinforcement learning","author":"Sun","year":"2024","journal-title":"arXiv:2410.14660"},{"key":"ref20","article-title":"Choices are more important than efforts: LLM enables efficient multi-agent exploration","author":"Qu","year":"2024","journal-title":"arXiv:2410.02511"},{"key":"ref21","first-page":"8011","article-title":"Reward learning from human preferences and demonstrations in atari","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"31","author":"Ibarz"},{"key":"ref22","article-title":"B-pref: Benchmarking preference-based reinforcement learning","author":"Lee","year":"2021","journal-title":"arXiv:2111.03026"},{"key":"ref23","article-title":"Constitutional AI: Harmlessness from AI feedback","author":"Bai","year":"2022","journal-title":"arXiv:2212.08073"},{"key":"ref24","article-title":"RLAIF vs. RLHF: Scaling reinforcement learning from human feedback with AI feedback","author":"Lee","year":"2023","journal-title":"arXiv:2309.00267"},{"key":"ref25","article-title":"Motif: Intrinsic motivation from artificial intelligence feedback","author":"Klissarov","year":"2023","journal-title":"arXiv:2310.00166"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.2307\/2334029"},{"key":"ref27","article-title":"O-MAPL: Offline multi-agent preference learning","author":"Bui","year":"2025","journal-title":"arXiv:2501.18944"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i15.29666"},{"key":"ref29","article-title":"Multiagent reinforcement learning from human feedback: Data coverage and algorithmic techniques","author":"Zhang","year":"2024","journal-title":"arXiv:2409.00717"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.2307\/2332226"},{"key":"ref31","article-title":"GPT-4 technical report","volume-title":"arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref32","article-title":"DeepSeek-r1: Incentivizing reasoning capability in LLMs via reinforcement learning","author":"Guo","year":"2025","journal-title":"arXiv:2501.12948"},{"key":"ref33","article-title":"Individual contributions as intrinsic exploration scaffolds for multi-agent reinforcement learning","author":"Li","year":"2024","journal-title":"arXiv:2405.18110"},{"key":"ref34","article-title":"Preference-based multi-agent reinforcement learning: Data coverage and algorithmic techniques","author":"Zhang","year":"2024","journal-title":"arXiv:2409.00717"},{"key":"ref35","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","author":"Rashid","year":"2018","journal-title":"arXiv:1803.11485"},{"article-title":"DPM: Dual preferences-based multi-agent reinforcement learning","volume-title":"Proc. ICML Workshop Models Human FeedBack AI Alignment","author":"Kang","key":"ref36"},{"key":"ref37","article-title":"Rethinking the implementation tricks and monotonicity constraint in cooperative multi-agent reinforcement learning","author":"Hu","year":"2021","journal-title":"arXiv:2102.03479"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i12.29196"},{"key":"ref39","article-title":"Value-decomposition networks for cooperative multi-agent learning","author":"Sunehag","year":"2017","journal-title":"arXiv:1706.05296"},{"key":"ref40","article-title":"QPLEX: Duplex dueling multi-agent Q-learning","author":"Wang","year":"2020","journal-title":"arXiv:2008.01062"},{"key":"ref41","first-page":"16509","article-title":"Multi-agent reinforcement learning is a sequence modeling problem","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Wen"},{"key":"ref42","article-title":"JaxMARL: Multi-agent RL environments and algorithms in JAX","author":"Rutherford","year":"2023","journal-title":"arXiv:2311.10090"},{"key":"ref43","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref44","article-title":"Gemini: A family of highly capable multimodal models","author":"Anil","year":"2023","journal-title":"arXiv:2312.11805"},{"key":"ref45","article-title":"SMAC-hard: Enabling mixed opponent strategy script and self-play on SMAC","author":"Deng","year":"2024","journal-title":"arXiv:2412.17707"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/11323511\/11303653.pdf?arnumber=11303653","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T05:34:11Z","timestamp":1767677651000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11303653\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3645778","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2026]]}}}