{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,25]],"date-time":"2026-04-25T11:11:18Z","timestamp":1777115478129,"version":"3.51.4"},"reference-count":60,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100004358","name":"Samsung","doi-asserted-by":"publisher","award":["IO201214-08149-01"],"award-info":[{"award-number":["IO201214-08149-01"]}],"id":[{"id":"10.13039\/100004358","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea","doi-asserted-by":"crossref","award":["2022R1A2C2004003"],"award-info":[{"award-number":["2022R1A2C2004003"]}],"id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1109\/tnnls.2024.3493113","type":"journal-article","created":{"date-parts":[[2024,12,9]],"date-time":"2024-12-09T14:01:04Z","timestamp":1733752864000},"page":"11917-11927","source":"Crossref","is-referenced-by-count":5,"title":["Generative Adversarial Soft Actor\u2013Critic"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5908-0944","authenticated-orcid":false,"given":"Hyo-Seok","family":"Hwang","sequence":"first","affiliation":[{"name":"School of Electrical Engineering, Korea University, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6615-9116","authenticated-orcid":false,"given":"Yoojoong","family":"Kim","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, The Catholic University of Korea, Bucheon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6475-8457","authenticated-orcid":false,"given":"Junhee","family":"Seok","sequence":"additional","affiliation":[{"name":"School of Electrical Engineering, Korea University, Seoul, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3207346"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"key":"ref3","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. 32nd Int. Conf. Mach. Learn., in Proceedings of Machine Learning Research","volume":"37","author":"Schulman"},{"key":"ref4","article-title":"Proximal policy optimization algorithms","volume-title":"arXiv:1707.06347","author":"Schulman","year":"2017"},{"key":"ref5","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Fujimoto"},{"key":"ref6","first-page":"1","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lillicrap"},{"key":"ref7","first-page":"387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Silver"},{"key":"ref8","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref9","first-page":"1","article-title":"Sample efficient actor-critic with experience replay","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Wang"},{"key":"ref10","first-page":"5279","article-title":"Scalable trust-region method for deep reinforcement learning using Kronecker-factored approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Wu"},{"key":"ref11","article-title":"Off-policy actor-critic","volume-title":"arXiv:1205.4839","author":"Degris","year":"2012"},{"key":"ref12","first-page":"1352","article-title":"Reinforcement learning with deep energy-based policies","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.asoc.2022.109386"},{"key":"ref14","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.05905"},{"key":"ref16","first-page":"1","article-title":"Maximum a posteriori policy optimisation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Abdolmaleki"},{"key":"ref17","first-page":"1","article-title":"Maximum entropy RL (provably) solves some robust RL problems","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Eysenbach"},{"key":"ref18","volume-title":"Modeling Purposeful Adaptive Behavior With the Principle of Maximum Causal Entropy","author":"Ziebart","year":"2010"},{"key":"ref19","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume-title":"Proc. 23rd AAAI Conf. Artif. Intell.","volume":"8","author":"Ziebart"},{"key":"ref20","first-page":"834","article-title":"Improving stochastic policy gradients in continuous control with deep reinforcement learning using the beta distribution","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","author":"Chou"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-45528-0"},{"key":"ref22","volume-title":"Machine Learning: A Probabilistic Perspective","author":"Murphy","year":"2012"},{"key":"ref23","article-title":"Improving exploration in soft-actor-critic with normalizing flows policies","volume-title":"arXiv:1906.02771","author":"Ward","year":"2019"},{"key":"ref24","first-page":"1","article-title":"NICE: Non-linear independent components estimation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Dinh"},{"key":"ref25","first-page":"1","article-title":"Density estimation using real NVP","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Dinh"},{"key":"ref26","first-page":"10215","article-title":"Glow: Generative flow with invertible 1x1 convolutions","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"31","author":"Kingma"},{"issue":"1","key":"ref27","first-page":"2617","article-title":"Normalizing flows for probabilistic modeling and inference","volume":"22","author":"Papamakarios","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref28","article-title":"Boosting trust region policy optimization by normalizing flows policy","volume-title":"arXiv:1809.10326","author":"Tang","year":"2018"},{"key":"ref29","article-title":"A connection between generative adversarial networks, inverse reinforcement learning, and energy-based models","volume-title":"arXiv:1611.03852","author":"Finn","year":"2016"},{"key":"ref30","first-page":"10957","article-title":"Training deep energy-based models with f-divergence minimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yu"},{"key":"ref31","article-title":"Variational f-divergence minimization","volume-title":"arXiv:1907.11891","author":"Zhang","year":"2019"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.5555\/2969033.2969125"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2010.2068870"},{"key":"ref34","first-page":"271","article-title":"f-GAN: Training generative neural samplers using variational divergence minimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Nowozin"},{"key":"ref35","first-page":"2018","article-title":"Stabilizing training of generative adversarial networks through regularization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Roth"},{"key":"ref36","first-page":"214","article-title":"Wasserstein generative adversarial networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Arjovsky"},{"key":"ref37","first-page":"2513","article-title":"Fisher GAN","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Mroueh"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.304"},{"key":"ref39","article-title":"OpenAI gym","volume-title":"arXiv:1606.01540","author":"Brockman","year":"2016"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3045000"},{"key":"ref43","first-page":"16331","article-title":"Edit-GAN: High-precision semantic image editing","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Ling"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3588432.3591500"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref46","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. NIPS","volume":"35","author":"Saharia"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3116668"},{"key":"ref49","first-page":"1","article-title":"Tackling the generative learning trilemma with denoising diffusion GANs","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Xiao"},{"key":"ref50","article-title":"Conditional generative adversarial nets","volume-title":"arXiv:1411.1784","author":"Mirza","year":"2014"},{"key":"ref51","article-title":"Learning likelihoods with conditional normalizing flows","volume-title":"arXiv:1912.00042","author":"Winkler","year":"2019"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-021-05961-4"},{"key":"ref53","first-page":"31957","article-title":"Factored adaptation for non-stationary reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Feng"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1145\/3459991"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10932"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1145\/3355089.3356515"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1145\/218380.218498"},{"key":"ref58","article-title":"Comparison of maximum likelihood and GAN-based training of real NVPs","volume-title":"arXiv:1705.05263","author":"Danihelka","year":"2017"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11829"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/BF02289263"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5962385\/11073756\/10783450.pdf?arnumber=10783450","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,10]],"date-time":"2025-07-10T17:48:42Z","timestamp":1752169722000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10783450\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7]]},"references-count":60,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3493113","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,7]]}}}