{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T18:09:27Z","timestamp":1764785367499,"version":"3.37.3"},"reference-count":62,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U2341229","62206108"],"award-info":[{"award-number":["U2341229","62206108"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key R&amp;D Program of the Ministry of Science and Technology, China","award":["2023YFF0905400"],"award-info":[{"award-number":["2023YFF0905400"]}]},{"name":"Natural Science Foundation of Jilin Province, China","award":["20240101373JC"],"award-info":[{"award-number":["20240101373JC"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Knowl. Data Eng."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/tkde.2024.3398208","type":"journal-article","created":{"date-parts":[[2024,5,8]],"date-time":"2024-05-08T17:39:00Z","timestamp":1715189940000},"page":"7217-7228","source":"Crossref","is-referenced-by-count":1,"title":["Transductive Reward Inference on Graph"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3192-8736","authenticated-orcid":false,"given":"Bohao","family":"Qu","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, Jilin University, Changchun, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7391-0334","authenticated-orcid":false,"given":"Xiaofeng","family":"Cao","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Jilin University, Changchun, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0974-9299","authenticated-orcid":false,"given":"Qing","family":"Guo","sequence":"additional","affiliation":[{"name":"Institute of High Performance Computing (IHPC) and Centre for Frontier AI Research (CFAR), Agency for Science, Technology and Research (A*STAR), Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2697-8093","authenticated-orcid":false,"given":"Yi","family":"Chang","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Jilin University, Changchun, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8095-4637","authenticated-orcid":false,"given":"Ivor W.","family":"Tsang","sequence":"additional","affiliation":[{"name":"Institute of High Performance Computing (IHPC) and Centre for Frontier AI Research (CFAR), Agency for Science, Technology and Research (A*STAR), Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5715-7154","authenticated-orcid":false,"given":"Chengqi","family":"Zhang","sequence":"additional","affiliation":[{"name":"Australian Artificial Intelligence Institute, University of Technology Sydney, Ultimo, NSW, Australia"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_2"},{"article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","year":"2020","author":"Levine","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3250269"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2020.xvi.076"},{"key":"ref5","first-page":"885","article-title":"RoboNet: Large-scale multi-robot learning","volume-title":"Proc. Conf. Robot Learn.","author":"Dasari","year":"2020"},{"article-title":"Bdd100 k: A diverse driving video database with scalable annotation tooling","year":"2018","author":"Yu","key":"ref6"},{"key":"ref7","first-page":"2217","article-title":"Learning from logged implicit exploration data","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Strehl","year":"2010"},{"issue":"11","key":"ref8","first-page":"3207","article-title":"Counterfactual reasoning and learning systems: The example of computational advertising","volume":"14","author":"Bottou","year":"2013","journal-title":"J. Mach. Learn. Res."},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-010-5229-0"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2020.xvi.076"},{"article-title":"Offline learning from demonstrations and unlabeled experience","year":"2020","author":"Zolna","key":"ref11"},{"article-title":"Semi-supervised reward learning for offline reinforcement learning","year":"2020","author":"Konyushkova","key":"ref12"},{"key":"ref13","first-page":"25611","article-title":"How to leverage unlabeled data in offline reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yu","year":"2022"},{"article-title":"Learning from labeled and unlabeled data with label propagation","year":"2002","author":"Zhu","key":"ref14"},{"volume-title":"Semi-Supervised Learning With Graphs","year":"2005","author":"Zhu","key":"ref15"},{"key":"ref16","first-page":"321","article-title":"Learning with local and global consistency","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhou","year":"2003"},{"key":"ref17","first-page":"1","article-title":"Hyperparameter learning for graph based semi-supervised learning algorithms","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhang","year":"2006"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143968"},{"key":"ref19","first-page":"1547","article-title":"Manifold-based similarity adaptation for label propagation","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Karasuyama","year":"2013"},{"key":"ref20","first-page":"1","article-title":"Learning to propagate labels: Transductive propagation network for few-shot learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Liu","year":"2019"},{"article-title":"Deepmind control suite","year":"2018","author":"Tassa","key":"ref21"},{"key":"ref22","first-page":"1094","article-title":"Meta-world: A benchmark and evaluation for multi-task and meta reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Yu","year":"2020"},{"article-title":"Unifying graph convolutional neural networks and label propagation","year":"2020","author":"Wang","key":"ref23"},{"key":"ref24","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2019"},{"key":"ref25","first-page":"1","article-title":"Exponentially weighted imitation learning for batched historical data","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wang","year":"2018"},{"key":"ref26","first-page":"18353","article-title":"Bail: Best-action imitation learning for batch deep reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen","year":"2020"},{"key":"ref27","first-page":"1","article-title":"Keep doing what worked: Behavioral modelling priors for offline reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Siegel","year":"2020"},{"article-title":"Advantage-weighted regression: Simple and scalable off-policy reinforcement learning","year":"2019","author":"Peng","key":"ref28"},{"key":"ref29","first-page":"7768","article-title":"Critic regularized regression","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wang","year":"2020"},{"key":"ref30","first-page":"5774","article-title":"Offline reinforcement learning with Fisher divergence critic regularization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kostrikov","year":"2021"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref32","article-title":"Algorithms for inverse reinforcement learning.","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ng","year":"2000"},{"key":"ref33","first-page":"1","article-title":"Generative adversarial imitation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Ho","year":"2016"},{"article-title":"Perceptual reward functions","year":"2016","author":"Edwards","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2019.XV.073"},{"key":"ref36","first-page":"12895","article-title":"Reward propagation using graph convolutional networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Klissarov","year":"2020"},{"key":"ref37","first-page":"390","article-title":"End-to-end differentiable adversarial imitation learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Baram","year":"2017"},{"key":"ref38","first-page":"49","article-title":"Guided cost learning: Deep inverse optimal control via policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Finn","year":"2016"},{"article-title":"Learning robust rewards with adversarial inverse reinforcement learning","year":"2017","author":"Fu","key":"ref39"},{"key":"ref40","first-page":"1","article-title":"InfoGAIL: Interpretable imitation learning from visual demonstrations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Li","year":"2017"},{"article-title":"Learning human behaviors from motion capture by adversarial imitation","year":"2017","author":"Merel","key":"ref41"},{"article-title":"Unsupervised perceptual rewards for imitation learning","year":"2016","author":"Sermanet","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.009"},{"key":"ref44","first-page":"247","article-title":"Task-relevant adversarial imitation learning","volume-title":"Proc. Conf. Robot Learn.","author":"Zolna","year":"2021"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/72.788640"},{"key":"ref46","first-page":"200","article-title":"Transductive inference for text classification using support vector machines","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Joachims","year":"1999"},{"key":"ref47","first-page":"46","article-title":"Transfer learning in a transductive setting","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Rohrbach","year":"2013"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2015.2408354"},{"key":"ref49","first-page":"1","article-title":"Semi-supervised learning on manifolds","author":"Belkin","year":"2002","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref50","first-page":"19","article-title":"Learning from labeled and unlabeled data using graph mincuts","volume-title":"Proc. 18th Int. Conf. Mach. Learn.","author":"Blum","year":"2001"},{"key":"ref51","first-page":"601","article-title":"Cluster kernels for semi-supervised learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chapelle","year":"2002"},{"key":"ref52","first-page":"912","article-title":"Semi-supervised learning using gaussian fields and harmonic functions","volume-title":"Proc. 20th Int. Conf. Mach. Learn.","author":"Zhu","year":"2003"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00521"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1016\/j.simpa.2020.100022"},{"key":"ref55","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref56","first-page":"7248","article-title":"Rl unplugged: Benchmarks for offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Gulcehre","year":"2020"},{"article-title":"Distributed distributional deterministic policy gradients","year":"2018","author":"Barth-Maron","key":"ref57"},{"article-title":"Acme: A research framework for distributed reinforcement learning","year":"2020","author":"Hoffman","key":"ref58"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2020.3047405"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2018.2878247"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2011.141"},{"key":"ref62","article-title":"Statistical learning theory","volume":"1","author":"Vapnik","year":"1998"}],"container-title":["IEEE Transactions on Knowledge and Data Engineering"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/69\/10709365\/10526219.pdf?arnumber=10526219","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,10]],"date-time":"2024-10-10T11:21:36Z","timestamp":1728559296000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10526219\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":62,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tkde.2024.3398208","relation":{},"ISSN":["1041-4347","1558-2191","2326-3865"],"issn-type":[{"type":"print","value":"1041-4347"},{"type":"electronic","value":"1558-2191"},{"type":"electronic","value":"2326-3865"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}