{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,4]],"date-time":"2025-09-04T14:07:07Z","timestamp":1756994827076,"version":"3.28.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10,24]]},"DOI":"10.1109\/iros45743.2020.9341705","type":"proceedings-article","created":{"date-parts":[[2021,2,13]],"date-time":"2021-02-13T02:26:48Z","timestamp":1613183208000},"page":"5179-5186","source":"Crossref","is-referenced-by-count":9,"title":["Domain-Adversarial and -Conditional State Space Model for Imitation Learning"],"prefix":"10.1109","author":[{"given":"Ryo","family":"Okumura","sequence":"first","affiliation":[{"name":"Panasonic Corporation,Core Element Technology Development Center,Japan"}]},{"given":"Masashi","family":"Okada","sequence":"additional","affiliation":[{"name":"Panasonic Corporation,AI Solutions Center, Business Innovation Division,Japan"}]},{"given":"Tadahiro","family":"Taniguchi","sequence":"additional","affiliation":[{"name":"Panasonic Corporation,AI Solutions Center, Business Innovation Division,Japan"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1080\/01691864.2020.1778521"},{"key":"ref38","doi-asserted-by":"crossref","DOI":"10.1109\/IROS45743.2020.9340873","article-title":"Planet of the bayesians: Reconsidering and improving deep planning network by incorporating bayesian inference","author":"okada","year":"2020"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/0005-1098(89)90002-2"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2020.XVI.024"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.69"},{"key":"ref37","article-title":"Dream to control: Learning behaviors by latent imagination","author":"hafner","year":"2020","journal-title":"ICLRE"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s10479-005-5724-z"},{"article-title":"Path integral networks: End-to-end differentiable optimal control","year":"2017","author":"okada","key":"ref35"},{"key":"ref34","article-title":"Variational inference mpc for bayesian model-based reinforcement learning","author":"okada","year":"2019","journal-title":"CoRL"},{"article-title":"Stochastic latent actor-critic: Deep reinforcement learning with a latent variable model","year":"2019","author":"lee","key":"ref10"},{"key":"ref40","article-title":"Multi-objective model-based policy search for data-efficient learning with sparse rewards","author":"kaushik","year":"2018","journal-title":"CoRL"},{"key":"ref11","article-title":"Learning belief representations for imitation learning in pomdps","author":"gangwani","year":"2019","journal-title":"UAI"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.316"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460875"},{"key":"ref14","article-title":"Multitask domain adaptation for deep learning of instance grasping from simulation","author":"fang","year":"2018","journal-title":"ICRA"},{"key":"ref15","first-page":"59:1","article-title":"Domain-adversarial training of neural networks","volume":"17","author":"ganin","year":"2015","journal-title":"J Mach Learn Res"},{"key":"ref16","article-title":"Image-to-image translation for cross-domain disentanglement","author":"gonzalez-garcia","year":"2018","journal-title":"NIPS"},{"key":"ref17","article-title":"Domain separation networks","author":"bousmalis","year":"2016","journal-title":"NIPS"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/S1364-6613(99)01327-3"},{"key":"ref19","article-title":"Generative adversarial nets","author":"goodfellow","year":"2014","journal-title":"NIPS"},{"article-title":"Learning human behaviors from motion capture by adversarial imitation","year":"2017","author":"merel","key":"ref28"},{"key":"ref4","article-title":"Learning latent dynamics for planning from pixels","author":"hafner","year":"2019","journal-title":"ICML"},{"key":"ref27","article-title":"Third-person imitation learning","author":"stadie","year":"2017","journal-title":"ICLRE"},{"key":"ref3","article-title":"Generative adversarial imitation learning","author":"ho","year":"2016","journal-title":"NIPS"},{"key":"ref6","article-title":"Semisupervised learning with deep generative models","author":"kingma","year":"2014","journal-title":"NIPS"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8462901"},{"article-title":"Deep domain confusion: Maximizing for domain invariance","year":"2014","author":"tzeng","key":"ref5"},{"key":"ref8","article-title":"Recurrent world models facilitate policy evolution","author":"ha","year":"2018","journal-title":"NIPS"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2018.07.006"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2019.XV.074"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/882"},{"key":"ref20","article-title":"Discriminator-actor-critic: Addressing sample inefficiency and reward bias in adversarial imitation learning","author":"kostrikov","year":"2018","journal-title":"ICLRE"},{"key":"ref22","article-title":"Infogail: Interpretable imitation learning from visual demonstrations","author":"li","year":"2017","journal-title":"NIPS"},{"key":"ref21","article-title":"End-to-end differentiable adversarial imitation learning","author":"baram","year":"2017","journal-title":"ICML"},{"article-title":"Deepmind control suite","year":"2018","author":"yuval","key":"ref42"},{"key":"ref24","article-title":"Graphstructured visual imitation","author":"sieb","year":"2019","journal-title":"CoRL"},{"article-title":"Reinforcement learning and control as probabilistic inference: Tutorial and review","year":"2018","author":"levine","key":"ref41"},{"key":"ref23","article-title":"Directed-info gail: Learning hierarchical policies from unsegmented demonstrations using directed information","author":"sharma","year":"2019","journal-title":"ICLRE"},{"key":"ref26","article-title":"To follow or not to follow: Selective imitation learning from observations","author":"lee","year":"2019","journal-title":"CoRL"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7759557"}],"event":{"name":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2020,10,24]]},"location":"Las Vegas, NV, USA","end":{"date-parts":[[2021,1,24]]}},"container-title":["2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9340668\/9340635\/09341705.pdf?arnumber=9341705","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T21:57:00Z","timestamp":1656453420000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9341705\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,24]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/iros45743.2020.9341705","relation":{},"subject":[],"published":{"date-parts":[[2020,10,24]]}}}