{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T22:28:55Z","timestamp":1769552935432,"version":"3.49.0"},"reference-count":13,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"DOI":"10.1109\/iros.2004.1389841","type":"proceedings-article","created":{"date-parts":[[2005,4,1]],"date-time":"2005-04-01T20:16:50Z","timestamp":1112386610000},"page":"2849-2854","source":"Crossref","is-referenced-by-count":146,"title":["Stochastic policy gradient reinforcement learning on a simple 3D biped"],"prefix":"10.1109","volume":"3","author":[{"given":"R.","family":"Tedrake","sequence":"first","affiliation":[]},{"given":"T.W.","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"H.S.","family":"Seung","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"13","doi-asserted-by":"publisher","DOI":"10.1177\/02783640122067561"},{"key":"11","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2004.1302452"},{"key":"12","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"3","doi-asserted-by":"publisher","DOI":"10.1016\/S0921-8890(97)00043-2"},{"key":"2","doi-asserted-by":"publisher","DOI":"10.1109\/37.257893"},{"key":"1","article-title":"Minimax differential dynamic programming: An application to robust biped walking","author":"morimoto","year":"2002","journal-title":"Neural Information Processing Systems"},{"key":"10","article-title":"Walking toy","author":"wilson","year":"1936","journal-title":"United States Patent Office Tech Rep"},{"key":"7","first-page":"278","article-title":"An analysis of actor\/critic algorithms using eligibility traces: Reinforcement learning with imperfect value functions","author":"kimura","year":"1998","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"6","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevLett.80.3658"},{"key":"5","doi-asserted-by":"publisher","DOI":"10.1177\/027836499000900206"},{"key":"4","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2004.1307456"},{"key":"9","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"1999","journal-title":"Advances in neural information processing systems"},{"key":"8","doi-asserted-by":"crossref","first-page":"319","DOI":"10.1613\/jair.806","article-title":"Infinite-horizon policy-gradient estimation","volume":"15","author":"baxter","year":"2001","journal-title":"Journal of Artificial Intelligence Research"}],"event":{"name":"2004 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS) (IEEE Cat. No.04CH37566)","location":"Sendai, Japan","acronym":"IROS-04"},"container-title":["2004 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS) (IEEE Cat. No.04CH37566)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx5\/9577\/30277\/01389841.pdf?arnumber=1389841","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,2,8]],"date-time":"2019-02-08T12:45:59Z","timestamp":1549629959000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/1389841\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[null]]},"references-count":13,"URL":"https:\/\/doi.org\/10.1109\/iros.2004.1389841","relation":{},"subject":[]}}