{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T11:19:52Z","timestamp":1762341592073,"version":"3.37.3"},"reference-count":12,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006754","name":"Army Research Laboratory","doi-asserted-by":"publisher","award":["Cooperative Agreement Number W911NF-10-2-0022"],"award-info":[{"award-number":["Cooperative Agreement Number W911NF-10-2-0022"]}],"id":[{"id":"10.13039\/100006754","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Des. Test"],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1109\/mdat.2021.3063363","type":"journal-article","created":{"date-parts":[[2021,3,2]],"date-time":"2021-03-02T20:45:25Z","timestamp":1614717925000},"page":"37-44","source":"Crossref","is-referenced-by-count":6,"title":["A Hardware Accelerator for Language-Guided Reinforcement Learning"],"prefix":"10.1109","volume":"39","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5402-0988","authenticated-orcid":false,"given":"Aidin","family":"Shiri","sequence":"first","affiliation":[{"name":"Department of Computer Science and Electrical Engineering, University of Maryland, Baltimore, MD, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9550-7917","authenticated-orcid":false,"given":"Arnab Neelim","family":"Mazumder","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Electrical Engineering, University of Maryland, Baltimore, MD, USA"}]},{"given":"Bharat","family":"Prakash","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Electrical Engineering, University of Maryland, Baltimore, MD, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8904-4699","authenticated-orcid":false,"given":"Houman","family":"Homayoun","sequence":"additional","affiliation":[{"name":"University of California at Davis, Davis, CA, USA"}]},{"given":"Nicholas R.","family":"Waytowich","sequence":"additional","affiliation":[{"name":"U.S. Army Research Laboratory, Adelphi, MD, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5551-2124","authenticated-orcid":false,"given":"Tinoosh","family":"Mohsenin","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Electrical Engineering, University of Maryland, Baltimore, MD, USA"}]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/331"},{"key":"ref3","first-page":"1970","article-title":"Learning behaviors from a single video demonstration using human feedback","author":"gandhi","year":"2019","journal-title":"Proc 18th Int Conf Auton Agents MultiAgent Syst"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3386263.3407652"},{"journal-title":"Guiding safe reinforcement learning policies using structured language constraints","year":"2020","author":"prakash","key":"ref6"},{"journal-title":"Minimalistic grid-world environment for openai gym","year":"2018","author":"chevalier-boisvert","key":"ref11"},{"key":"ref5","first-page":"1","article-title":"Guiding safe reinforcement learning policies using structured language constraints","author":"prakash","year":"2020","journal-title":"Proc SafeAI Workshop 34th AAAI Conf Artif Intell"},{"journal-title":"Gym-Miniworld Environment for Openai Gym","year":"2018","author":"chevalier-boisvert","key":"ref12"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3316781.3317873"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCSI.2018.2848647"},{"key":"ref2","first-page":"4299","article-title":"Deep reinforcement learning from human preferences","author":"christiano","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TVLSI.2018.2825145"},{"key":"ref1","volume":"2","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"}],"container-title":["IEEE Design &amp; Test"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221038\/9761171\/09367213.pdf?arnumber=9367213","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,30]],"date-time":"2022-05-30T21:37:31Z","timestamp":1653946651000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9367213\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6]]},"references-count":12,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/mdat.2021.3063363","relation":{},"ISSN":["2168-2356","2168-2364"],"issn-type":[{"type":"print","value":"2168-2356"},{"type":"electronic","value":"2168-2364"}],"subject":[],"published":{"date-parts":[[2022,6]]}}}