{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T13:11:31Z","timestamp":1774703491192,"version":"3.50.1"},"reference-count":34,"publisher":"IBM","issue":"4\/5","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IBM J. Res. &amp; Dev."],"published-print":{"date-parts":[[2019,7,1]]},"DOI":"10.1147\/jrd.2019.2940428","type":"journal-article","created":{"date-parts":[[2019,9,11]],"date-time":"2019-09-11T02:13:13Z","timestamp":1568167993000},"page":"2:1-2:9","source":"Crossref","is-referenced-by-count":34,"title":["Teaching AI agents ethical values using reinforcement learning and policy orchestration"],"prefix":"10.1147","volume":"63","author":[{"given":"R.","family":"Noothigattu","sequence":"first","affiliation":[]},{"given":"D.","family":"Bouneffouf","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3569-4335","authenticated-orcid":false,"given":"N.","family":"Mattei","sequence":"additional","affiliation":[]},{"given":"R.","family":"Chandra","sequence":"additional","affiliation":[]},{"given":"P.","family":"Madan","sequence":"additional","affiliation":[]},{"given":"K. R.","family":"Varshney","sequence":"additional","affiliation":[]},{"given":"M.","family":"Campbell","sequence":"additional","affiliation":[]},{"given":"M.","family":"Singh","sequence":"additional","affiliation":[]},{"given":"F.","family":"Rossi","sequence":"additional","affiliation":[]}],"member":"3082","reference":[{"key":"ref33","first-page":"127","article-title":"Thompson sampling for contextual bandits with linear payoffs","author":"agrawal","year":"2013","journal-title":"Proc 30th Int Conf Int Conf Mach Learn"},{"key":"ref32","first-page":"817","article-title":"The epoch-greedy algorithm for contextual multi-armed bandits","author":"langford","year":"2008","journal-title":"Proc 21st Int Conf Neural Inf Process Syst"},{"key":"ref31","author":"sutton","year":"2017","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref30","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","author":"ng","year":"2000","journal-title":"Proc 17th Int Conf Mach Learn"},{"key":"ref34","first-page":"6377","article-title":"Teaching AI agents ethical values using reinforcement learning and policy orchestration (extended abstract)","author":"noothigattu","year":"2019","journal-title":"Proc Int Joint Artif Intell Conf"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v36i4.2577"},{"key":"ref11","article-title":"Why is my robot behaving like that? Designing transparency for real time inspection of autonomous robots","author":"theodorou","year":"0","journal-title":"Proc AISB Workshop Principles Robot"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s10676-006-0004-4"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/843"},{"key":"ref15","article-title":"A survey on practical applications of multi-armed and contextual bandits","author":"bouneffouf","year":"2019"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/203"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref18","article-title":"AI safety gridworlds","author":"leike","year":"2017"},{"key":"ref19","first-page":"185","article-title":"Ethics as aesthetic: A computational creativity approach to ethical behavior","author":"ventura","year":"2018","journal-title":"Proc Int Conf Comput Creativity"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref4","doi-asserted-by":"crossref","DOI":"10.1201\/9781351251389-18","article-title":"Value alignment via tractable preference distance","author":"loreggia","year":"2018","journal-title":"Artificial Intelligence Safety and Security"},{"key":"ref27","first-page":"74","article-title":"Interpretable policies for dynamic product recommendations","author":"luss","year":"2016","journal-title":"Proc Conf Uncertainty Artif Intell"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3278721.3278723"},{"key":"ref6","article-title":"When bots teach themselves to cheat","author":"simonite","year":"2018","journal-title":"Wired"},{"key":"ref29","first-page":"512","author":"bertsekas","year":"1996","journal-title":"Neuro-Dynamic Programming"},{"key":"ref5","article-title":"Value alignment or misalignment&#x2014;What will keep systems accountable?","author":"arnold","year":"0","journal-title":"Proc AI Ethics Soc Papers AAAI Workshops"},{"key":"ref8","author":"wallach","year":"2008","journal-title":"Moral Machines Teaching Robots Right from Wrong"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/779"},{"key":"ref2","article-title":"Choice, ordering and morality","author":"sen","year":"1974","journal-title":"Practical Reason"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511978036"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019785"},{"key":"ref20","first-page":"5052","article-title":"Programmatically interpretable reinforcement learning","author":"verma","year":"0","journal-title":"Proc 35th Int Conf Mach Learn"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1126\/science.aaf2654"},{"key":"ref21","article-title":"Toward interpretable deep reinforcement learning with linear model U-trees","author":"liu","year":"2018"},{"key":"ref24","article-title":"Concrete problems in AI safety","author":"amodei","year":"2016"},{"key":"ref23","first-page":"1587","article-title":"A voting-based system for ethical decision making","author":"noothigattu","year":"0","journal-title":"Proc 32nd AAAI Conf Artif Intell"},{"key":"ref26","article-title":"Reinforcement learning algorithm selection","author":"laroche","year":"2017","journal-title":"presented at the 4th Int Conf on Learning Representations"},{"key":"ref25","first-page":"1687","article-title":"A low-cost ethics shaping approach for designing reinforcement learning agents","author":"wu","year":"0","journal-title":"Proc 32nd AAAI Conf Artif Intell"}],"container-title":["IBM Journal of Research and Development"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5288520\/8895606\/08827920.pdf?arnumber=8827920","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,27]],"date-time":"2025-10-27T18:03:35Z","timestamp":1761588215000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8827920\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,7,1]]},"references-count":34,"journal-issue":{"issue":"4\/5"},"URL":"https:\/\/doi.org\/10.1147\/jrd.2019.2940428","relation":{},"ISSN":["0018-8646","0018-8646"],"issn-type":[{"value":"0018-8646","type":"print"},{"value":"0018-8646","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,7,1]]}}}