{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:33:15Z","timestamp":1730266395453,"version":"3.28.0"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/ijcnn55064.2022.9889793","type":"proceedings-article","created":{"date-parts":[[2022,9,30]],"date-time":"2022-09-30T19:56:04Z","timestamp":1664567764000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Planning Large-scale Object Rearrangement Using Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Sourav","family":"Ghosh","sequence":"first","affiliation":[{"name":"Robotics and Autonomous Systems, TCS Research,India"}]},{"given":"Dipanjan","family":"Das","sequence":"additional","affiliation":[{"name":"Robotics and Autonomous Systems, TCS Research,India"}]},{"given":"Abhishek","family":"Chakraborty","sequence":"additional","affiliation":[{"name":"Robotics and Autonomous Systems, TCS Research,India"}]},{"given":"Marichi","family":"Agarwal","sequence":"additional","affiliation":[{"name":"Robotics and Autonomous Systems, TCS Research,India"}]},{"given":"Brojeshwar","family":"Bhowmick","sequence":"additional","affiliation":[{"name":"Robotics and Autonomous Systems, TCS Research,India"}]}],"member":"263","reference":[{"key":"ref30","article-title":"Parametrized deep q-networks learning: Reinforcement learning with discrete-continuous hybrid action space","author":"xiong","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1177\/0278364918780999"},{"key":"ref11","article-title":"Deep reinforcement learning in parameterized action space","author":"hausknecht","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref12","article-title":"Hierarchical task and motion planning in the now","author":"kaelbling","year":"2010","journal-title":"IEEE International Conference on Robotics and Automation Anchorage Alaska Workshop on Mobile Manipulation"},{"key":"ref13","article-title":"Qt-opt: Scalable deep reinforcement learning for vision-based robotic manipulation","author":"kalashnikov","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref14","article-title":"Ai2-thor: An interactive 3d environment for visual ai","author":"kolve","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref15","article-title":"Attention, learn to solve routing problems!","author":"kool","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2015.XI.045"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487581"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2980984"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511546877"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561716"},{"key":"ref4","article-title":"Combining reinforcement learning and constraint programming for combinatorial optimization","author":"cappart","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3414685.3417788"},{"key":"ref3","article-title":"Multi-pass q-networks for deep reinforcement learning with parameterised action spaces","author":"bester","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref6","article-title":"Learning combinatorial optimization algorithms over graphs","author":"dai","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00586"},{"key":"ref5","article-title":"Reinforcement learning for classical planning: Viewing heuristics as dense reward generators","author":"chitnis","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref8","article-title":"On minimizing the number of running buffers for tabletop rearrangement","author":"gao","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref7","first-page":"170","article-title":"Learning heuristics for the tsp by policy gradient","author":"deudon","year":"2018","journal-title":"International Conference on the Integration of Constraint Programming Artificial Intelligence and Operations Research"},{"key":"ref2","article-title":"Neural combinatorial optimization with reinforcement learning","author":"bello","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2017.XIII.051"},{"key":"ref1","article-title":"Rearrangement: A challenge for embodied ai","author":"batra","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref20","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref22","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7759083"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1177\/0278364904045471"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2021.XVII.072"},{"key":"ref26","article-title":"Logic-geometric programming: An optimization-based approach to combined task and motion planning","author":"toussaint","year":"2015","journal-title":"Twenty-Fourth International Joint Conference on Artificial Intelligence"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2014.6906922"}],"event":{"name":"2022 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2022,7,18]]},"location":"Padua, Italy","end":{"date-parts":[[2022,7,23]]}},"container-title":["2022 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9891857\/9889787\/09889793.pdf?arnumber=9889793","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T22:59:40Z","timestamp":1667516380000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9889793\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/ijcnn55064.2022.9889793","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}