{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,5]],"date-time":"2025-10-05T04:26:37Z","timestamp":1759638397277,"version":"3.28.0"},"reference-count":11,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017,10]]},"DOI":"10.1109\/smc.2017.8122877","type":"proceedings-article","created":{"date-parts":[[2017,11,30]],"date-time":"2017-11-30T22:22:47Z","timestamp":1512080567000},"page":"1799-1804","source":"Crossref","is-referenced-by-count":18,"title":["Effective lazy training method for deep q-network in obstacle avoidance and path planning"],"prefix":"10.1109","author":[{"given":"Juan","family":"Wu","sequence":"first","affiliation":[]},{"given":"Seabyuk","family":"Shin","sequence":"additional","affiliation":[]},{"given":"Cheong-Gil","family":"Kim","sequence":"additional","affiliation":[]},{"given":"Shin-Dug","family":"Kim","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ISCID.2016.2054"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1631\/FITEE.1601650"},{"key":"ref10","first-page":"34","article-title":"Obstacle Avoidance Path Planning for UAV Using Reinforcement Learning Under Simulated Environment","author":"kim","year":"2017","journal-title":"IASER 3rd International Conference on Electronics Electrical Engineering Computer Science Okinawa"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2016.7803357"},{"journal-title":"TENSOR","year":"0","key":"ref11"},{"key":"ref5","first-page":"2863","article-title":"Action-conditional video prediction using deep networks in atari games","author":"oh","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref8","article-title":"Deep Reinforcement Learning for Robotic Manipulation with Asynchronous Off-Policy Updates","author":"gu","year":"2016","journal-title":"arXiv preprint arXiv 1610 01292"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/RCAR.2016.7784001"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref9","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv preprint arXiv 1312 5602"},{"key":"ref1","volume":"1","author":"sutton","year":"1998","journal-title":"Reinforcement Learning An Introduction"}],"event":{"name":"2017 IEEE International Conference on Systems, Man and Cybernetics (SMC)","start":{"date-parts":[[2017,10,5]]},"location":"Banff, AB","end":{"date-parts":[[2017,10,8]]}},"container-title":["2017 IEEE International Conference on Systems, Man, and Cybernetics (SMC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8114675\/8122565\/08122877.pdf?arnumber=8122877","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2018,1,17]],"date-time":"2018-01-17T23:14:08Z","timestamp":1516230848000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/8122877\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,10]]},"references-count":11,"URL":"https:\/\/doi.org\/10.1109\/smc.2017.8122877","relation":{},"subject":[],"published":{"date-parts":[[2017,10]]}}}