{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,24]],"date-time":"2026-01-24T13:55:56Z","timestamp":1769262956875,"version":"3.49.0"},"reference-count":23,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Science and Technology Major Project of China","award":["2021ZD0113100"],"award-info":[{"award-number":["2021ZD0113100"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62293510"],"award-info":[{"award-number":["62293510"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Science and Technology Innovation Program of Hunan Province","award":["2025RC3070"],"award-info":[{"award-number":["2025RC3070"]}]},{"name":"Major Project of Yuelushan Industrial Innovation Center","award":["2023YCII0102"],"award-info":[{"award-number":["2023YCII0102"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/lra.2026.3653369","type":"journal-article","created":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T23:58:44Z","timestamp":1768262324000},"page":"2618-2625","source":"Crossref","is-referenced-by-count":0,"title":["Efficient Robotic 3D Measurement Through Multi-DoF Reinforcement Learning for Continuous Viewpoint Planning"],"prefix":"10.1109","volume":"11","author":[{"given":"Jun","family":"Ye","sequence":"first","affiliation":[{"name":"National Engineering Research Center for Robot Visual Perception and Control Technology, School of Artificial Intelligence and Robotics, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9797-2081","authenticated-orcid":false,"given":"Qiu","family":"Fang","sequence":"additional","affiliation":[{"name":"National Engineering Research Center for Robot Visual Perception and Control Technology, School of Artificial Intelligence and Robotics, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5117-6095","authenticated-orcid":false,"given":"Shi","family":"Wang","sequence":"additional","affiliation":[{"name":"National Engineering Research Center for Robot Visual Perception and Control Technology, School of Artificial Intelligence and Robotics, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-2659-8628","authenticated-orcid":false,"given":"Changqing","family":"Gao","sequence":"additional","affiliation":[{"name":"National Engineering Research Center for Robot Visual Perception and Control Technology, School of Artificial Intelligence and Robotics, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5594-5146","authenticated-orcid":false,"given":"Weixing","family":"Peng","sequence":"additional","affiliation":[{"name":"National Engineering Research Center for Robot Visual Perception and Control Technology, School of Artificial Intelligence and Robotics, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0519-6458","authenticated-orcid":false,"given":"Yaonan","family":"Wang","sequence":"additional","affiliation":[{"name":"National Engineering Research Center for Robot Visual Perception and Control Technology, School of Artificial Intelligence and Robotics, Hunan University, Changsha, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2017.2655144"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-66823-5_33"},{"key":"ref3","first-page":"20731","article-title":"SCONE: Surface coverage optimization in unknown environments by volumetric integration","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Guédon","year":"2022"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00097"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01555"},{"key":"ref6","first-page":"5558","article-title":"GLEAM: Learning generalizable exploration policy for active mapping in complex 3D indoor scenes","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Chen","year":"2025"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_27"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2020.2987286"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487527"},{"key":"ref10","article-title":"Boundary exploration of next best view policy in 3D robotic scanning","author":"Li","year":"2024"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.aei.2022.101849"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1134\/S1064226920120141"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2020.02.024"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/3DIM.2007.41"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-017-9634-0"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2024.3507993"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.3390\/app15147757"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9340916"},{"key":"ref19","first-page":"21577","article-title":"ReLU to the rescue: Improve your on-policy actor-critic with positive advantages","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","volume":"235","author":"Jesson","year":"2024"},{"key":"ref20","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","volume":"48","author":"Mnih","year":"2016"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2025.3541180"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s11554-013-0386-6"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.5772\/58759"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/11359420\/11347509.pdf?arnumber=11347509","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T21:23:06Z","timestamp":1769203386000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11347509\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":23,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2026.3653369","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}