{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,24]],"date-time":"2025-12-24T08:50:20Z","timestamp":1766566220872,"version":"3.48.0"},"reference-count":22,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T00:00:00Z","timestamp":1759881600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T00:00:00Z","timestamp":1759881600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Auton Robot"],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s10514-025-10214-7","type":"journal-article","created":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T13:52:57Z","timestamp":1759931577000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Autonomous robotic manipulation for grasping a target object in cluttered environments"],"prefix":"10.1007","volume":"49","author":[{"given":"Sanraj","family":"Lachhiramka","sequence":"first","affiliation":[]},{"family":"Pradeep J","sequence":"additional","affiliation":[]},{"given":"Archanaa A.","family":"Chandaragi","sequence":"additional","affiliation":[]},{"given":"Arjun","family":"Achar","sequence":"additional","affiliation":[]},{"given":"Shikha","family":"Tripathi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,10,8]]},"reference":[{"key":"10214_CR2","doi-asserted-by":"publisher","unstructured":"Chen, C., Yan, S., Yuan, M., Tay, C., Choi, D., & Le, Q. D. (2023). 
A minimal collision strategy of synergy between pushing and grasping for large clusters of objects. In 2023 IEEE\/RSJ international conference on intelligent robots and systems (IROS) (pp. 6817\u20136822). https:\/\/doi.org\/10.1109\/IROS55552.2023.10341452","DOI":"10.1109\/IROS55552.2023.10341452"},{"key":"10214_CR1","doi-asserted-by":"publisher","unstructured":"Chen, Y., Ju, Z., & Yang, C. (2020). Combining reinforcement learning and rule-based method to manipulate objects in clutter. In 2020 International joint conference on neural networks (IJCNN), Glasgow, UK (pp. 1\u20136). https:\/\/doi.org\/10.1109\/IJCNN48605.2020.9207153","DOI":"10.1109\/IJCNN48605.2020.9207153"},{"key":"10214_CR3","doi-asserted-by":"publisher","first-page":"521","DOI":"10.1007\/978-3-030-95892-3_41","volume-title":"Intelligent autonomous systems 16. IAS 2021, lecture notes in networks and systems","author":"A Franceschetti","year":"2022","unstructured":"Franceschetti, A., Tosello, E., Castaman, N., & Ghidoni, S. (2022). Robotic arm control and task training through deep reinforcement learning. In M. H. Ang Jr., H. Asama, W. Lin, & S. Foong (Eds.), Intelligent autonomous systems 16. IAS 2021, lecture notes in networks and systems (Vol. 412, pp. 521\u2013531). Cham: Springer. https:\/\/doi.org\/10.1007\/978-3-030-95892-3_41"},{"key":"10214_CR4","unstructured":"Fujimoto, S., van Hoof, H., & Meger, D. (2018). Addressing function approximation error in actor-critic methods. arXiv:1802.09477"},{"key":"10214_CR5","unstructured":"Haarnoja, T., Zhou, A., Abbeel, P., & Levine, S. (2018). Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv:abs\/1801.01290"},{"key":"10214_CR6","doi-asserted-by":"publisher","unstructured":"Imtiaz, M.B., Qiao, Y., & Lee, B. (2022). Prehensile robotic pick-and-place in clutter with deep reinforcement learning. 
In 2022 international conference on electrical, computer and energy technologies (ICECET), Prague, Czech Republic (pp. 1\u20136). https:\/\/doi.org\/10.1109\/ICECET55527.2022.9873426","DOI":"10.1109\/ICECET55527.2022.9873426"},{"key":"10214_CR7","unstructured":"Kalashnikov, D., Irpan, A., Pastor, P., Ibarz, J., Herzog, A., Jang, E., Quillen, D., Holly, E., Kalakrishnan, M., Vanhoucke, V., & Levine, S. (2018). Scalable deep reinforcement learning for vision-based robotic manipulation. In Proceedings of the 2nd conference on robot learning, proceedings of machine learning research (vol. 87, pp. 651\u2013673). https:\/\/proceedings.mlr.press\/v87\/kalashnikov18a.html"},{"key":"10214_CR8","doi-asserted-by":"publisher","first-page":"465","DOI":"10.1007\/s10994-021-06116-1","volume":"111","author":"O Kilinc","year":"2022","unstructured":"Kilinc, O., & Montana, G. (2022). Reinforcement learning for robotic manipulation using simulated locomotion demonstrations. Machine Learning, 111, 465\u2013486. https:\/\/doi.org\/10.1007\/s10994-021-06116-1","journal-title":"Machine Learning"},{"key":"10214_CR9","doi-asserted-by":"publisher","unstructured":"Kumra, S., Joshi, S., & Sahin, F. (2020). Antipodal robotic grasping using generative residual convolutional neural network. In 2020 IEEE\/RSJ international conference on intelligent robots and systems (IROS), Las Vegas, NV, USA (pp. 9626\u20139633). https:\/\/doi.org\/10.1109\/IROS45743.2020.9340777","DOI":"10.1109\/IROS45743.2020.9340777"},{"key":"10214_CR10","unstructured":"Lillicrap, T. P., Hunt, J., & Pritzel, J.A., et al. (2015). Continuous control with deep reinforcement learning. arXiv:1509.02971"},{"key":"10214_CR11","doi-asserted-by":"publisher","first-page":"1661","DOI":"10.1007\/s12555-020-0069-6","volume":"19","author":"T Lindner","year":"2021","unstructured":"Lindner, T., Milecki, A., & Wyrwa\u0142, D. (2021). Positioning of the robotic arm using different reinforcement learning algorithms. 
International Journal of Control, Automation and Systems, 19, 1661\u20131676. https:\/\/doi.org\/10.1007\/s12555-020-0069-6","journal-title":"International Journal of Control, Automation and Systems"},{"key":"10214_CR12","doi-asserted-by":"publisher","unstructured":"Ren, D., Ren, X., Wang, X., Digumarti, S. T., & Shi, G. (2021). Fast-learning grasping and pre-grasping via clutter quantization and q-map masking. In 2021 IEEE\/RSJ international conference on intelligent robots and systems (IROS), Prague, Czech Republic (pp. 3611\u20133618). https:\/\/doi.org\/10.1109\/IROS51168.2021.9636165","DOI":"10.1109\/IROS51168.2021.9636165"},{"key":"10214_CR13","unstructured":"Schulman, J., Levine, S., Moritz, P., Jordan, M. I., & Abbeel, P. (2015). Trust region policy optimization. arXiv:1502.05477"},{"key":"10214_CR14","unstructured":"Schulman, J., Wolski, F., Dhariwal, P., Radford, A., & Klimov, O. (2017). Proximal policy optimization algorithms. Available: arXiv:abs\/1707.06347"},{"key":"10214_CR15","doi-asserted-by":"publisher","first-page":"483","DOI":"10.1007\/s10514-022-10034-z","volume":"46","author":"AA Shahid","year":"2022","unstructured":"Shahid, A. A., Piga, D., Braghin, F., & Roveda, L. (2022). Continuous control actions learning and adaptation for robotic manipulation through reinforcement learning. Autonomous Robots, 46, 483\u2013498. https:\/\/doi.org\/10.1007\/s10514-022-10034-z","journal-title":"Autonomous Robots"},{"key":"10214_CR16","doi-asserted-by":"publisher","unstructured":"Wang, Y., Mokhtar, K., Heemskerk, C., & Kasaei, H. (2024). Self-supervised learning for joint pushing and grasping policies in highly cluttered environments. In 2024 IEEE international conference on robotics and automation (ICRA), Yokohama, Japan (pp. 13840\u201313847). https:\/\/doi.org\/10.1109\/ICRA57147.2024.10611650","DOI":"10.1109\/ICRA57147.2024.10611650"},{"key":"10214_CR17","doi-asserted-by":"publisher","unstructured":"Xiang, Y., et al. (2023). 
RMBench: Benchmarking deep reinforcement learning for robotic manipulator control. In 2023 IEEE\/RSJ international conference on intelligent robots and systems (IROS), Detroit, MI, USA (pp. 1207\u20131214). https:\/\/doi.org\/10.1109\/IROS55552.2023.10342479","DOI":"10.1109\/IROS55552.2023.10342479"},{"key":"10214_CR18","doi-asserted-by":"crossref","unstructured":"Yang, S., & Wang, Q. (2022). Robotic arm motion planning with autonomous obstacle avoidance based on deep reinforcement learning. In 2022 41st chinese control conference (CCC), Hefei, China (Vol. 2022, pp. 3692\u20133697).","DOI":"10.23919\/CCC55666.2022.9902722"},{"issue":"2","key":"10214_CR19","doi-asserted-by":"publisher","first-page":"2232","DOI":"10.1109\/LRA.2020.2970622","volume":"5","author":"Y Yang","year":"2020","unstructured":"Yang, Y., Liang, H., & Choi, C. (2020). A deep learning approach to grasping the invisible. IEEE Robotics and Automation Letters, 5(2), 2232\u20132239. https:\/\/doi.org\/10.1109\/LRA.2020.2970622","journal-title":"IEEE Robotics and Automation Letters"},{"key":"10214_CR20","doi-asserted-by":"publisher","unstructured":"Zeng, A., Song, S., Welker, S., Lee, J., Rodriguez, A., & Funkhouser, T. (2018). Learning synergies between pushing and grasping with self-supervised deep reinforcement learning. In 2018 IEEE\/RSJ international conference on intelligent robots and systems (IROS), Madrid, Spain (pp. 4238\u20134245). https:\/\/doi.org\/10.1109\/IROS.2018.8593986","DOI":"10.1109\/IROS.2018.8593986"},{"key":"10214_CR21","doi-asserted-by":"publisher","unstructured":"Zhang, T., & Mo, H. (2023). Robust grasp operation in clutter for multi-objective robotic tasks using deep reinforcement learning. In 2023 42nd Chinese control conference (CCC), Tianjin, China (pp. 4751\u20134755). https:\/\/doi.org\/10.23919\/CCC58697.2023.10240314","DOI":"10.23919\/CCC58697.2023.10240314"},{"key":"10214_CR22","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-019-08302-9","author":"J Zhang","year":"2020","unstructured":"Zhang, J., Li, M., Feng, Y., & Yang, C. (2020). Robotic grasp detection based on image processing and random forest. Multimedia Tools and Applications. https:\/\/doi.org\/10.1007\/s11042-019-08302-9","journal-title":"Multimedia Tools and Applications"}],"container-title":["Autonomous Robots"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10514-025-10214-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10514-025-10214-7","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10514-025-10214-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,24]],"date-time":"2025-12-24T08:33:43Z","timestamp":1766565223000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10514-025-10214-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,8]]},"references-count":22,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["10214"],"URL":"https:\/\/doi.org\/10.1007\/s10514-025-10214-7","relation":{},"ISSN":["0929-5593","1573-7527"],"issn-type":[{"type":"print","value":"0929-5593"},{"type":"electronic","value":"1573-7527"}],"subject":[],"published":{"date-parts":[[2025,10,8]]},"assertion":[{"value":"13 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 July 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 August 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 October 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"30"}}