{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T15:53:37Z","timestamp":1774454017519,"version":"3.50.1"},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Department of Mechanical and Mechatronics Engineering, University of Waterloo"},{"DOI":"10.13039\/100012338","name":"Alan Turing Institute","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100012338","id-type":"DOI","asserted-by":"publisher"}]},{"name":"EU H2020 Project Harmony","award":["101017008"],"award-info":[{"award-number":["101017008"]}]},{"name":"EU H2020 Project Harmony","award":["101017008"],"award-info":[{"award-number":["101017008"]}]},{"name":"EU H2020 Project Harmony","award":["101017008"],"award-info":[{"award-number":["101017008"]}]},{"name":"BrainLinks-BrainTools Center"},{"name":"Carl Zeiss Foundation ReScaLe Project"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Auton Robot"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s10514-025-10233-4","type":"journal-article","created":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T04:46:38Z","timestamp":1772772398000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Online estimation and manipulation of articulated objects"],"prefix":"10.1007","volume":"50","author":[{"given":"Russell","family":"Buchanan","sequence":"first","affiliation":[]},{"given":"Adrian","family":"R\u00f6fer","sequence":"additional","affiliation":[]},{"given":"Jo\u00e3o","family":"Moura","sequence":"additional","affiliation":[]},{"given":"Abhinav","family":"Valada","sequence":"additional","affiliation":[]},{"given":"Sethu","family":"Vijayakumar","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,6]]},"reference":[{"issue":"1","key":"10233_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s12532-018-0139-4","volume":"11","author":"JAE Andersson","year":"2019","unstructured":"Andersson, J. A. E., Gillis, J., Horn, G., Rawlings, J. B., & Diehl, M. (2019). CasADi - A software framework for nonlinear optimization and optimal control. Mathematical Programming Computation, 11(1), 1\u201336.","journal-title":"Mathematical Programming Computation"},{"key":"10233_CR2","doi-asserted-by":"crossref","unstructured":"Aoyama, M. Y., Moura, J., Saito, N., & Vijayakumar, S. (2024). Few-shot learning of force-based motions from demonstration through pre-training of haptic representation. In: IEEE International Conference on Robotics and Automation (ICRA)","DOI":"10.1109\/ICRA57147.2024.10610502"},{"key":"10233_CR3","doi-asserted-by":"crossref","unstructured":"Bahl, S., Mendonca, R., Chen, L., Jain, U., & Pathak, D. (2023). Affordances from human videos as a versatile representation for robotics. 
In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","DOI":"10.1109\/CVPR52729.2023.01324"},{"issue":"6","key":"10233_CR4","doi-asserted-by":"publisher","first-page":"1273","DOI":"10.1109\/TRO.2017.2721939","volume":"33","author":"J Bohg","year":"2017","unstructured":"Bohg, J., Hausman, K., Sankaran, B., Brock, O., Kragic, D., Schaal, S., & Sukhatme, G. (2017). Interactive Perception: Leveraging Action in Perception and Perception in Action. IEEE Transactions on Robotics, 33(6), 1273\u20131291.","journal-title":"IEEE Transactions on Robotics"},{"key":"10233_CR5","doi-asserted-by":"crossref","unstructured":"Buchanan, R., R\u00f6fer, A., Moura, J., Valada, A., & Vijayakumar, S. (2024) Online estimation of articulated objects with factor graphs using vision and proprioceptive sensing. In: IEEE International Conference on Robotics and Automation (ICRA)","DOI":"10.1109\/ICRA57147.2024.10610590"},{"key":"10233_CR6","doi-asserted-by":"publisher","unstructured":"Charles, R. Q., Su, H., Kaichun, M., Guibas, L. J. (2017). Pointnet: Deep learning on point sets for 3d classification and segmentation. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp 77\u201385, https:\/\/doi.org\/10.1109\/CVPR.2017.16","DOI":"10.1109\/CVPR.2017.16"},{"key":"10233_CR7","unstructured":"Del Aguila Ferrandis, J., Pousa De Moura, J., & Vijayakumar, S. (2024). Learning visuotactile estimation and control for non-prehensile manipulation under occlusions. In: Conference on Robot Learning (CoRL)"},{"key":"10233_CR8","doi-asserted-by":"publisher","unstructured":"Dellaert, F., & GTSAM Contributors. (2022). borglab\/gtsam. https:\/\/doi.org\/10.5281\/zenodo.5794541, https:\/\/github.com\/borglab\/gtsam","DOI":"10.5281\/zenodo.5794541"},{"key":"10233_CR9","doi-asserted-by":"crossref","unstructured":"Eisner, B., Zhang, H., & Held, D. (2022). Flowbot3d: Learning 3d articulation flow to manipulate articulated objects. In: Robotics: Science and Systems (RSS)","DOI":"10.15607\/RSS.2022.XVIII.018"},{"key":"10233_CR10","doi-asserted-by":"crossref","unstructured":"Heppert, N., Migimatsu, T., Yi, B., Chen, C., & Bohg, J. (2022). Category-independent articulated object tracking with factor graphs. In: IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp 3800\u20133807","DOI":"10.1109\/IROS47612.2022.9982029"},{"key":"10233_CR11","doi-asserted-by":"crossref","unstructured":"Jain, A., & Kemp, C. C. (2010). Pulling open doors and drawers: Coordinating an omni-directional base and a compliant arm with equilibrium point control. In: IEEE International Conference on Robotics and Automation (ICRA), pp 1807\u20131814","DOI":"10.1109\/ROBOT.2010.5509445"},{"key":"10233_CR12","doi-asserted-by":"publisher","unstructured":"Jain, A., Lioutikov, R., Chuck, C., & Niekum, S. (2021). ScrewNet: Category-Independent Articulation Model Estimation From Depth Images Using Screw Theory. In: IEEE International Conference on Robotics and Automation (ICRA), pp 13670\u201313677, https:\/\/doi.org\/10.1109\/ICRA48506.2021.9561132","DOI":"10.1109\/ICRA48506.2021.9561132"},{"key":"10233_CR13","doi-asserted-by":"crossref","unstructured":"Jiang, H., Mao, Y., Savva, M., Chang, A. X. (2022a). Opd: Single-view 3d openable part detection. In: European Conference on Computer Vision (ECCV), pp 410\u2013426","DOI":"10.1007\/978-3-031-19842-7_24"},{"key":"10233_CR14","doi-asserted-by":"crossref","unstructured":"Jiang, Z., Hsu, C. C., & Zhu, Y. (2022b). 
Ditto: Building Digital Twins of Articulated Objects from Interaction. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 5606\u20135616","DOI":"10.1109\/CVPR52688.2022.00553"},{"key":"10233_CR15","doi-asserted-by":"publisher","first-page":"301","DOI":"10.1007\/978-3-642-28572-1_21","volume-title":"Experimental Robotics: The 12th International Symposium on Experimental Robotics","author":"D Katz","year":"2014","unstructured":"Katz, D., Orthey, A., & Brock, O. (2014). Interactive Perception of Articulated Objects. Experimental Robotics: The 12th International Symposium on Experimental Robotics (pp. 301\u2013315). Springer Tracts in Advanced Robotics."},{"key":"10233_CR16","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A. C., Lo, W. Y., Doll\u00e1r, P., Girshick, R. (2023). Segment anything. arXiv preprint arXiv:2304.02643","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"10233_CR17","doi-asserted-by":"crossref","unstructured":"Li, X., Wang, H., Yi, L., Guibas, L. J., Abbott, A. L., & Song, S. (2020). Category-level articulated object pose estimation. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 3703\u20133712","DOI":"10.1109\/CVPR42600.2020.00376"},{"issue":"2","key":"10233_CR18","doi-asserted-by":"publisher","first-page":"363","DOI":"10.1007\/s11263-021-01540-1","volume":"130","author":"Z Li","year":"2022","unstructured":"Li, Z., Sedlar, J., Carpentier, J., Laptev, I., Mansard, N., & Sivic, J. (2022). Estimating 3d motion and forces of human-object interactions from internet videos. International Journal of Computer Vision, 130(2), 363\u2013383.","journal-title":"International Journal of Computer Vision"},{"key":"10233_CR19","doi-asserted-by":"publisher","first-page":"741","DOI":"10.1177\/0278364919848850","volume":"41","author":"R Mart\u00edn-Mart\u00edn","year":"2022","unstructured":"Mart\u00edn-Mart\u00edn, R., & Brock, O. (2022). Coupled recursive estimation for online interactive perception of articulated objects. International Journal of Robotics Research, 41, 741\u2013777.","journal-title":"International Journal of Robotics Research"},{"key":"10233_CR20","doi-asserted-by":"crossref","unstructured":"Mittal, M., Hoeller, D., Farshidian, F., Hutter, M., & Garg, A. (2021). Articulated object interaction in unknown scenes with whole-body mobile manipulation. arXiv preprint arXiv:2103.10534","DOI":"10.1109\/IROS47612.2022.9981779"},{"key":"10233_CR21","doi-asserted-by":"crossref","unstructured":"Mo, K., Zhu, S., Chang, A. X., Yi, L., Tripathi, S., Guibas, L. J., & Su, H. (2019). Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 909\u2013918","DOI":"10.1109\/CVPR.2019.00100"},{"key":"10233_CR22","doi-asserted-by":"crossref","unstructured":"Mo, K., Guibas, L., Mukadam, M., Gupta, A., & Tulsiani, S. (2021). Where2act: From pixels to actions for articulated 3d objects. In: IEEE\/CVF International Conference on Computer Vision (ICCV), pp 6793\u20136803","DOI":"10.1109\/ICCV48922.2021.00674"},{"key":"10233_CR23","volume-title":"A Mathematical Introduction to Robotic Manipulation","author":"RM Murray","year":"1994","unstructured":"Murray, R. M., Li, Z., & Sastry, S. (1994). A Mathematical Introduction to Robotic Manipulation (1st ed.). 
CRC Press.","edition":"1"},{"key":"10233_CR24","doi-asserted-by":"crossref","unstructured":"Nie, N., Gadre, S. Y., Ehsani, K., & Song, S. (2023). Structure from Action: Learning Interactions for 3D Articulated Object Structure Discovery. In: IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp 1222\u20131229","DOI":"10.1109\/IROS55552.2023.10342135"},{"key":"10233_CR25","unstructured":"Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al. (2021). Learning transferable visual models from natural language supervision. In: International conference on machine learning, pp 8748\u20138763"},{"issue":"12","key":"10233_CR26","doi-asserted-by":"publisher","first-page":"7937","DOI":"10.1109\/TNNLS.2021.3086757","volume":"33","author":"RL Russell","year":"2022","unstructured":"Russell, R. L., & Reale, C. (2022). Multivariate uncertainty in deep learning. IEEE Transactions on Neural Networks and Learning Systems, 33(12), 7937\u20137943.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"issue":"2","key":"10233_CR27","doi-asserted-by":"publisher","first-page":"3372","DOI":"10.1109\/LRA.2022.3146515","volume":"7","author":"A R\u00f6fer","year":"2022","unstructured":"R\u00f6fer, A., Bartels, G., Burgard, W., Valada, A., & Beetz, M. (2022). Kineverse: A symbolic articulation model framework for model-agnostic mobile manipulation. IEEE Robotics and Automation Letters, 7(2), 3372\u20133379. https:\/\/doi.org\/10.1109\/LRA.2022.3146515","journal-title":"IEEE Robotics and Automation Letters"},{"key":"10233_CR28","doi-asserted-by":"crossref","unstructured":"Schiavi, G., Wulkop, P., Rizzi, G., Ott, L., Siegwart, R., Chung, J. J. (2023). Learning agent-aware affordances for closed-loop interaction with articulated objects. In: IEEE International Conference on Robotics and Automation (ICRA), pp 5916\u20135922","DOI":"10.1109\/ICRA48891.2023.10160747"},{"issue":"2","key":"10233_CR29","doi-asserted-by":"publisher","first-page":"477","DOI":"10.1613\/jair.3229","volume":"41","author":"J Sturm","year":"2011","unstructured":"Sturm, J., Stachniss, C., & Burgard, W. (2011). A probabilistic framework for learning kinematic models of articulated objects. Journal of Artificial Intelligence Research, 41(2), 477\u2013526.","journal-title":"Journal of Artificial Intelligence Research"},{"key":"10233_CR30","doi-asserted-by":"publisher","unstructured":"Sundermeyer, M., Mousavian, A., Triebel, R., & Fox, D. (2021). Contact-graspnet: Efficient 6-dof grasp generation in cluttered scenes. In: 2021 IEEE International Conference on Robotics and Automation (ICRA), pp 13438\u201313444, https:\/\/doi.org\/10.1109\/ICRA48506.2021.9561877","DOI":"10.1109\/ICRA48506.2021.9561877"},{"key":"10233_CR31","doi-asserted-by":"publisher","unstructured":"Vuong, A. D., Vu, M. N., Le, H., Huang, B., Binh, H. T. T., Vo, T., Kugi, A., & Nguyen, A. (2024). Grasp-anything: Large-scale grasp dataset from foundation models. In: 2024 IEEE International Conference on Robotics and Automation (ICRA), pp 14030\u201314037, https:\/\/doi.org\/10.1109\/ICRA57147.2024.10611277","DOI":"10.1109\/ICRA57147.2024.10611277"},{"key":"10233_CR32","unstructured":"Werby, A., B\u00fcchner, M., R\u00f6fer, A., Huang, C., Burgard, W., & Valada, A. (2025). Articulated object estimation in the wild. 
arXiv:2509.01708"},{"issue":"2","key":"10233_CR33","doi-asserted-by":"publisher","first-page":"2447","DOI":"10.1109\/LRA.2022.3142397","volume":"7","author":"Z Xu","year":"2022","unstructured":"Xu, Z., He, Z., & Song, S. (2022). Universal manipulation policy network for articulated objects. IEEE Robotics and Automation Letters, 7(2), 2447\u20132454.","journal-title":"IEEE Robotics and Automation Letters"},{"key":"10233_CR34","doi-asserted-by":"crossref","unstructured":"Zeng, V., Lee, T.E., Liang, J., & Kroemer, O. (2021). Visual identification of articulated object parts. In: IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp 2443\u20132450","DOI":"10.1109\/IROS51168.2021.9636054"},{"key":"10233_CR35","unstructured":"Zhang, H., Eisner, B., & Held, D. (2022). Flowbot++: Learning generalized articulated objects manipulation via articulation projection. In: Conference on Robot Learning (CoRL)"}],"container-title":["Autonomous Robots"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10514-025-10233-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10514-025-10233-4","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10514-025-10233-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T15:02:21Z","timestamp":1774450941000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10514-025-10233-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":35,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["10233"],"URL":"https:\/\/doi.org\/10.1007\/s10514-025-10233-4","relation":{},"ISSN":["0929-5593","1573-7527"],"issn-type":[{"value":"0929-5593","type":"print"},{"value":"1573-7527","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]},"assertion":[{"value":"13 June 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 September 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 November 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 March 2026","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"13"}}
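
The record above is a Crossref REST API "work" message. As a minimal sketch, assuming only the public Crossref endpoint https://api.crossref.org/works/{DOI} and the Python standard library, the following shows one way such a record can be fetched and a few of its fields (title, author, references-count, published) read back; the variable names and printed summary are illustrative, not part of the record itself.

import json
import urllib.request

# DOI taken from the record above.
DOI = "10.1007/s10514-025-10233-4"
URL = f"https://api.crossref.org/works/{DOI}"

# Fetch the work record; the Crossref API returns the same
# {"status": "ok", "message-type": "work", ..., "message": {...}} envelope as above.
with urllib.request.urlopen(URL) as resp:
    record = json.load(resp)

msg = record["message"]  # the "message" object carries the bibliographic metadata

# "title" is a list of strings; "author" is a list of {given, family, ...} objects.
title = msg["title"][0]
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in msg.get("author", [])
)

print(title)
print(authors)
print("References:", msg.get("references-count"))
print("Published:", msg["published"]["date-parts"][0])  # e.g. [2026, 3]

For anything beyond a one-off query, Crossref's documentation recommends identifying the client (for example, a mailto address in the User-Agent header) so requests are routed to the polite pool.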