{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,11]],"date-time":"2025-06-11T05:40:05Z","timestamp":1749620405980,"version":"3.41.0"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376031"],"award-info":[{"award-number":["62376031"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1109\/lra.2025.3575013","type":"journal-article","created":{"date-parts":[[2025,5,30]],"date-time":"2025-05-30T17:48:14Z","timestamp":1748627294000},"page":"7286-7293","source":"Crossref","is-referenced-by-count":0,"title":["M${}^{3}$Bench: Benchmarking Whole-Body <u>M<\/u>otion Generation for <u>M<\/u>obile <u>M<\/u>anipulation in 3D Scenes"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8929-134X","authenticated-orcid":false,"given":"Zeyu","family":"Zhang","sequence":"first","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence, Beijing Institute for General Artificial Intelligence, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-8502-1163","authenticated-orcid":false,"given":"Sixu","family":"Yan","sequence":"additional","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"given":"Muzhi","family":"Han","sequence":"additional","affiliation":[{"name":"Center for Vision, Cognition, Learning, and Autonomy (VCLA), Statistics Department, University of California, Los Angeles (UCLA), Los Angeles, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-7925-356X","authenticated-orcid":false,"given":"Zaijin","family":"Wang","sequence":"additional","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence, Beijing Institute for General Artificial Intelligence, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6732-7823","authenticated-orcid":false,"given":"Xinggang","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"given":"Song-Chun","family":"Zhu","sequence":"additional","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence, Beijing Institute for General Artificial Intelligence, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3003-8611","authenticated-orcid":false,"given":"Hangxin","family":"Liu","sequence":"additional","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence, Beijing Institute for General Artificial Intelligence, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636351"},{"key":"ref2","first-page":"1666","article-title":"O2O-afford: Annotation-free large-scale object-object affordance learning","volume-title":"Proc. Conf. Robot Learn.","author":"Mo","year":"2021"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01670-0"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561546"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10342208"},{"key":"ref6","article-title":"Multi-skill mobile manipulation for object rearrangement","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gu","year":"2022"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160667"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3191793"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3313063"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IROS58592.2024.10801328"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00280"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636554"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00387"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2974707"},{"key":"ref15","article-title":"Maniskill: Generalizable manipulation skill benchmark with large-scale demonstrations","volume-title":"Proc. Neural Inf. Process. Syst. (NeurIPS) Track Datasets Benchmarks","author":"Mu","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3133603"},{"key":"ref17","article-title":"Maniskill2: A unified benchmark for generalizable manipulation skills","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gu","year":"2022"},{"key":"ref18","first-page":"251","article-title":"Habitat 2.0: Training home assistants to rearrange their habitat","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Szot","year":"2021"},{"key":"ref19","article-title":"Closed-loop open-vocabulary mobile manipulation with gpt-4v","volume-title":"Proc. Int. Conf. Robot. Automat.","author":"Zhi","year":"2025"},{"key":"ref20","doi-asserted-by":"crossref","DOI":"10.1109\/TPAMI.2025.3553454","article-title":"M2 Diffuser: Diffusion-based trajectory optimization for mobile manipulation in 3D scenes","volume-title":"Proc. Trans. Pattern Anal. Mach. Intell.","author":"Yan","year":"2025"},{"key":"ref21","doi-asserted-by":"crossref","DOI":"10.1002\/rob.22588","article-title":"PR2: A physics-and photo-realistic testbed for embodied AI and humanoid robots","author":"Liu","year":"2025","journal-title":"J. Field Robot."},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161569"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989545"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01075"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3180108"},{"key":"ref26","first-page":"477","article-title":"Behavior: Benchmark for everyday household activities in virtual, interactive, and ecological environments","volume-title":"Proc. Conf. Robot Learn.","author":"Srivastava","year":"2022"},{"key":"ref27","first-page":"665","article-title":"VLMbench: A compositional benchmark for vision-and-language manipulation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Zheng","year":"2022"},{"key":"ref28","first-page":"726","article-title":"Transporter networks: Rearranging the visual world for robotic manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Zeng","year":"2021"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01873"},{"key":"ref30","article-title":"Isaac Gym: High performance GPU based physics simulation for robot learning","volume-title":"Proc. Proc. Neural Inf. Process. Syst. Track Datasets Benchmarks","author":"Makoviychuk","year":"2021"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2013.IX.031"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1177\/0278364914528132"},{"key":"ref33","first-page":"967","article-title":"Motion policy networks","volume-title":"Proc. Conf. Robot Learn.","author":"Fishman","year":"2023"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10341424"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.36288\/roscon2018-900279"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01539"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2000.844730"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530087"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00996"},{"key":"ref40","first-page":"5105","article-title":"PointNet++: Deep hierarchical feature learning on point sets in a metric space","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Qi","year":"2017"},{"key":"ref41","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen","year":"2021"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01607"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01595"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/11008675\/11018341.pdf?arnumber=11018341","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,11]],"date-time":"2025-06-11T05:18:22Z","timestamp":1749619102000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11018341\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7]]},"references-count":43,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/lra.2025.3575013","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2025,7]]}}}