{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:40:04Z","timestamp":1775068804817,"version":"3.50.1"},"reference-count":64,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11128428","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"14820-14827","source":"Crossref","is-referenced-by-count":3,"title":["SARO: Space-Aware Robot System for Terrain Crossing via Vision-Language Model"],"prefix":"10.1109","author":[{"given":"Shaoting","family":"Zhu","sequence":"first","affiliation":[{"name":"Tsinghua University,IIIS,Beijing,China"}]},{"given":"Derun","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,SEIEE,Shanghai,China"}]},{"given":"Linzhan","family":"Mou","sequence":"additional","affiliation":[{"name":"University of Pennsylvania,GRASP Lab,Philadelphia,PA,USA"}]},{"given":"Yong","family":"Liu","sequence":"additional","affiliation":[{"name":"Zhejiang University,CSE,Hangzhou,China"}]},{"given":"Ningyi","family":"Xu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,SEIEE,Shanghai,China"}]},{"given":"Hang","family":"Zhao","sequence":"additional","affiliation":[{"name":"Tsinghua 
University,IIIS,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2021.XVII.011"},{"key":"ref2","first-page":"22","article-title":"Walk these ways: Tuning robot control for generalization with multiplicity of behavior","volume-title":"Conference on Robot Learning. PMLR","author":"Margolis","year":"2023"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01710"},{"key":"ref4","article-title":"Moka: Open-vocabulary robotic manipulation through mark-based visual prompting","author":"Liu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS58592.2024.10801352"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10611220"},{"key":"ref7","article-title":"Policy adaptation via language optimization: Decomposing tasks for few-shot imitation","author":"Myers","year":"2024","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Rekep: Spatio-temporal reasoning of relational keypoint constraints for robotic manipulation","author":"Huang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref9","article-title":"Commonsense reasoning for legged robot adaptation with vision-language models","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1177\/0278364906066768"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.1997.620037"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593885"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594448"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abc5986"},{"key":"ref15","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint"},{"issue":"240","key":"ref16","first-page":"1","article-title":"Palm: 
Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"Journal of Machine Learning Research"},{"key":"ref17","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023","journal-title":"arXiv preprint"},{"key":"ref18","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"International conference on machine learning. PMLR","author":"Li","year":"2022"},{"key":"ref19","author":"Liu","year":"2023","journal-title":"Visual instruction tuning"},{"key":"ref20","article-title":"Toward general-purpose robots via foundation models: A survey and meta-analysis","author":"Hu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref21","first-page":"287","article-title":"Do as i can, not as i say: Grounding language in robotic affordances","volume-title":"Conference on robot learning. PMLR","author":"Brohan","year":"2023"},{"key":"ref22","article-title":"Inner monologue: Embodied reasoning through planning with language models","author":"Huang","year":"2022","journal-title":"arXiv preprint"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161534"},{"key":"ref24","first-page":"9118","article-title":"Language models as zero-shot planners: Extracting actionable knowledge for embodied agents","volume-title":"International Conference on Machine Learning. 
PMLR","author":"Huang","year":"2022"},{"key":"ref25","article-title":"Creative robot tool use with large language models","author":"Xu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref26","article-title":"Long-horizon locomotion and manipulation on a quadrupedal robot with large language models","author":"Ouyang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161317"},{"key":"ref29","article-title":"Eureka: Human-level reward design via coding large language models","author":"Ma","year":"2023","journal-title":"arXiv preprint"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161178"},{"key":"ref31","first-page":"14743","article-title":"Zero-shot reward specification via grounded natural language","volume-title":"International Conference on Machine Learning. PMLR","author":"Mahmoudieh","year":"2022"},{"key":"ref32","first-page":"23301","article-title":"Liv: Language-image representations and rewards for robotic control","volume-title":"International Conference on Machine Learning. 
PMLR","author":"Ma","year":"2023"},{"key":"ref33","article-title":"Open-world object manipulation using pre-trained vision-language models","author":"Stone","year":"2023","journal-title":"arXiv preprint"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02219"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/icra57147.2024.10610712"},{"key":"ref36","article-title":"Look before you leap: Unveiling the power of gpt-4v in robotic vision-language planning","author":"Hu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Drivevlm: The convergence of autonomous driving and large vision-language models","author":"Tian","year":"2024","journal-title":"arXiv preprint"},{"key":"ref38","article-title":"Say tap: Language to quadrupedal locomotion","author":"Tang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref39","article-title":"Vint: A foundation model for visual navigation","author":"Shah","year":"2023","journal-title":"arXiv preprint"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3610978.3641080"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/IROS58592.2024.10801816"},{"key":"ref43","article-title":"Quadrupedgpt: Towards a versatile quadruped agent in open-ended worlds","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2010.5509646"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3068908"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2023.3275384"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abk2822"},{"key":"ref49","first-page":"403","article-title":"Legged locomotion in challenging terrains using egocentric vision","volume-title":"Conference on robot 
learning. PMLR","author":"Agarwal","year":"2023"},{"key":"ref50","article-title":"Robot parkour learning","author":"Zhuang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref51","article-title":"Extreme parkour with legged robots","author":"Cheng","year":"2023","journal-title":"arXiv preprint"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3459797"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811373"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1177\/02783649231224053"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3290509"},{"key":"ref56","first-page":"2226","article-title":"Day-dreamer: World models for physical robot learning","volume-title":"Conference on Robot Learning. PMLR","author":"Wu","year":"2023"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161144"},{"key":"ref58","article-title":"Hybrid internal model: A simple and efficient learner for agile legged locomotion","author":"Long","year":"2023","journal-title":"arXiv preprint"},{"key":"ref59","article-title":"Learning h-infinity locomotion control","author":"Long","year":"2024","journal-title":"arXiv preprint"},{"key":"ref60","article-title":"Slr: Learning quadruped locomotion without privileged information","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref61","volume-title":"Llava-next: Improved reasoning, ocr, and world knowledge","author":"Liu","year":"2024"},{"key":"ref62","author":"Qin","year":"2019","journal-title":"A general optimization-based framework for local odometry estimation with multiple sensors"},{"key":"ref63","article-title":"NoMaD: Goal Masked Diffusion Policies for Navigation and Exploration","volume-title":"arXiv pre-print","author":"Sridhar","year":"2023"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3151396"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation 
(ICRA)","location":"Atlanta, GA, USA","start":{"date-parts":[[2025,5,19]]},"end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11128428.pdf?arnumber=11128428","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:07:46Z","timestamp":1756879666000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11128428\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":64,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11128428","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}