{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T04:15:32Z","timestamp":1772684132559,"version":"3.50.1"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11246030","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"3986-3993","source":"Crossref","is-referenced-by-count":3,"title":["RoboNurse-VLA: Robotic Scrub Nurse System based on Vision-Language-Action Model"],"prefix":"10.1109","author":[{"given":"Shunlei","family":"Li","sequence":"first","affiliation":[{"name":"Multi-Scale Medical Robotics Centre, Ltd., The Chinese University of Hong Kong,Hong Kong,China"}]},{"given":"Jin","family":"Wang","sequence":"additional","affiliation":[{"name":"Humanoids and Human-Centered Mechatronics (HHCM), Istituto Italiano di Tecnologia,Genoa,Italy,16163"}]},{"given":"Rui","family":"Dai","sequence":"additional","affiliation":[{"name":"Humanoids and Human-Centered Mechatronics (HHCM), Istituto Italiano di Tecnologia,Genoa,Italy,16163"}]},{"given":"Wanyu","family":"Ma","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Department of Surgery,Hong Kong,China"}]},{"given":"Wing Yin","family":"Ng","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Department of Surgery,Hong 
Kong,China"}]},{"given":"Yingbai","family":"Hu","sequence":"additional","affiliation":[{"name":"Multi-Scale Medical Robotics Centre, Ltd., The Chinese University of Hong Kong,Hong Kong,China"}]},{"given":"Zheng","family":"Li","sequence":"additional","affiliation":[{"name":"Multi-Scale Medical Robotics Centre, Ltd., The Chinese University of Hong Kong,Hong Kong,China"}]}],"member":"263","reference":[{"issue":"4","key":"ref1","first-page":"16","article-title":"What are the non-technical skills used by scrub nurses? an integrated review","volume":"27","author":"Kang","year":"2014","journal-title":"ACORN: the journal of perioperative nursing in Australia"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijnurstu.2014.01.007"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1136\/bmj.39548.418009.80"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/BIOROB.2010.5626941"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICSMC.2011.6083972"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1515\/cdbme-2021-1035"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s00464-021-08569-w"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.20965\/jaciii.2022.p0074"},{"key":"ref9","article-title":"Sorting surgical tools from a cluttered tray - object detection and occlusion reasoning","volume-title":"Master\u2019s thesis","author":"Lavado","year":"2018"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-97546-3_14"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICEAST52143.2021.9426258"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/AIM52237.2022.9863381"},{"key":"ref13","first-page":"1282","article-title":"Quantifying generalization in reinforcement learning","volume-title":"Proceedings of the 36th International Conference on Machine Learning","volume":"97","author":"Cobbe"},{"key":"ref14","article-title":"Diffusion policy: Visuomotor policy learning via action 
diffusion","author":"Chi","year":"2024"},{"key":"ref15","article-title":"Decomposing the generalization gap in imitation learning for visual robotic manipulation","author":"Xie","year":"2023"},{"key":"ref16","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.1109\/ICCV51070.2023.01100","article-title":"Sigmoid loss for language image pre-training","author":"Zhai","year":"2023"},{"key":"ref18","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref19","article-title":"Octo: An open-source generalist robot policy","author":"Team","year":"2024"},{"key":"ref20","article-title":"R3m: A universal visual representation for robot manipulation","author":"Nair","year":"2022"},{"key":"ref21","doi-asserted-by":"crossref","DOI":"10.15607\/RSS.2023.XIX.032","article-title":"Language-driven representation learning for robotics","author":"Karamcheti","year":"2023"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/IROS58592.2024.10801816"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/icra55743.2025.11127736"},{"key":"ref24","article-title":"Open-world object manipulation using pre-trained vision-language models","author":"Stone","year":"2023"},{"key":"ref25","doi-asserted-by":"crossref","DOI":"10.1109\/IROS58592.2024.10802344","article-title":"Autonomous behavior planning for humanoid loco-manipulation through grounded language model","author":"Wang","year":"2024"},{"key":"ref26","article-title":"HYPERmotion: Learning hybrid behavior planning for autonomous loco-manipulation","volume-title":"8th Annual Conference on Robot Learning","author":"Wang"},{"key":"ref27","article-title":"Sam 2: Segment anything in images and 
videos","author":"Ravi","year":"2024"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102602"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3369699"},{"key":"ref32","first-page":"19 730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"key":"ref33","article-title":"Obelics: An open web-scale filtered dataset of interleaved image-text documents","volume":"36","author":"Lauren\u00e7on","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref34","article-title":"Prismatic vlms: Investigating the design space of visually-conditioned language models","author":"Karamcheti","year":"2024"},{"key":"ref35","article-title":"Open-vla: An open-source vision-language-action model","author":"Kim","year":"2024"},{"key":"ref36","article-title":"Open x-embodiment: Robotic learning datasets and rt-x models","author":"Padalkar","year":"2023"},{"key":"ref37","article-title":"Vision-language foundation models as effective robot imitators","author":"Li","year":"2023"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-42227-1_5"},{"key":"ref39","article-title":"Labelimg","year":"2015"},{"key":"ref40","article-title":"Rt-2: Vision-language-action models transfer web knowledge to robotic control","author":"Brohan","year":"2023"},{"key":"ref41","article-title":"Lora: Low-rank adaptation of large language models","author":"Hu","year":"2021"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Hangzhou, China","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ 
International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11246030.pdf?arnumber=11246030","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:39:27Z","timestamp":1766061567000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11246030\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11246030","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}