{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T08:10:40Z","timestamp":1771402240344,"version":"3.50.1"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,9,14]],"date-time":"2025-09-14T00:00:00Z","timestamp":1757808000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,9,14]],"date-time":"2025-09-14T00:00:00Z","timestamp":1757808000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,9,14]]},"DOI":"10.1109\/icipw68931.2025.11386296","type":"proceedings-article","created":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T21:05:43Z","timestamp":1771362343000},"page":"310-315","source":"Crossref","is-referenced-by-count":0,"title":["Multimodal Monocular 3D Object Localization for Human-Robot Interaction"],"prefix":"10.1109","author":[{"given":"Ari","family":"Wahl","sequence":"first","affiliation":[{"name":"Fraunhofer HHI,Berlin,Germany"}]},{"given":"Dorian","family":"Gawlinski","sequence":"additional","affiliation":[{"name":"Fraunhofer HHI,Berlin,Germany"}]},{"given":"David","family":"Przewozny","sequence":"additional","affiliation":[{"name":"Fraunhofer HHI,Berlin,Germany"}]},{"given":"Paul","family":"Chojecki","sequence":"additional","affiliation":[{"name":"Fraunhofer HHI,Berlin,Germany"}]},{"given":"Felix","family":"Bie\u00dfmann","sequence":"additional","affiliation":[{"name":"Berliner Hochschule f\u00fcr Technik (BHT),Berlin,Germany"}]},{"given":"Sebastian","family":"Bosse","sequence":"additional","affiliation":[{"name":"Fraunhofer HHI,Berlin,Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/Humanoids57100.2023.10375211"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1177\/02783649241281508"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.025"},{"key":"ref4","article-title":"RT-2: Vision-language-action models transfer web knowledge to robotic control","author":"Brohan","year":"2023","journal-title":"arXiv preprint arXiv:2307.15818"},{"key":"ref5","article-title":"Bringing the RT-1-X foundation model to a SCARA robot","author":"Salzer","year":"2024","journal-title":"arXiv preprint arXiv:2409.03299"},{"key":"ref6","article-title":"OpenVLA: An open-source vision-language-action model","author":"Kim","year":"2024","journal-title":"arXiv preprint arXiv:2406.09246"},{"key":"ref7","article-title":"CogACT: A foundational vision-language-action model for synergizing cognition and action in robotic manipulation","author":"Li","year":"2024","journal-title":"arXiv preprint arXiv:2411.19650"},{"key":"ref8","article-title":"\u03c0_0: A vision-language-action flow model for general robot control","author":"Black","year":"2024","journal-title":"arXiv preprint arXiv:2410.24164"},{"key":"ref9","article-title":"DINOv2: Learning robust visual features without supervision","author":"Oquab","year":"2024","journal-title":"arXiv preprint arXiv:2304.07193"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.079"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01807"},{"key":"ref13","article-title":"Language embedded radiance fields for zero-shot task-oriented grasping","author":"Rashid","year":"2023","journal-title":"arXiv preprint arXiv:2309.07970"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10611174"},{"key":"ref15","article-title":"RoboPoint: A vision-language model for spatial affordance prediction for robotics","author":"Yuan","year":"2024","journal-title":"arXiv preprint arXiv:2406.10721"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.00018"},{"key":"ref17","article-title":"MOKA: Open-world robotic manipulation through mark-based visual prompting","author":"Liu","year":"2024","journal-title":"arXiv preprint arXiv:2403.03174"},{"key":"ref18","article-title":"VFMM3D: Releasing the potential of image by vision foundation model for monocular 3D object detection","author":"Ding","year":"2024","journal-title":"arXiv preprint arXiv:2404.09431"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00840"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00506"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58580-8_38"},{"key":"ref22","article-title":"Open vocabulary monocular 3D object detection","author":"Yao","year":"2024","journal-title":"arXiv preprint arXiv:2411.16833"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.106"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.52202\/079017-2475"}],"event":{"name":"2025 IEEE International Conference on Image Processing Workshops (ICIPW)","location":"Anchorage, AK, USA","start":{"date-parts":[[2025,9,14]]},"end":{"date-parts":[[2025,9,17]]}},"container-title":["2025 IEEE International Conference on Image Processing Workshops (ICIPW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11385856\/11385840\/11386296.pdf?arnumber=11386296","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T07:16:22Z","timestamp":1771398982000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11386296\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,14]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icipw68931.2025.11386296","relation":{},"subject":[],"published":{"date-parts":[[2025,9,14]]}}}