{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T16:36:32Z","timestamp":1757608592007,"version":"3.44.0"},"reference-count":50,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11128131","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"16042-16049","source":"Crossref","is-referenced-by-count":0,"title":["Segment Any Repeated Object"],"prefix":"10.1109","author":[{"given":"Yushi","family":"Liu","sequence":"first","affiliation":[{"name":"Bosch Research"}]},{"given":"Christian","family":"Graf","sequence":"additional","affiliation":[{"name":"Bosch Research"}]},{"given":"Markus","family":"Spies","sequence":"additional","affiliation":[{"name":"Bosch Research"}]},{"given":"Margret","family":"Keuper","sequence":"additional","affiliation":[{"name":"University of Mannheim & Max Planck Institute for Informatics,Saarland Informatics Campus"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"volume-title":"Grounding dino: Marrying dino with grounded pre-training for open-set object detection","year":"2023","author":"Liu","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610797"},{"key":"ref4","article-title":"Countgd: Multi-modal open-world counting","author":"Amini-Naieni","year":"2024","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02071"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3500882"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160846"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1023\/b:visi.0000029664.99615.94"},{"key":"ref9","volume-title":"CoRR","volume":"\/2102.12213","author":"Pelosin","year":"2021"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.261"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP42928.2021.9506799"},{"journal-title":"Novel 3d scene understanding applications from recurrence in a single image","year":"2022","author":"Zhang","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2017.13"},{"key":"ref14","volume-title":"CoRR","volume":"abs\/1710.06231","author":"Abbeloos","year":"2017"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2015.7298799"},{"journal-title":"Grounded sam: Assembling open-world models for diverse visual tasks","year":"2024","author":"Ren","key":"ref16"},{"volume-title":"Simple open-vocabulary object detection with vision transformers","year":"2022","author":"Minderer","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00695"},{"journal-title":"Dounseen: Zero-shot object detection for robotic grasping","year":"2023","author":"Gouda","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58555-6_38"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.12"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00723"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10605-2_29"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2007.383486"},{"key":"ref25","first-page":"45","volume-title":"Image and Vision Computing","volume":"86","author":"Santra","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3265051"},{"key":"ref27","doi-asserted-by":"crossref","DOI":"10.1109\/CVPR46437.2021.00340","volume-title":"Learning to count everything","author":"Ranjan","year":"2021"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01730"},{"key":"ref29","article-title":"Can sam count anything? an empirical study on sam counting","author":"Ma","year":"2023","journal-title":"arXiv preprint"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01492"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3611789"},{"volume-title":"Unsupervised pre-training for person re-identification","year":"2020","author":"Fu","key":"ref32"},{"volume-title":"Self-supervised pre-training for transformer-based person re-identification.","author":"Luo","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33718-5_7"},{"volume-title":"Relation preserving triplet mining for stabilising the triplet loss in re-identification systems","year":"2021","author":"Ghosh","key":"ref35"},{"key":"ref36","doi-asserted-by":"crossref","DOI":"10.1109\/CVPRW53098.2021.00468","volume-title":"A strong baseline for vehicle re-identification","author":"Huynh","year":"2021"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/cvprw.2018.00060"},{"key":"ref38","article-title":"Neural outlier rejection for self-supervised keypoint learning","author":"Tang","year":"2019","journal-title":"arXiv preprint"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2019.09.100"},{"journal-title":"Dinov2: Learning robust visual features without supervision","year":"2023","author":"Oquab","key":"ref40"},{"journal-title":"Deep vit features as dense visual descriptors","year":"2022","author":"Amir","key":"ref41"},{"journal-title":"Fast segment anything","year":"2023","author":"Zhao","key":"ref42"},{"key":"ref43","article-title":"Faster segment anything: Towards lightweight sam for mobile applications","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref44","first-page":"871","article-title":"Learning dense visual descriptors using image augmentations for robot manipulation tasks","volume-title":"Proceedings of The 6th Conference on Robot Learning, ser. Proceedings of Machine Learning Research","volume":"205","author":"Graf","year":"2023"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/BF01581239"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.204"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33712-3_56"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341229"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2025,5,19]]},"location":"Atlanta, GA, USA","end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11128131.pdf?arnumber=11128131","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:06:50Z","timestamp":1756879610000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11128131\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":50,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11128131","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}