{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:28:20Z","timestamp":1763191700590,"version":"3.45.0"},"reference-count":48,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100020487","name":"Nature","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100020487","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11227665","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["SISAM: An Enhanced Promptable Segmentation Model with Expanded Dataset for Surgical Instruments"],"prefix":"10.1109","author":[{"given":"Yuchu","family":"Liu","sequence":"first","affiliation":[{"name":"South China Normal University,School of Computer Science,Guangzhou,China"}]},{"given":"Jing","family":"Xiao","sequence":"additional","affiliation":[{"name":"South China Normal University,School of Computer Science,Guangzhou,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102918"},{"key":"ref3","article-title":"Accuracy of segment-anything model (sam) in medical image segmentation tasks","volume":"2","author":"He","year":"2023"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.103061"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-024-44824-z"},{"journal-title":"Sam-med2d","year":"2023","author":"Cheng","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-020-01008-z"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2025.103547"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2020","author":"Dosovitskiy","key":"ref12"},{"key":"ref13","first-page":"17864","article-title":"Per-pixel classification is not all you need for semantic segmentation","volume":"34","author":"Cheng","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_17"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref16","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2024.108238"},{"article-title":"Sa-med2d-20m dataset: Segment anything in 2d medical imaging with 20 million masks","year":"2023","author":"Ye","key":"ref18"},{"article-title":"Lora: Low-rank adaptation of large language models","year":"2021","author":"Hu","key":"ref19"},{"article-title":"Sam-unet: Enhancing zero-shot segmentation of sam for universal medical images","year":"2024","author":"Yang","key":"ref20"},{"article-title":"Ternausnet: U-net with vgg11 encoder pre-trained on imagenet for image segmentation","year":"2018","author":"Iglovikov","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32254-0_49"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6850"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-59716-0_57"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811873"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI53787.2023.10230819"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i7.28514"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-47401-9_23"},{"journal-title":"2017 robotic instrument segmentation challenge","year":"2019","author":"Allan","key":"ref30"},{"journal-title":"2018 robotic scene segmentation challenge","year":"2020","author":"Allan","key":"ref31"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"article-title":"What do vision transformers learn? a visual exploration","year":"2022","author":"Ghiasi","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02037"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-16449-1_53"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2021.3057884"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-67835-7_19"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-59716-0_57"},{"article-title":"Segment and track anything","year":"2023","author":"Cheng","key":"ref39"},{"article-title":"Scalable multi-object identification for video object segmentation","year":"2022","author":"Yang","key":"ref40"},{"key":"ref41","first-page":"2491","article-title":"Associating objects with transformers for video object segmentation","volume":"34","author":"Yang","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/178"},{"key":"ref43","first-page":"2","article-title":"Towards multi-object association from foreground-background integration","volume-title":"CVPR Workshops","volume":"2","author":"Yang"},{"key":"ref44","first-page":"36324","article-title":"Decoupling features in hierarchical propagation for video object segmentation","volume":"35","author":"Yang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25085-9_25"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00147"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"journal-title":"Fully convolutional neural networks for volumetric medical image segmentation","year":"2016","key":"ref48"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11227665.pdf?arnumber=11227665","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:25:11Z","timestamp":1763191511000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11227665\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":48,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11227665","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}