{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T15:28:40Z","timestamp":1772724520433,"version":"3.50.1"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100018537","name":"National Science and Technology Major Project","doi-asserted-by":"publisher","award":["2021YFF1201200"],"award-info":[{"award-number":["2021YFF1201200"]}],"id":[{"id":"10.13039\/501100018537","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372316"],"award-info":[{"award-number":["62372316"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100013077","name":"Sichuan Science and Technology Program Key Project","doi-asserted-by":"publisher","award":["2024YFHZ0091"],"award-info":[{"award-number":["2024YFHZ0091"]}],"id":[{"id":"10.13039\/501100013077","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Med. Imaging"],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1109\/tmi.2024.3440311","type":"journal-article","created":{"date-parts":[[2024,8,8]],"date-time":"2024-08-08T18:34:12Z","timestamp":1723142052000},"page":"310-319","source":"Crossref","is-referenced-by-count":12,"title":["Boosting Your Context by Dual Similarity Checkup for In-Context Learning Medical Image Segmentation"],"prefix":"10.1109","volume":"44","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-6830-5849","authenticated-orcid":false,"given":"Jun","family":"Gao","sequence":"first","affiliation":[{"name":"College of Computer Science, Sichuan University, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6032-8548","authenticated-orcid":false,"given":"Qicheng","family":"Lao","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Beijing University of Posts and Telecommunications (BUPT), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4919-5246","authenticated-orcid":false,"given":"Qingbo","family":"Kang","sequence":"additional","affiliation":[{"name":"West China Biomedical Big Data Center, West China Hospital, Sichuan University, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8092-6353","authenticated-orcid":false,"given":"Paul","family":"Liu","sequence":"additional","affiliation":[{"name":"Stork Healthcare, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7039-8542","authenticated-orcid":false,"given":"Chenlin","family":"Du","sequence":"additional","affiliation":[{"name":"School of Biomedical Engineering, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8136-9816","authenticated-orcid":false,"given":"Kang","family":"Li","sequence":"additional","affiliation":[{"name":"West China Biomedical Big Data Center, West China Hospital, Sichuan University, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3708-1727","authenticated-orcid":false,"given":"Le","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Computer Science, Sichuan University, Chengdu, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2019.2959609"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-020-01008-z"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2023.3264513"},{"key":"ref4","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Brown"},{"key":"ref5","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref6","article-title":"Goat: Fine-tuned LLaMA outperforms GPT-4 on arithmetic tasks","author":"Liu","year":"2023","journal-title":"arXiv:2305.14201"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00660"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00110"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-024-44824-z"},{"key":"ref11","article-title":"SAM-Med2D","author":"Cheng","year":"2023","journal-title":"arXiv:2308.16184"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i7.28514"},{"key":"ref13","article-title":"Medical SAM adapter: Adapting segment anything model for medical image segmentation","author":"Wu","year":"2023","journal-title":"arXiv:2304.12620"},{"key":"ref14","article-title":"SAMUS: Adapting segment anything model for clinically-friendly and generalizable ultrasound image segmentation","author":"Lin","year":"2023","journal-title":"arXiv:2309.06824"},{"key":"ref15","first-page":"25005","article-title":"Visual prompting via image inpainting","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","volume":"35","author":"Bar"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01960"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.dib.2019.104863"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2022.09.019"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.micron.2018.01.010"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1117\/1.JMI.2.4.044003"},{"key":"ref21","article-title":"Personalize segment anything model with one shot","author":"Zhang","year":"2023","journal-title":"arXiv:2305.03048"},{"key":"ref22","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Ouyang"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.201"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.759"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.191"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.203"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.378"},{"key":"ref28","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","volume":"35","author":"Alayrac"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.564"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.deelio-1.10"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.622"},{"key":"ref33","first-page":"17773","article-title":"What makes good examples for visual in-context learning?","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Zhang"},{"key":"ref34","article-title":"Exploring effective factors for improving visual in-context learning","author":"Sun","year":"2023","journal-title":"arXiv:2304.04748"},{"key":"ref35","first-page":"63758","article-title":"Towards in-context scene understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Balazevic"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref37","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI.2018.8363547"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1155\/2017\/4037190"},{"issue":"86","key":"ref40","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.2307\/1932409"}],"container-title":["IEEE Transactions on Medical Imaging"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/42\/10820125\/10630863.pdf?arnumber=10630863","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,14]],"date-time":"2025-01-14T19:52:18Z","timestamp":1736884338000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10630863\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1]]},"references-count":41,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tmi.2024.3440311","relation":{},"ISSN":["0278-0062","1558-254X"],"issn-type":[{"value":"0278-0062","type":"print"},{"value":"1558-254X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1]]}}}