{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T05:58:48Z","timestamp":1778047128395,"version":"3.51.4"},"reference-count":48,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62306254"],"award-info":[{"award-number":["62306254"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Foshan Hong Kong University of Science and Technology (HKUST) Projects","award":["FSUST21-HKUST10E"],"award-info":[{"award-number":["FSUST21-HKUST10E"]}]},{"name":"Foshan Hong Kong University of Science and Technology (HKUST) Projects","award":["FSUST21-HKUST11E"],"award-info":[{"award-number":["FSUST21-HKUST11E"]}]},{"name":"Project of Hetao Shenzhen Hong Kong Science and Technology Innovation Cooperation Zone","award":["HZQB-KCZYB-2020083"],"award-info":[{"award-number":["HZQB-KCZYB-2020083"]}]},{"DOI":"10.13039\/501100010839","name":"Guangdong Provincial Science and Technology Fund","doi-asserted-by":"publisher","award":["2023A0505030004"],"award-info":[{"award-number":["2023A0505030004"]}],"id":[{"id":"10.13039\/501100010839","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Med. Imaging"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tmi.2024.3518067","type":"journal-article","created":{"date-parts":[[2024,12,23]],"date-time":"2024-12-23T19:19:15Z","timestamp":1734981555000},"page":"1711-1722","source":"Crossref","is-referenced-by-count":11,"title":["MultiEYE: Dataset and Benchmark for OCT-Enhanced Retinal Disease Recognition From Fundus Images"],"prefix":"10.1109","volume":"44","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-4707-6828","authenticated-orcid":false,"given":"Lehan","family":"Wang","sequence":"first","affiliation":[{"name":"Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology, Hong Kong, SAR, China"}]},{"given":"Chongchong","family":"Qi","sequence":"additional","affiliation":[{"name":"Yunnan United Vision Innovations Technology Company Ltd., Kunming, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2614-0418","authenticated-orcid":false,"given":"Chubin","family":"Ou","sequence":"additional","affiliation":[{"name":"Guangdong Weiren Meditech Company Ltd., Foshan, China"}]},{"given":"Lin","family":"An","sequence":"additional","affiliation":[{"name":"Guangdong Weiren Meditech Company Ltd., Foshan, China"}]},{"given":"Mei","family":"Jin","sequence":"additional","affiliation":[{"name":"Department of Ophthalmology, Guangdong Hospital of Integrated Traditional Chinese and Western Medicine, Foshan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8137-5362","authenticated-orcid":false,"given":"Xiangbin","family":"Kong","sequence":"additional","affiliation":[{"name":"Department of Ophthalmology, The Second People’s Hospital of Foshan, Foshan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1105-8083","authenticated-orcid":false,"given":"Xiaomeng","family":"Li","sequence":"additional","affiliation":[{"name":"Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology, Hong Kong, SAR, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1159\/000329597"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/IST.2015.7294517"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/diagnostics12051100"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1155\/2022\/6556867"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-43990-2_60"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s11517-018-1915-z"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32239-7_18"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475418"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-16525-2_6"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2022.3171523"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICoDT255437.2022.9787482"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/s41433-020-01263-6"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-16638-0_4"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0237352"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1167\/iovs.07-1257"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2021.3059956"},{"key":"ref17","article-title":"Interpretable bilingual multimodal large language model for diverse biomedical tasks","author":"Wang","year":"2024","journal-title":"arXiv:2410.18387"},{"key":"ref18","article-title":"GPT-4 technical report","volume-title":"arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01297"},{"key":"ref20","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2024.103357"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01438"},{"key":"ref23","article-title":"Text descriptions are compressive and invariant representations for visual learning","author":"Feng","year":"2023","journal-title":"arXiv:2307.04317"},{"key":"ref24","article-title":"A ChatGPT aided explainable framework for zero-shot medical image diagnosis","author":"Liu","year":"2023","journal-title":"arXiv:2307.01981"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01839"},{"key":"ref26","article-title":"Label-free concept bottleneck models","author":"Oikarinen","year":"2023","journal-title":"arXiv:2304.06129"},{"key":"ref27","article-title":"Energy-based concept bottleneck models: Unifying prediction, concept intervention, and conditional interpretations","volume-title":"Proc. ICLR","author":"Xu"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-47401-9_22"},{"key":"ref29","article-title":"Few-shot medical image classification with simple shape and texture text descriptors using vision-language models","author":"Byra","year":"2023","journal-title":"arXiv:2308.04005"},{"key":"ref30","article-title":"Robust and interpretable medical image classifiers via concept bottleneck models","author":"Yan","year":"2023","journal-title":"arXiv:2310.03182"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1503.02531"},{"key":"ref32","first-page":"365","article-title":"Free lunch for surgical video understanding by distilling self-supervisions","volume-title":"Proc. MICCAI","author":"Ding"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2023.3327428"},{"key":"ref34","first-page":"668","article-title":"Spatial-division augmented occupancy field for bone shape reconstruction from biplanar X-rays","volume-title":"Proc. MICCAI","author":"Ji-xiang"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2021.3132291"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2021.3119385"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2022.106283"},{"key":"ref38","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proc. NeurIPS","author":"Lee"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.3390\/data6020014"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.3390\/data8020029"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-021-25138-w"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1016\/j.jfranklin.2008.04.009"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1038\/s41597-022-01564-3"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2019.06.011"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2019.106532"},{"key":"ref46","first-page":"135","article-title":"Dataset and evaluation algorithm design for goals challenge","volume-title":"Proc. Int. Workshop Ophthalmic Med. Image Anal.","author":"Fang"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/FG52635.2021.9667055"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00278"}],"container-title":["IEEE Transactions on Medical Imaging"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/42\/10948536\/10812957.pdf?arnumber=10812957","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T19:57:59Z","timestamp":1743796679000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10812957\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":48,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tmi.2024.3518067","relation":{},"ISSN":["0278-0062","1558-254X"],"issn-type":[{"value":"0278-0062","type":"print"},{"value":"1558-254X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}