{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,31]],"date-time":"2026-01-31T03:30:34Z","timestamp":1769830234125,"version":"3.49.0"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,5,27]],"date-time":"2024-05-27T00:00:00Z","timestamp":1716768000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,5,27]],"date-time":"2024-05-27T00:00:00Z","timestamp":1716768000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,5,27]]},"DOI":"10.1109\/isbi56570.2024.10635843","type":"proceedings-article","created":{"date-parts":[[2024,8,22]],"date-time":"2024-08-22T17:49:54Z","timestamp":1724348994000},"page":"1-5","source":"Crossref","is-referenced-by-count":1,"title":["Envisioning Medclip: A Deep Dive into Explainability for Medical Vision-Language Models"],"prefix":"10.1109","author":[{"given":"Anees Ur Rehman","family":"Hashmi","sequence":"first","affiliation":[{"name":"Mohamed bin Zayed University of Artificial Intelligence,Abu Dhabi,UAE"}]},{"given":"Dwarikanath","family":"Mahapatra","sequence":"additional","affiliation":[{"name":"Inception Institute of Artificial Intelligence,Abu Dhabi,UAE"}]},{"given":"Mohammad","family":"Yaqub","sequence":"additional","affiliation":[{"name":"Mohamed bin Zayed University of Artificial Intelligence,Abu Dhabi,UAE"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s13347-021-00477-0"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2019.12.012"},{"key":"ref3","article-title":"Deep inside convolutional networks: Visualizing image classification models and saliency maps","author":"Simonyan","year":"2013"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0130140"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.319"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2798607"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-021-02166-7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.81"},{"key":"ref9","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"International conference on machine learning","author":"Jia"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2024.3369699"},{"key":"ref12","article-title":"Contrastive learning of medical visual representations from paired images and text","volume-title":"Machine Learning for Healthcare Conference","author":"Zhang"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00391"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.256"},{"key":"ref15","article-title":"Roentgen: vision-language foundation model for chest x-ray generation","author":"Chambon","year":"2022"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.iswa.2022.200160"},{"key":"ref17","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2022.102470"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.7312\/haza92762-003"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10590-1_53"},{"key":"ref21","article-title":"Axiomatic attribution for deep networks","volume-title":"International conference on machine learning","author":"Sundararajan"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2016.35"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1038\/s41597-019-0322-0"},{"key":"ref27","article-title":"Pytorch: High-performance deep learning library","author":"Paszke","year":"2019"},{"key":"ref28","article-title":"Captum: A unified model interpretability library for pytorch","author":"Kokhlikyan","year":"2020"}],"event":{"name":"2024 IEEE International Symposium on Biomedical Imaging (ISBI)","location":"Athens, Greece","start":{"date-parts":[[2024,5,27]]},"end":{"date-parts":[[2024,5,30]]}},"container-title":["2024 IEEE International Symposium on Biomedical Imaging (ISBI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10635099\/10635102\/10635843.pdf?arnumber=10635843","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,2]],"date-time":"2024-09-02T04:28:20Z","timestamp":1725251300000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10635843\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,27]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/isbi56570.2024.10635843","relation":{},"subject":[],"published":{"date-parts":[[2024,5,27]]}}}