{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T03:01:07Z","timestamp":1771038067247,"version":"3.50.1"},"publisher-location":"Cham","reference-count":17,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031456756","type":"print"},{"value":"9783031456763","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,10,15]],"date-time":"2023-10-15T00:00:00Z","timestamp":1697328000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,15]],"date-time":"2023-10-15T00:00:00Z","timestamp":1697328000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-45676-3_23","type":"book-chapter","created":{"date-parts":[[2023,10,14]],"date-time":"2023-10-14T08:02:16Z","timestamp":1697270536000},"page":"224-233","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["Is Visual Explanation with\u00a0Grad-CAM More Reliable for\u00a0Deeper Neural Networks? A\u00a0Case Study with\u00a0Automatic Pneumothorax Diagnosis"],"prefix":"10.1007","author":[{"given":"Zirui","family":"Qiu","sequence":"first","affiliation":[]},{"given":"Hassan","family":"Rivaz","sequence":"additional","affiliation":[]},{"given":"Yiming","family":"Xiao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,15]]},"reference":[{"key":"23_CR1","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1016\/j.inffus.2019.12.012","volume":"58","author":"AB Arrieta","year":"2020","unstructured":"Arrieta, A.B., et al.: Explainable artificial intelligence (XAI): concepts, taxonomies, opportunities and challenges toward responsible AI. Inf. Fusion 58, 82\u2013115 (2020)","journal-title":"Inf. Fusion"},{"key":"23_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2021.102125","volume":"72","author":"E \u00c7all\u0131","year":"2021","unstructured":"\u00c7all\u0131, E., Sogancioglu, E., van Ginneken, B., van Leeuwen, K.G., Murphy, K.: Deep learning for chest x-ray analysis: a survey. Med. Image Anal. 72, 102125 (2021)","journal-title":"Med. Image Anal."},{"key":"23_CR3","unstructured":"Chen, J., et al.: Transunet: transformers make strong encoders for medical image segmentation. arXiv preprint arXiv:2102.04306 (2021)"},{"key":"23_CR4","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"23_CR5","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"1","key":"23_CR6","doi-asserted-by":"publisher","first-page":"11352","DOI":"10.1038\/s41598-022-15231-5","volume":"12","author":"Y-H Lee","year":"2022","unstructured":"Lee, Y.-H., Won, J.H., Kim, S., Auh, Q.-S., Noh, Y.-K.: Advantages of deep learning with convolutional neural network in detecting disc displacement of the temporomandibular joint in magnetic resonance imaging. Sci. Rep. 12(1), 11352 (2022)","journal-title":"Sci. Rep."},{"key":"23_CR7","doi-asserted-by":"crossref","unstructured":"Mijwil, M.M.: Implementation of machine learning techniques for the classification of lung x-ray images used to detect covid-19 in humans. Iraqi J. Sci. 2099\u20132109 (2021)","DOI":"10.24996\/ijs.2021.62.6.35"},{"key":"23_CR8","unstructured":"Rong, Y., et al.: Towards human-centered explainable AI: user studies for model explanations. arXiv preprint arXiv:2210.11584 (2022)"},{"key":"23_CR9","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-cam: visual explanations from deep networks via gradient-based localization. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 618\u2013626 (2017)","DOI":"10.1109\/ICCV.2017.74"},{"issue":"16","key":"23_CR10","doi-asserted-by":"publisher","first-page":"3591","DOI":"10.3390\/jcm10163591","volume":"10","author":"H Seo","year":"2021","unstructured":"Seo, H., Hwang, J.J., Jeong, T., Shin, J.: Comparison of deep learning models for cervical vertebral maturation stage classification on lateral cephalometric radiographs. J. Clin. Med. 10(16), 3591 (2021)","journal-title":"J. Clin. Med."},{"key":"23_CR11","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"23_CR12","unstructured":"Steiner, A., Kolesnikov, A., Zhai, X., Wightman, R., Uszkoreit, J., Beyer, L.: How to train your vit? data, augmentation, and regularization in vision transformers. arXiv preprint arXiv:2106.10270 (2021)"},{"key":"23_CR13","unstructured":"Sun, S., Woerner, S., Maier, A., Koch, L.M., Baumgartner, C.F.: Inherently interpretable multi-label classification using class-specific counterfactuals. arXiv preprint arXiv:2303.00500 (2023)"},{"issue":"1","key":"23_CR14","doi-asserted-by":"publisher","first-page":"231","DOI":"10.1002\/mp.15328","volume":"49","author":"Y Tian","year":"2022","unstructured":"Tian, Y., Wang, J., Yang, W., Wang, J., Qian, D.: Deep multi-instance transfer learning for pneumothorax classification in chest x-ray images. Med. Phys. 49(1), 231\u2013243 (2022)","journal-title":"Med. Phys."},{"key":"23_CR15","doi-asserted-by":"crossref","unstructured":"Wollek, A., et al.: Attention-based saliency maps improve interpretability of pneumothorax classification. Radiol. Artif. Intell. 5(2), e220187 (2022)","DOI":"10.1148\/ryai.220187"},{"key":"23_CR16","unstructured":"Yuan, H., Jiang, P.-T., Zhao, G.: Human-guided design to explain deep learning-based pneumothorax classifier. In: Medical Imaging with Deep Learning, Short Paper Track (2023)"},{"key":"23_CR17","unstructured":"Zhou, D., et al.: Deepvit: towards deeper vision transformer. arXiv preprint arXiv:2103.11886 (2021)"}],"container-title":["Lecture Notes in Computer Science","Machine Learning in Medical Imaging"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-45676-3_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,13]],"date-time":"2024-03-13T11:52:07Z","timestamp":1710330727000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-45676-3_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,15]]},"ISBN":["9783031456756","9783031456763"],"references-count":17,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-45676-3_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10,15]]},"assertion":[{"value":"15 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MLMI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Machine Learning in Medical Imaging","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mlmi-med2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sites.google.com\/view\/mlmi2023?pli=1","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"139","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"93","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"67% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}