{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,19]],"date-time":"2025-12-19T09:50:02Z","timestamp":1766137802959,"version":"3.40.3"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030874438"},{"type":"electronic","value":"9783030874445"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-87444-5_4","type":"book-chapter","created":{"date-parts":[[2021,9,21]],"date-time":"2021-09-21T02:17:10Z","timestamp":1632190630000},"page":"34-43","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["This Explains That: Congruent Image\u2013Report Generation for Explainable Medical Image Analysis with Cyclic Generative Adversarial Networks"],"prefix":"10.1007","author":[{"given":"Abhineet","family":"Pandey","sequence":"first","affiliation":[]},{"given":"Bhawna","family":"Paliwal","sequence":"additional","affiliation":[]},{"given":"Abhinav","family":"Dhall","sequence":"additional","affiliation":[]},{"given":"Ramanathan","family":"Subramanian","sequence":"additional","affiliation":[]},{"given":"Dwarikanath","family":"Mahapatra","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,9,21]]},"reference":[{"key":"4_CR1","doi-asserted-by":"crossref","unstructured":"Jing, B., Xie, P., Xing, E.: On the automatic generation of 
medical imaging reports. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (2018)","DOI":"10.18653\/v1\/P18-1240"},{"key":"4_CR2","doi-asserted-by":"crossref","unstructured":"Girshick, R., Donahue, J., Darrell, T., Malik, J.: Rich feature hierarchies for accurate object detection and semantic segmentation. In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.81"},{"key":"4_CR3","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. In: NIPS (2012)"},{"key":"4_CR4","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Das, A., Vedantam, R., Cogswell, M., Parikh, D., Batra, D.: Grad-CAM: why did you say that? visual explanations from deep networks via gradient-based localization. CoRR, abs\/1610.02391 (2016). http:\/\/arxiv.org\/abs\/1610.02391","DOI":"10.1109\/ICCV.2017.74"},{"key":"4_CR5","unstructured":"Paszke, A., et al.: Pytorch: an imperative style, high-performance deep learning library. In: Wallach, H., Larochelle, H., Beygelzimer, A., d'Alch\u00e9-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems, vol. 32, pp. 8024\u20138035. Curran Associates Inc, (2019). http:\/\/papers.neurips.cc\/paper\/9015-pytorch:an-imperative-style-high-performance-deep-learning-library.pdf"},{"key":"4_CR6","doi-asserted-by":"publisher","unstructured":"Cortez, P., Embrechts, M.J.: Using sensitivity analysis and visualization techniques to open black box data mining models. Inf. Sci. 225, 1\u201317 (2013). https:\/\/doi.org\/10.1016\/j.ins.2012.10.039","DOI":"10.1016\/j.ins.2012.10.039"},{"key":"4_CR7","doi-asserted-by":"publisher","unstructured":"Chattopadhay, A., Sarkar, A., Howlader, P., Balasubramanian, V.N.: Grad-cam++: generalized gradient based visual explanations for deep convolutional networks. In: 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), March 2018. 
https:\/\/doi.org\/10.1109\/WACV.2018.00097","DOI":"10.1109\/WACV.2018.00097"},{"key":"4_CR8","unstructured":"Reed, S.E., Akata, Z., Yan, X., Logeswaran, L., Schiele, B., Lee, H.: Generative adversarial text to image synthesis. CoRR, abs\/1605.05396, (2016). http:\/\/arxiv.org\/abs\/1605.05396"},{"key":"4_CR9","unstructured":"Xu, T., et al.: Attngan: fine-grained text to image generation with attentional generative adversarial networks. CoRR, abs\/1711.10485 (2017). http:\/\/arxiv.org\/abs\/1711.10485"},{"key":"4_CR10","unstructured":"Zhang, H., et al.: Stackgan: text to photo-realistic image synthesis with stacked generative adversarial networks. CoRR, vol. abs\/1612.03242 (2016). http:\/\/arxiv.org\/abs\/1612.03242"},{"key":"4_CR11","unstructured":"Liu, G., et al.: Clinically accurate chest x-ray report generation. CoRR, abs\/1904.02633 (2019). http:\/\/arxiv.org\/abs\/1904.02633"},{"key":"4_CR12","unstructured":"Adebayo, J., Gilmer, J., Muelly, M., Goodfellow, I., Hardt, M., Kim, B.: Sanity checks for saliency maps (2020)"},{"key":"4_CR13","doi-asserted-by":"crossref","unstructured":"Ribeiro, M.T., Singh, S., Guestrin, C.: Why should i trust you?: Explaining the predictions of any classifier (2016)","DOI":"10.1145\/2939672.2939778"},{"key":"4_CR14","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks (2020)"},{"key":"4_CR15","unstructured":"Chen, Z., Song, Y., Chang, T.H., Wan, X.: Generating radiology reports via memory-driven transformer (2020). In: Mahapatra, D., Poellinger, A., Shao, L., Reyes, M. (eds.) Interpretability-Driven Sample Selection Using Self Supervised Learning For Disease Classification And Segmentation, pp. 1\u201315. 
IEEE (2021)"},{"key":"4_CR16","doi-asserted-by":"crossref","unstructured":"Mahapatra, D., Poellinger, A., Shao, L., Reyes, M.: Interpretability-driven sample selection using self supervised learning for disease classification and segmentation. IEEE Trans. Med. Imag. (2021)","DOI":"10.1109\/TMI.2021.3061724"},{"key":"4_CR17","doi-asserted-by":"crossref","unstructured":"Krause, J., Johnson, J., Krishna, R., Fei-Fei, L.: A hierarchical approach for generating descriptive image paragraphs. In: Computer Vision and Pattern Recognition (CVPR) (2017)","DOI":"10.1109\/CVPR.2017.356"},{"key":"4_CR18","unstructured":"Chen, C., Li, D., Barnett, A., Su, J., Rudin, C.: This looks like that: deep learning for interpretable image recognition. CoRR, abs\/1806.10574 (2018). http:\/\/arxiv.org\/abs\/1806.10574"},{"key":"4_CR19","unstructured":"Rajpurkar, P., et al.: Chexnet: radiologist-level pneumonia detection on chest x-rays with deep learning. CoRR, abs\/1711.05225 (2017). http:\/\/arxiv.org\/abs\/1711.05225"},{"key":"4_CR20","doi-asserted-by":"publisher","unstructured":"Demner-Fushman, D., et al.: Preparing a collection of radiology examinations for distribution and retrieval. J. Am. Med. Inform. Assoc. 23(2), 304\u2013310. https:\/\/doi.org\/10.1093\/jamia\/ocv080","DOI":"10.1093\/jamia\/ocv080"},{"key":"4_CR21","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. CoRR, abs\/1512.03385 (2015). http:\/\/arxiv.org\/abs\/1512.03385"},{"key":"4_CR22","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition (2015)"},{"key":"4_CR23","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, pp. 311\u2013318. 
Association for Computational Linguistics (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"4_CR24","unstructured":"Lin, C.-Y.: Rouge: a package for automatic evaluation of summaries. In: Proceedings of the ACL-04 Workshop on Text Summarization Branches Out, vol. 8. Barcelona, Spain (2004)"},{"key":"4_CR25","doi-asserted-by":"crossref","unstructured":"Xue, Y., et al.: Multimodal recurrent model with attention for automated radiology report generation. In: Proceedings of the 21st International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI 2018) (2018)","DOI":"10.1007\/978-3-030-00928-1_52"},{"key":"4_CR26","doi-asserted-by":"crossref","unstructured":"Olah, C.M., Ludwig, A.S.: Feature visualization. Distill (2017)","DOI":"10.23915\/distill.00007"},{"key":"4_CR27","unstructured":"Lipton, Z.C.: The mythos of model interpretability. In: Workshop on Human Interpretability in Machine Learning (WHI 2016) (2016)"}],"container-title":["Lecture Notes in Computer Science","Interpretability of Machine Intelligence in Medical Image Computing, and Topological Data Analysis and Its Applications for Medical Data"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-87444-5_4","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T16:29:04Z","timestamp":1725812944000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-87444-5_4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030874438","9783030874445"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-87444-5_4","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"21 September 
2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"IMIMIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Interpretability of Machine Intelligence in Medical Image Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Strasbourg","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"France","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"imimic2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/imimic-workshop.com\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT 
Microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"12","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"58% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}