{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,9]],"date-time":"2026-05-09T09:37:58Z","timestamp":1778319478816,"version":"3.51.4"},"reference-count":30,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T00:00:00Z","timestamp":1756857600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0"},{"start":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T00:00:00Z","timestamp":1756857600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0"}],"funder":[{"DOI":"10.13039\/100000049","name":"National Institute on Aging","doi-asserted-by":"publisher","award":["R01AG078154"],"award-info":[{"award-number":["R01AG078154"]}],"id":[{"id":"10.13039\/100000049","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000054","name":"National Cancer Institute","doi-asserted-by":"publisher","award":["R01CA287413"],"award-info":[{"award-number":["R01CA287413"]}],"id":[{"id":"10.13039\/100000054","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000002","name":"National Institutes of Health","doi-asserted-by":"publisher","award":["R01AT009457"],"award-info":[{"award-number":["R01AT009457"]}],"id":[{"id":"10.13039\/100000002","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Healthc Inform Res"],"published-print":{"date-parts":[[2025,12]]},"abstract":"<jats:title>Abstract<\/jats:title>\n                  <jats:p>\n                    Whole slide images (WSIs) are critical for cancer diagnosis but pose computational challenges due to their gigapixel resolution. While automated AI tools can accelerate diagnostic workflows, they often rely on precise annotations and require substantial training data. Integrating multimodal data\u2014such as WSIs and corresponding pathology reports\u2014offers a promising solution to improve classification accuracy and reduce diagnostic variability. In this study, we introduce MPath-Net, an end-to-end multimodal framework that combines WSIs and pathology reports for enhanced cancer subtype classification. Using the TCGA dataset (1684 cases: 916 kidney, 768 lung), we applied multiple-instance learning (MIL) for WSI feature extraction and Sentence-BERT for report encoding, followed by joint fine-tuning for tumor classification. MPath-Net achieved 94.65% accuracy, 0.9553 precision, 0.9472 recall, and 0.9473 F1-score, significantly outperforming baseline models (\n                    <jats:italic>P<\/jats:italic>\n                    \u2009&lt;\u20090.05). In addition, attention heatmaps provided interpretable tumor tissue localization, demonstrating the clinical utility of our approach. These findings suggest that MPath-Net can support pathologists by improving diagnostic accuracy, reducing inter-reader variability, and advancing precision medicine through multimodal AI integration.\n                  <\/jats:p>","DOI":"10.1007\/s41666-025-00212-w","type":"journal-article","created":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T20:52:29Z","timestamp":1756932749000},"page":"513-532","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Multimodal Data Fusion for Whole-Slide Histopathology Image Classification"],"prefix":"10.1007","volume":"9","author":[{"given":"Yiran","family":"Song","sequence":"first","affiliation":[]},{"given":"Mousumi","family":"Roy","sequence":"additional","affiliation":[]},{"given":"Minghao","family":"Zhong","sequence":"additional","affiliation":[]},{"given":"Liam","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Mingquan","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Rui","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,3]]},"reference":[{"key":"212_CR1","doi-asserted-by":"publisher","first-page":"147","DOI":"10.1109\/RBME.2009.2034865","volume":"2","author":"MN Gurcan","year":"2009","unstructured":"Gurcan MN, Boucheron LE, Can A, Madabhushi A, Rajpoot NM, Yener B (2009) Histopathological image analysis: a review. IEEE Rev Biomed Eng 2:147\u2013171","journal-title":"IEEE Rev Biomed Eng"},{"key":"212_CR2","doi-asserted-by":"crossref","unstructured":"Silva LAV, Rohr K (2020) Pan-cancer prognosis prediction using multimodal deep learning. In: 2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI). IEEE, pp 568\u2013571","DOI":"10.1109\/ISBI45749.2020.9098665"},{"key":"212_CR3","doi-asserted-by":"crossref","unstructured":"Li S, Shi H, Sui D, Hao A and Qin H (2020) A novel pathological images and genomic data fusion framework for breast cancer survival prediction. 2020 42nd Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC). IEEE, p. 1384\u20137.","DOI":"10.1109\/EMBC44109.2020.9176360"},{"key":"212_CR4","doi-asserted-by":"publisher","first-page":"E2970","DOI":"10.1073\/pnas.1717139115","volume":"115","author":"P Mobadersany","year":"2018","unstructured":"Mobadersany P, Yousefi S, Amgad M et al (2018) Predicting cancer outcomes from histology and genomics using convolutional networks. Proc Natl Acad Sci U S A 115:E2970\u2013E2979","journal-title":"Proc Natl Acad Sci U S A"},{"key":"212_CR5","doi-asserted-by":"crossref","unstructured":"Li B, Li Y, Eliceiri KW (2021) Dual-stream multiple instance learning network for whole slide image classification with self-supervised contrastive learning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 14318\u201314328","DOI":"10.1109\/CVPR46437.2021.01409"},{"key":"212_CR6","doi-asserted-by":"crossref","unstructured":"Zhang Y, Li H, Sun Y et al (2024) Attention-challenging multiple instance learning for whole slide image classification. In: European conference on computer vision. Cham: Springer Nature Switzerland, pp 125\u2013143","DOI":"10.1007\/978-3-031-73668-1_8"},{"key":"212_CR7","unstructured":"Ilse M, Tomczak J, Welling M (2018) Attention-based deep multiple instance learning. In: International conference on machine learning. PMLR, pp 2127\u20132136"},{"key":"212_CR8","first-page":"2136","volume":"34","author":"Z Shao","year":"2021","unstructured":"Shao Z, Bian H, Chen Y, Wang Y, Zhang J, Ji X (2021) Transmil: transformer based correlated multiple instance learning for whole slide image classification. Adv Neural Inf Process Syst 34:2136\u20132147","journal-title":"Adv Neural Inf Process Syst"},{"key":"212_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.102134","volume":"103","author":"M Salvi","year":"2024","unstructured":"Salvi M, Loh HW, Seoni S et al (2024) Multi-modality approaches for medical support systems: a systematic review of the last decade. Inf Fusion 103:102134","journal-title":"Inf Fusion"},{"key":"212_CR10","doi-asserted-by":"crossref","unstructured":"Neumann M, King D, Beltagy I, Ammar W (2019) ScispaCy: fast and robust models for biomedical natural language processing. In: Proceedings of the 18th BioNLP workshop and shared task. Association for computational linguistics","DOI":"10.18653\/v1\/W19-5034"},{"key":"212_CR11","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2019) Bert: Pre-training of deep bidirectional transformers for language understanding. Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pp 4171\u201386"},{"key":"212_CR12","unstructured":"Huang K, Altosaar J, Ranganath R (2019) Clinicalbert: modeling clinical notes and predicting hospital readmission. arXiv preprint arXiv:1904.05342"},{"key":"212_CR13","doi-asserted-by":"crossref","unstructured":"Li Y, Wang H, Luo Y (2020) A comparison of pre-trained vision-and-language models for multimodal representation learning across medical images and reports. 2020 IEEE international conference on bioinformatics and biomedicine (BIBM). IEEE, pp 1999\u20132004","DOI":"10.1109\/BIBM49941.2020.9313289"},{"key":"212_CR14","doi-asserted-by":"publisher","first-page":"841","DOI":"10.1109\/TCBB.2018.2806438","volume":"16","author":"D Sun","year":"2018","unstructured":"Sun D, Wang M, Li A (2018) A multimodal deep neural network for human breast cancer prognosis prediction by integrating multi-dimensional data. IEEE ACM Trans Comput Biol Bioinform 16:841\u2013850","journal-title":"IEEE ACM Trans Comput Biol Bioinform"},{"key":"212_CR15","doi-asserted-by":"publisher","first-page":"5829","DOI":"10.1016\/j.csbj.2023.11.011","volume":"21","author":"A Qoku","year":"2023","unstructured":"Qoku A, Katsaouni N, Flinner N, Buettner F, Schulz MH (2023) Multimodal analysis methods in predictive biomedicine. Comput Struct Biotechnol J 21:5829\u20135838","journal-title":"Comput Struct Biotechnol J"},{"key":"212_CR16","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s12911-020-01340-6","volume":"21","author":"R Yan","year":"2021","unstructured":"Yan R, Zhang F, Rao X et al (2021) Richer fusion network for breast cancer classification based on multimodal data. BMC Med Inform Decis Mak 21:1\u201315","journal-title":"BMC Med Inform Decis Mak"},{"key":"212_CR17","doi-asserted-by":"crossref","unstructured":"Reimers N, Gurevych I (2019) Sentence-BERT: sentence embeddings using siamese BERT-networks. In: Proceedings of the 2019 conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP). Association for Computational Linguistics, p 3982","DOI":"10.18653\/v1\/D19-1410"},{"key":"212_CR18","unstructured":"Allada AA (2021) Histopathology image analysis and NLP for Digital Pathology (Doctoral dissertation, University of Waterloo)"},{"key":"212_CR19","doi-asserted-by":"crossref","unstructured":"Settino M, Cannataro M (2018) Survey of main tools for querying and analyzing tcga data. 2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM). IEEE, 2018, pp 1711\u20138","DOI":"10.1109\/BIBM.2018.8621270"},{"key":"212_CR20","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. Proceedings of the IEEE conference on computer vision and pattern recognition p. 770\u20138","DOI":"10.1109\/CVPR.2016.90"},{"key":"212_CR21","doi-asserted-by":"crossref","unstructured":"Kefeli J, Tatonetti N (2023) Benchmark pathology report text corpus with cancer type classification. Medrxiv: the preprint server for health sciences","DOI":"10.1101\/2023.08.03.23293618"},{"key":"212_CR22","doi-asserted-by":"publisher","first-page":"1234","DOI":"10.1093\/bioinformatics\/btz682","volume":"36","author":"J Lee","year":"2020","unstructured":"Lee J, Yoon W, Kim S et al (2020) BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics 36:1234\u20131240","journal-title":"Bioinformatics"},{"key":"212_CR23","doi-asserted-by":"crossref","unstructured":"Alsentzer E, Murphy J, Boag W, Weng WH, Jindi D, Naumann T, McDermott M (2019) Publicly available clinical BERT embeddings. In: Proceedings of the 2nd clinical natural language processing workshop, pp 72\u201378","DOI":"10.18653\/v1\/W19-1909"},{"key":"212_CR24","unstructured":"Santos T, Tariq A, Das S et al (2023) PathologyBERT-pre-trained vs. a new transformer language model for pathology domain. AMIA annual symposium proceedings. p. 962"},{"key":"212_CR25","unstructured":"Kingma DP, BA J (2015) Adam: a method for stochastic optimization[C]\/\/BENGIO Y, LECUN Y. In: 3rd International Conference on Learning Representations (ICLR) 2015, Conference Track Proceedings. San Diego, pp 1\u201315"},{"key":"212_CR26","unstructured":"Chen T, Kornblith S, Norouzi M, Hinton G (2020) A simple framework for contrastive learning of visual representations. International conference on machine learning. PmLR pp 1597\u2013607"},{"key":"212_CR27","doi-asserted-by":"publisher","first-page":"850","DOI":"10.1038\/s41591-024-02857-3","volume":"30","author":"RJ Chen","year":"2024","unstructured":"Chen RJ, Ding T, Lu MY et al (2024) Towards a general-purpose foundation model for computational pathology. Nat Med 30:850\u2013862","journal-title":"Nat Med"},{"issue":"9","key":"212_CR28","first-page":"2379","volume":"29","author":"E Vorontsov","year":"2024","unstructured":"Vorontsov E, Bozkurt A, Casson A, Shaikovski V (2024) A foundation model for clinical-grade computational pathology and rare disease diagnostics. Nat Med 29(9):2379\u20132391","journal-title":"Nat Med"},{"key":"212_CR29","doi-asserted-by":"publisher","first-page":"181","DOI":"10.1038\/s41586-024-07441-w","volume":"630","author":"H Xu","year":"2024","unstructured":"Xu H, Usuyama N, Bagga J et al (2024) A whole-slide foundation model for digital pathology from real-world data. Nature 630:181\u2013188","journal-title":"Nature"},{"key":"212_CR30","unstructured":"Bioptimus (2025) H-optimus-1 (Version 1) [Software]. Hugging face. https:\/\/huggingface.co\/bioptimus\/H-optimus-1"}],"container-title":["Journal of Healthcare Informatics Research"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41666-025-00212-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s41666-025-00212-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41666-025-00212-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,10]],"date-time":"2025-11-10T14:56:54Z","timestamp":1762786614000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s41666-025-00212-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,3]]},"references-count":30,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["212"],"URL":"https:\/\/doi.org\/10.1007\/s41666-025-00212-w","relation":{},"ISSN":["2509-4971","2509-498X"],"issn-type":[{"value":"2509-4971","type":"print"},{"value":"2509-498X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,3]]},"assertion":[{"value":"17 April 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 July 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 August 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 September 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}]}}