{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T07:43:26Z","timestamp":1771055006681,"version":"3.50.1"},"reference-count":54,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T00:00:00Z","timestamp":1763337600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T00:00:00Z","timestamp":1763337600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["npj Digit. Med."],"DOI":"10.1038\/s41746-025-02032-z","type":"journal-article","created":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T21:56:01Z","timestamp":1763416561000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["EVA-X: a foundation model for general chest x-ray analysis with self-supervised learning"],"prefix":"10.1038","volume":"8","author":[{"given":"Jingfeng","family":"Yao","sequence":"first","affiliation":[]},{"given":"Xinggang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yuehao","family":"Song","sequence":"additional","affiliation":[]},{"given":"Huangxuan","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Jun","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Yajie","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Wenyu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Bo","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,17]]},"reference":[{"key":"2032_CR1","unstructured":"Organization, W. H. et al. 
Communicating radiation risks in paediatric imaging: information to support health care discussions about benefit and risk (WHO, 2016)."},{"key":"2032_CR2","doi-asserted-by":"publisher","first-page":"e44","DOI":"10.1016\/S2589-7500(23)00218-2","volume":"6","author":"YD Cid","year":"2024","unstructured":"Cid, Y. D. et al. Development and validation of open-source deep neural networks for comprehensive chest x-ray reading: a retrospective, multicentre study. Lancet Digital Health 6, e44\u2013e57 (2024).","journal-title":"Lancet Digital Health"},{"key":"2032_CR3","doi-asserted-by":"crossref","unstructured":"Irvin, J. et al. Chexpert: A large chest radiograph dataset with uncertainty labels and expert comparison. Proc. Artif. Intell. 33, 590\u2013597 (2019).","DOI":"10.1609\/aaai.v33i01.3301590"},{"key":"2032_CR4","doi-asserted-by":"crossref","unstructured":"Wang, X. et al. Chestx-ray8: Hospital-scale chest x-ray database and benchmarks on weakly-supervised classification and localization of common thorax diseases. In Proc. IEEE Conference on Computer Vision and Pattern Recognition, 2097\u20132106 (IEEE, 2017).","DOI":"10.1109\/CVPR.2017.369"},{"key":"2032_CR5","doi-asserted-by":"crossref","unstructured":"Johnson, A. E. et al. Mimic-cxr-jpg, a large publicly available database of labeled chest radiographs. arXiv preprint arXiv:1901.07042 (2019).","DOI":"10.1038\/s41597-019-0322-0"},{"key":"2032_CR6","doi-asserted-by":"publisher","first-page":"259","DOI":"10.1038\/s41586-023-05881-4","volume":"616","author":"M Moor","year":"2023","unstructured":"Moor, M. et al. Foundation models for generalist medical artificial intelligence. Nature 616, 259\u2013265 (2023).","journal-title":"Nature"},{"key":"2032_CR7","doi-asserted-by":"publisher","first-page":"156","DOI":"10.1038\/s41586-023-06555-x","volume":"622","author":"Y Zhou","year":"2023","unstructured":"Zhou, Y. et al. A foundation model for generalizable disease detection from retinal images. 
Nature 622, 156\u2013163 (2023).","journal-title":"Nature"},{"key":"2032_CR8","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-024-44824-z","volume":"15","author":"J Ma","year":"2024","unstructured":"Ma, J. et al. Segment anything in medical images. Nat. Commun. 15, 654 (2024).","journal-title":"Nat. Commun."},{"key":"2032_CR9","unstructured":"Hemdan, E. E.-D., Shouman, M. A. & Karar, M. E. Covidx-net: A framework of deep learning classifiers to diagnose covid-19 in x-ray images. arXiv preprint arXiv:2003.11055 (2020)."},{"key":"2032_CR10","unstructured":"Stein, A. et al. Rsna pneumonia detection challenge https:\/\/kaggle.com\/competitions\/rsna-pneumonia-detection-challenge (2018)."},{"key":"2032_CR11","unstructured":"Zawacki, A. et al. Siim-acr pneumothorax segmentation https:\/\/kaggle.com\/competitions\/siim-acr-pneumothorax-segmentation (2019)."},{"key":"2032_CR12","first-page":"475","volume":"4","author":"S Jaeger","year":"2014","unstructured":"Jaeger, S. et al. Two public chest x-ray datasets for computer-aided screening of pulmonary diseases. Quant. Imaging Med. Surg. 4, 475 (2014).","journal-title":"Quant. Imaging Med. Surg."},{"key":"2032_CR13","unstructured":"Zhang, Y. Lung segmentation with nasnet-large-decoder net. arXiv preprint arXiv:2303.10315 (2023)."},{"key":"2032_CR14","first-page":"33536","volume":"35","author":"F Wang","year":"2022","unstructured":"Wang, F., Zhou, Y., Wang, S., Vardhanabhuti, V. & Yu, L. Multi-granularity cross-modal alignment for generalized medical visual representation learning. Adv. Neural Inf. Process Syst. 35, 33536\u201333549 (2022).","journal-title":"Adv. Neural Inf. Process Syst."},{"key":"2032_CR15","doi-asserted-by":"crossref","unstructured":"Boecking, B. et al. Making the most of text semantics to improve biomedical vision\u2013language processing. 
In European Conference on Computer Vision, 1\u201321 (Springer, 2022).","DOI":"10.1007\/978-3-031-20059-5_1"},{"key":"2032_CR16","doi-asserted-by":"crossref","unstructured":"Wu, C., Zhang, X., Zhang, Y., Wang, Y. & Xie, W. Medklip: Medical knowledge enhanced language-image pre-training. Proceedings of the IEEE\/CVF International Conference on Computer Vision (IEEE, 2023).","DOI":"10.1101\/2023.01.10.23284412"},{"key":"2032_CR17","unstructured":"Zhang, Y., Jiang, H., Miura, Y., Manning, C. D. & Langlotz, C. P. Contrastive learning of medical visual representations from paired images and text. In Machine Learning for Healthcare Conference, 2\u201325 (PMLR, 2022)."},{"key":"2032_CR18","doi-asserted-by":"crossref","unstructured":"Huang, S.-C., Shen, L., Lungren, M. P. & Yeung, S. Gloria: A multimodal global-local representation learning framework for label-efficient medical image recognition. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, 3942\u20133951 (IEEE, 2021).","DOI":"10.1109\/ICCV48922.2021.00391"},{"key":"2032_CR19","doi-asserted-by":"crossref","unstructured":"Zhou, L., Liu, H., Bae, J., He, J., Samaras, D. & Prasanna, P. Self pre-training with masked autoencoders for medical image classification and segmentation. In IEEE International Symposium on Biomedical Imaging, 1\u20136 (IEEE, 2023).","DOI":"10.1109\/ISBI53787.2023.10230477"},{"key":"2032_CR20","doi-asserted-by":"crossref","unstructured":"Xiao, J., Bai, Y., Yuille, A. & Zhou, Z. Delving into masked autoencoders for multi-label thorax disease classification. In Proc. IEEE\/CVF Winter Conference on Applications of Computer Vision, 3588\u20133600 (IEEE, 2023).","DOI":"10.1109\/WACV56688.2023.00358"},{"key":"2032_CR21","unstructured":"Dosovitskiy, A. et al. An image is worth 16\u2009\u00d7\u200916 words: Transformers for image recognition at scale. 
In International Conference on Learning Representations (2021)."},{"key":"2032_CR22","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S. & Sun, J. Deep residual learning for image recognition. In Proc. IEEE conference on computer vision and pattern recognition, 770\u2013778 (IEEE, 2016).","DOI":"10.1109\/CVPR.2016.90"},{"key":"2032_CR23","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L. & Weinberger, K. Q. Densely connected convolutional networks. In Proc. IEEE Conference on Computer Vision and Pattern Recognition, 4700\u20134708 (IEEE, 2017).","DOI":"10.1109\/CVPR.2017.243"},{"key":"2032_CR24","doi-asserted-by":"publisher","first-page":"105171","DOI":"10.1016\/j.imavis.2024.105171","volume":"149","author":"Y Fang","year":"2024","unstructured":"Fang, Y. et al. Eva-02: A visual representation for neon genesis. Image Vis. Computing 149, 105171 (2024).","journal-title":"Image Vis. Computing"},{"key":"2032_CR25","doi-asserted-by":"publisher","DOI":"10.1186\/s41747-023-00411-3","volume":"8","author":"S Tayebi Arasteh","year":"2024","unstructured":"Tayebi Arasteh, S., Misera, L., Kather, J. N., Truhn, D. & Nebelung, S. Enhancing diagnostic deep learning via self-supervised pretraining on large-scale, unlabeled non-medical images. Eur. Radiol. Exp. 8, 10 (2024).","journal-title":"Eur. Radiol. Exp."},{"key":"2032_CR26","doi-asserted-by":"crossref","unstructured":"He, K. et al. Masked autoencoders are scalable vision learners. In Proc. IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 16000\u201316009 (2022).","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"2032_CR27","unstructured":"Chen, X., Fan, H., Girshick, R. & He, K. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020)."},{"key":"2032_CR28","doi-asserted-by":"publisher","first-page":"259","DOI":"10.1016\/j.patrec.2018.10.027","volume":"130","author":"Q Guan","year":"2020","unstructured":"Guan, Q. & Huang, Y. 
Multi-label chest x-ray image classification via category-wise residual attention learning. Pattern Recognit. Lett. 130, 259\u2013266 (2020).","journal-title":"Pattern Recognit. Lett."},{"key":"2032_CR29","doi-asserted-by":"crossref","unstructured":"Ma, C., Wang, H. & Hoi, S. C. Multi-label thoracic disease image classification with cross-attention networks. In Medical Image Computing and Computer Assisted Intervention\u2013MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13\u201317, 2019, Proceedings, Part VI 22, 730\u2013738 (Springer, 2019).","DOI":"10.1007\/978-3-030-32226-7_81"},{"key":"2032_CR30","doi-asserted-by":"crossref","unstructured":"Haghighi, F., Taher, M. R. H., Gotway, M. B. & Liang, J. Dira: Discriminative, restorative, and adversarial learning for self-supervised medical image analysis. In Proc. IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 20824\u201320834 (IEEE, 2022).","DOI":"10.1109\/CVPR52688.2022.02016"},{"key":"2032_CR31","doi-asserted-by":"crossref","unstructured":"Liu, F. et al. Acpl: Anti-curriculum pseudo-labelling for semi-supervised medical image classification. In Proc. IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 20697\u201320706 (IEEE, 2022).","DOI":"10.1109\/CVPR52688.2022.02004"},{"key":"2032_CR32","doi-asserted-by":"crossref","unstructured":"Hermoza, R., Maicas, G., Nascimento, J. C. & Carneiro, G. Region proposals for saliency map refinement for weakly-supervised disease localisation and classification. In Medical Image Computing and Computer Assisted Intervention\u2013MICCAI 2020: 23rd International Conference, Lima, Peru, October 4\u20138, 2020, Proceedings, Part VI 23, 539\u2013549 (Springer, 2020).","DOI":"10.1007\/978-3-030-59725-2_52"},{"key":"2032_CR33","unstructured":"Kim, E., Kim, S., Seo, M. & Yoon, S. Xprotonet: diagnosis in chest radiography with global and local explanations. In Proc. 
IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 15719\u201315728 (IEEE, 2021)."},{"key":"2032_CR34","doi-asserted-by":"crossref","unstructured":"Seyyed-Kalantari, L., Liu, G., McDermott, M., Chen, I. Y. & Ghassemi, M. Chexclusion: Fairness gaps in deep chest x-ray classifiers. In BIOCOMPUTING 2021: proceedings of the Pacific symposium, 232\u2013243 (World Scientific, 2020).","DOI":"10.1142\/9789811232701_0022"},{"key":"2032_CR35","doi-asserted-by":"publisher","first-page":"186","DOI":"10.1016\/j.neucom.2020.03.127","volume":"437","author":"HH Pham","year":"2021","unstructured":"Pham, H. H., Le, T. T., Tran, D. Q., Ngo, D. T. & Nguyen, H. Q. Interpreting chest x-rays via cnns that exploit hierarchical disease dependencies and uncertainty labels. Neurocomputing 437, 186\u2013194 (2021).","journal-title":"Neurocomputing"},{"key":"2032_CR36","doi-asserted-by":"crossref","unstructured":"Touvron, H. et al. Training data-efficient image transformers & distillation through attention. In International conference on machine learning, 10347\u201310357 (PMLR, 2021).","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"2032_CR37","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s41747-020-00159-0","volume":"4","author":"G Baselli","year":"2020","unstructured":"Baselli, G., Codari, M. & Sardanelli, F. Opening the black box of machine learning in radiology: can the proximity of annotated cases be a way? Eur. Radiol. Exp. 4, 1\u20137 (2020).","journal-title":"Eur. Radiol. Exp."},{"key":"2032_CR38","doi-asserted-by":"crossref","unstructured":"Selvaraju, R. R. et al. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proc. IEEE International Conference on Computer Vision, 618\u2013626 (IEEE, 2017).","DOI":"10.1109\/ICCV.2017.74"},{"key":"2032_CR39","doi-asserted-by":"publisher","first-page":"867","DOI":"10.1038\/s42256-022-00536-x","volume":"4","author":"A Saporta","year":"2022","unstructured":"Saporta, A. et al. 
Benchmarking saliency methods for chest x-ray interpretation. Nat. Mach. Intell. 4, 867\u2013878 (2022).","journal-title":"Nat. Mach. Intell."},{"key":"2032_CR40","unstructured":"Liu, A. et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437 (2024)."},{"key":"2032_CR41","doi-asserted-by":"publisher","first-page":"e230060","DOI":"10.1148\/ryai.230060","volume":"5","author":"B Glocker","year":"2023","unstructured":"Glocker, B., Jones, C., Roschewitz, M. & Winzeck, S. Risk of bias in chest radiography deep learning foundation models. Radiol. Artif. Intell. 5, e230060 (2023).","journal-title":"Radiol. Artif. Intell."},{"key":"2032_CR42","doi-asserted-by":"crossref","unstructured":"Ma, D., Pang, J., Gotway, M. B. & Liang, J. A fully open AI foundation model applied to chest radiography. Nature 643, 488\u2013498 (2025).","DOI":"10.1038\/s41586-025-09079-8"},{"key":"2032_CR43","unstructured":"Xu, S. et al. Elixr: Towards a general purpose x-ray artificial intelligence system through alignment of large language models and radiology vision encoders. arXiv preprint arXiv:2308.01317 (2023)."},{"key":"2032_CR44","unstructured":"Chen, Z. et al. Chexagent: Towards a foundation model for chest x-ray interpretation. arXiv preprint arXiv:2401.12208 (2024)."},{"key":"2032_CR45","doi-asserted-by":"crossref","unstructured":"Thawakar, O. C. et al. Xraygpt: Chest radiographs summarization using large medical vision-language models. In Proc. 23rd Workshop on Biomedical Natural Language Processing, 440\u2013448 (IEEE, 2024).","DOI":"10.18653\/v1\/2024.bionlp-1.35"},{"key":"2032_CR46","doi-asserted-by":"crossref","unstructured":"Deng, J. et al. Imagenet: A large-scale hierarchical image database. In Proc. IEEE Conference on Computer Vision and Pattern Recognition, 248\u2013255 (IEEE, 2009).","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"2032_CR47","doi-asserted-by":"crossref","unstructured":"Fang, Y. et al. 
Eva: Exploring the limits of masked visual representation learning at scale. In Proc. of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 19358\u201319369 (IEEE, 2023).","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"2032_CR48","doi-asserted-by":"publisher","first-page":"102125","DOI":"10.1016\/j.media.2021.102125","volume":"72","author":"E \u00c7all\u00ed","year":"2021","unstructured":"\u00c7all\u00ed, E., Sogancioglu, E., van Ginneken, B., van Leeuwen, K. G. & Murphy, K. Deep learning for chest x-ray analysis: a survey. Med. Image Anal. 72, 102125 (2021).","journal-title":"Med. Image Anal."},{"key":"2032_CR49","unstructured":"Wang, H. et al. Foundation transformers. arXiv preprint arXiv:2210.06423 (2022)."},{"key":"2032_CR50","unstructured":"Shazeer, N. Glu variants improve transformer. arXiv preprint arXiv:2002.05202 (2020)."},{"key":"2032_CR51","unstructured":"Sun, Q., Fang, Y., Wu, L., Wang, X. & Cao, Y. Eva-clip: improved training techniques for clip at scale. arXiv preprint arXiv:2303.15389 (2023)."},{"key":"2032_CR52","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P. & Brox, T. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, 234\u2013241 (Springer, 2015).","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"2032_CR53","doi-asserted-by":"crossref","unstructured":"Li, Y., Mao, H., Girshick, R. & He, K. Exploring plain vision transformer backbones for object detection. In European Conference on Computer Vision, 280\u2013296 (Springer, 2022).","DOI":"10.1007\/978-3-031-20077-9_17"},{"key":"2032_CR54","doi-asserted-by":"crossref","unstructured":"Xiao, T., Liu, Y., Zhou, B., Jiang, Y. & Sun, J. Unified perceptual parsing for scene understanding. 
In Proceedings of the European conference on computer vision (ECCV), 418\u2013434 (ECCV, 2018).","DOI":"10.1007\/978-3-030-01228-1_26"}],"container-title":["npj Digital Medicine"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.nature.com\/articles\/s41746-025-02032-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/www.nature.com\/articles\/s41746-025-02032-z","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/www.nature.com\/articles\/s41746-025-02032-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T21:56:06Z","timestamp":1763416566000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.nature.com\/articles\/s41746-025-02032-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,17]]},"references-count":54,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["2032"],"URL":"https:\/\/doi.org\/10.1038\/s41746-025-02032-z","relation":{},"ISSN":["2398-6352"],"issn-type":[{"value":"2398-6352","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,17]]},"assertion":[{"value":"20 January 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 September 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 November 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"The authors declare no competing interests.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"678"}}