{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,18]],"date-time":"2026-04-18T00:30:18Z","timestamp":1776472218450,"version":"3.51.2"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031164484","type":"print"},{"value":"9783031164491","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-16449-1_23","type":"book-chapter","created":{"date-parts":[[2022,9,16]],"date-time":"2022-09-16T08:04:54Z","timestamp":1663315494000},"page":"234-243","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":27,"title":["Multi-transSP: Multimodal Transformer for Survival Prediction of Nasopharyngeal Carcinoma Patients"],"prefix":"10.1007","author":[{"given":"Hanci","family":"Zheng","sequence":"first","affiliation":[]},{"given":"Zongying","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Qizheng","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Xingchen","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Jianghong","family":"Xiao","sequence":"additional","affiliation":[]},{"given":"Chen","family":"Zu","sequence":"additional","affiliation":[]},{"given":"Zhengyang","family":"Jiao","sequence":"additional","affiliation":[]},{"given":"Yan","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,17]]},"reference":[{"key":"23_CR1","doi-asserted-by":"publisher","first-page":"108021","DOI":"10.1016\/j.knosys.2021.108021","volume":"239","author":"L Hu","year":"2022","unstructured":"Hu, L., Li, J., Peng, X., et al.: Semi-supervised NPC segmentation with uncertainty and attention guided consistency. Knowl.-Based Syst. 239, 108021\u2013108033 (2022)","journal-title":"Knowl.-Based Syst."},{"key":"23_CR2","doi-asserted-by":"publisher","first-page":"102339","DOI":"10.1016\/j.media.2021.102339","volume":"77","author":"B Zhan","year":"2022","unstructured":"Zhan, B., Xiao, J., Cao, C., et al.: Multi-constraint generative adversarial network for dose prediction in radiotherapy. Med. Image Anal. 77, 102339\u2013102352 (2022)","journal-title":"Med. Image Anal."},{"key":"23_CR3","doi-asserted-by":"publisher","first-page":"749","DOI":"10.1038\/nrclinonc.2017.141","volume":"14","author":"P Lambin","year":"2017","unstructured":"Lambin, P., Leijenaar, R.T.H., Deist, T.M., et al.: Radiomics: the bridge between medical imaging and personalized medicine. Nat Rev Clin Oncol 14, 749\u2013762 (2017)","journal-title":"Nat Rev Clin Oncol"},{"key":"23_CR4","doi-asserted-by":"crossref","unstructured":"Wang, Y., Zhou, L., Yu, B. et al.: 3D auto-context-based locality adaptive multi-modality GANs for PET synthesis. IEEE Trans. Med. Imaging 38, 1328\u20131339 (2019)","DOI":"10.1109\/TMI.2018.2884053"},{"key":"23_CR5","doi-asserted-by":"publisher","first-page":"102335","DOI":"10.1016\/j.media.2021.102335","volume":"77","author":"Y Luo","year":"2022","unstructured":"Luo, Y., Zhou, L., Zhan, B., et al.: Adaptive rectification based adversarial network with spectrum constraint for high-quality PET image synthesis. Med. Image Anal. 77, 102335\u2013102347 (2022)","journal-title":"Med. Image Anal."},{"key":"23_CR6","doi-asserted-by":"publisher","first-page":"102447","DOI":"10.1016\/j.media.2022.102447","volume":"79","author":"K Wang","year":"2022","unstructured":"Wang, K., Zhan, B., Zu, C., et al.: Semi-supervised medical image segmentation via a tripled-uncertainty guided mean teacher model with contrastive learning. Med. Image Anal. 79, 102447\u2013102460 (2022)","journal-title":"Med. Image Anal."},{"key":"23_CR7","doi-asserted-by":"publisher","first-page":"1074","DOI":"10.1002\/jmri.27202","volume":"52","author":"Q Yang","year":"2020","unstructured":"Yang, Q., Guo, Y., Ou, X., et al.: Automatic T staging using weakly supervised deep learning for nasopharyngeal carcinoma on MR images. J. Magn. Reson. Imaging 52, 1074\u20131082 (2020)","journal-title":"J. Magn. Reson. Imaging"},{"key":"23_CR8","doi-asserted-by":"publisher","first-page":"1298","DOI":"10.1002\/cam4.2802","volume":"9","author":"K Liu","year":"2020","unstructured":"Liu, K., Xia, W., Qiang, M., et al.: Deep learning pathological microscopic features in endemic nasopharyngeal cancer: prognostic value and protentional role for individual induction chemotherapy. Cancer Med 9, 1298\u20131306 (2020)","journal-title":"Cancer Med"},{"key":"23_CR9","unstructured":"Huang, Y., Zhao, H., Huang, L.: What Makes Multi-modal Learning Better than Single (Provably). arXiv preprint arXiv: 2106.04538 [Cs] (2021)"},{"key":"23_CR10","doi-asserted-by":"publisher","first-page":"108566","DOI":"10.1016\/j.patcog.2022.108566","volume":"126","author":"Y Shi","year":"2022","unstructured":"Shi, Y., Zu, C., Hong, M., et al.: ASMFS: adaptive-similarity-based multi-modality feature selection for classification of Alzheimer\u2019s disease. Pattern Recogn. 126, 108566\u2013108580 (2022)","journal-title":"Pattern Recogn."},{"key":"23_CR11","doi-asserted-by":"publisher","first-page":"105684","DOI":"10.1016\/j.cmpb.2020.105684","volume":"197","author":"B Jing","year":"2020","unstructured":"Jing, B., Deng, Y., Zhang, T., et al.: Deep learning for risk prediction in patients with nasopharyngeal carcinoma using multi-parametric MRIs. Comput. Methods Programs Biomed. 197, 105684\u2013105690 (2020)","journal-title":"Comput. Methods Programs Biomed."},{"key":"23_CR12","doi-asserted-by":"publisher","first-page":"606","DOI":"10.1093\/jnci\/djaa149","volume":"113","author":"M Qiang","year":"2021","unstructured":"Qiang, M., Li, C., Sun, Y., et al.: A prognostic predictive system based on deep learning for locoregionally advanced nasopharyngeal carcinoma. J. Natl Cancer Inst. 113, 606\u2013615 (2021)","journal-title":"J. Natl Cancer Inst."},{"key":"23_CR13","doi-asserted-by":"crossref","unstructured":"Vale-Silva, L.A., Rohr, K.: Pan-cancer prognosis prediction using multimodal deep learning. In: IEEE 17th International Symposium on Biomedical Imaging, pp. 568\u2013571. IEEE (2020)","DOI":"10.1109\/ISBI45749.2020.9098665"},{"key":"23_CR14","doi-asserted-by":"publisher","first-page":"167","DOI":"10.1002\/jmri.27308","volume":"53","author":"L Zhang","year":"2021","unstructured":"Zhang, L., Wu, X., Liu, J., et al.: MRI-based deep-learning model for distant metastasis-free survival in locoregionally advanced nasopharyngeal carcinoma. J. Magn. Reson. Imaging 53, 167\u2013178 (2021)","journal-title":"J. Magn. Reson. Imaging"},{"key":"23_CR15","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1007\/978-3-030-59713-9_51","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2020","author":"G Chauhan","year":"2020","unstructured":"Chauhan, G., et al.: Joint modeling of chest radiographs and radiology reports for pulmonary edema assessment. In: Martel, A.L., et al. (eds.) MICCAI 2020. LNCS, vol. 12262, pp. 529\u2013539. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-59713-9_51"},{"key":"23_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"721","DOI":"10.1007\/978-3-030-87240-3_69","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"Y Guan","year":"2021","unstructured":"Guan, Y., et al.: Predicting esophageal fistula risks using a multimodal self-attention network. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12905, pp. 721\u2013730. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87240-3_69"},{"key":"23_CR17","unstructured":"Lin, T., Wang, Y., Liu, X. et al.: A Survey of Transformers. arXiv preprint arXiv:2106.04554 [cs] (2021)"},{"key":"23_CR18","unstructured":"Parmar, N., Vaswani, A., Uszkoreit, J. et al.: Image Transformer. arXiv preprint arXiv:1802.05751v3 [cs] (2018)"},{"key":"23_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"23_CR20","doi-asserted-by":"crossref","unstructured":"Wang, H., Zhu, Y., Adam, H. et al.: MaX-DeepLab: end-to-end panoptic segmentation with mask transformers. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 5459\u20135470. IEEE (2021)","DOI":"10.1109\/CVPR46437.2021.00542"},{"key":"23_CR21","doi-asserted-by":"crossref","unstructured":"Huang, J., Tao, J., Liu, B. et al.: Multimodal transformer fusion for continuous emotion recognition. In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 3507\u20133511. IEEE (2020)","DOI":"10.1109\/ICASSP40776.2020.9053762"},{"key":"23_CR22","doi-asserted-by":"crossref","unstructured":"Tsai, Y. H., Bai, S., Liang, P. P. et al.: Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6558\u20136569 (2019)","DOI":"10.18653\/v1\/P19-1656"},{"key":"23_CR23","doi-asserted-by":"crossref","unstructured":"Hu, R., Singh, A.: UniT: multimodal multitask learning with a unified transformer. arXiv preprint arXiv:2102.10772 [cs] (2021)","DOI":"10.1109\/ICCV48922.2021.00147"},{"key":"23_CR24","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S. et al.: Deep residual learning for image recognition. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778. IEEE (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"23_CR25","doi-asserted-by":"publisher","first-page":"24","DOI":"10.1186\/s12874-018-0482-1","volume":"18","author":"JL Katzman","year":"2018","unstructured":"Katzman, J.L., Shaham, U., Cloninger, A., et al.: DeepSurv: personalized treatment recommender system using a Cox proportional hazards deep neural network. BMC Med. Res. Methodol. 18, 24\u201335 (2018)","journal-title":"BMC Med. Res. Methodol."},{"key":"23_CR26","doi-asserted-by":"publisher","first-page":"274","DOI":"10.1038\/s42256-020-0173-6","volume":"2","author":"P Mukherjee","year":"2020","unstructured":"Mukherjee, P., Zhou, M., Lee, E., et al.: A shallow convolutional neural network predicts prognosis of lung cancer patients in multi-institutional CT-Image data. Nat. Mach. Intell. 2, 274\u2013282 (2020)","journal-title":"Nat. Mach. Intell."},{"key":"23_CR27","doi-asserted-by":"publisher","first-page":"1261","DOI":"10.1111\/exd.13777","volume":"27","author":"J Yap","year":"2018","unstructured":"Yap, J., Yolland, W., Tschandl, P.: Multimodal skin lesion classification using deep learning. Exp. Dermatol. 27, 1261\u20131267 (2018)","journal-title":"Exp. Dermatol."},{"key":"23_CR28","doi-asserted-by":"publisher","first-page":"13505","DOI":"10.1038\/s41598-021-92799-4","volume":"11","author":"LA Vale-Silva","year":"2021","unstructured":"Vale-Silva, L.A., Rohr, K.: Long-term cancer survival prediction using multimodal deep learning. Sci. Rep. 11, 13505\u201313516 (2021)","journal-title":"Sci. Rep."}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-16449-1_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T16:54:32Z","timestamp":1709830472000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-16449-1_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031164484","9783031164491"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-16449-1_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"17 September 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Singapore","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Singapore","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 September 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 September 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft Conference","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1831","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"574","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"31% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}