{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,23]],"date-time":"2025-06-23T09:06:26Z","timestamp":1750669586259,"version":"3.37.3"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"25","license":[{"start":{"date-parts":[[2021,8,26]],"date-time":"2021-08-26T00:00:00Z","timestamp":1629936000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,8,26]],"date-time":"2021-08-26T00:00:00Z","timestamp":1629936000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61872030","61771039"],"award-info":[{"award-number":["61872030","61771039"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shandong Province Major Science and Technology Innovation Project","award":["2019TSLH0206"],"award-info":[{"award-number":["2019TSLH0206"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2021,10]]},"DOI":"10.1007\/s11042-021-11225-z","type":"journal-article","created":{"date-parts":[[2021,8,27]],"date-time":"2021-08-27T00:25:58Z","timestamp":1630023958000},"page":"33779-33797","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["AMRSegNet: adaptive modality recalibration network for lung tumor segmentation on multi-modal MR images"],"prefix":"10.1007","volume":"80","author":[{"given":"Jiaxin","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6808-1140","authenticated-orcid":false,"given":"Houjin","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yanfeng","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yahui","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Naxin","family":"Cai","sequence":"additional","affiliation":[]},{"given":"Xuyang","family":"Cao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,8,26]]},"reference":[{"key":"11225_CR1","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1109\/TMI.2017.2764326","volume":"37","author":"A Chartsias","year":"2018","unstructured":"Chartsias A, Joyce T, Giuffrida MV, Tsaftaris SA (2018) Multimodal MR synthesis via modality-invariant latent representation. IEEE Trans Med Imaging 37:803\u2013814. https:\/\/doi.org\/10.1109\/TMI.2017.2764326","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR2","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2020.3036584","author":"A Chartsias","year":"2019","unstructured":"Chartsias A, Papanastasiou G, Wang C et al (2019) Disentangle, align and fuse for multimodal and zero-shot image segmentation. IEEE Trans Med Imaging. https:\/\/doi.org\/10.1109\/TMI.2020.3036584","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR3","doi-asserted-by":"publisher","first-page":"407","DOI":"10.3978\/j.issn.2223-4292.2015.03.01","volume":"5","author":"GS Chilla","year":"2015","unstructured":"Chilla GS, Tan CH, Xu C, Poh CL (2015) Diffusion weighted magnetic resonance imaging and its recent trend-a survey. Quant Imaging Med Surg 5:407\u2013422. 
https:\/\/doi.org\/10.3978\/j.issn.2223-4292.2015.03.01","journal-title":"Quant Imaging Med Surg"},{"key":"11225_CR4","doi-asserted-by":"publisher","first-page":"297","DOI":"10.2307\/1932409","volume":"26","author":"LR Dice","year":"1945","unstructured":"Dice LR (1945) Measures of the amount of ecologic association between species. Ecology 26:297\u2013302","journal-title":"Ecology"},{"key":"11225_CR5","doi-asserted-by":"publisher","first-page":"1116","DOI":"10.1109\/TMI.2018.2878669","volume":"38","author":"J Dolz","year":"2019","unstructured":"Dolz J, Gopinath K, Yuan J et al (2019) HyperDense-net: a hyper-densely connected CNN for multi-modal image segmentation. IEEE Trans Med Imaging 38:1116\u20131126. https:\/\/doi.org\/10.1109\/TMI.2018.2878669","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR6","volume-title":"IVD-net: intervertebral disc localization and segmentation in MRI with a multi-modal UNet","author":"J Dolz","year":"2019","unstructured":"Dolz J, Desrosiers C, Ben Ayed I (2019) IVD-net: intervertebral disc localization and segmentation in MRI with a multi-modal UNet. Springer International Publishing, Cham"},{"key":"11225_CR7","doi-asserted-by":"publisher","first-page":"162","DOI":"10.1109\/trpms.2018.2890359","volume":"3","author":"Z Guo","year":"2019","unstructured":"Guo Z, Li X, Huang H et al (2019) Deep learning-based image segmentation on multimodal medical imaging. IEEE Trans Radiat Plasma Med Sci 3:162\u2013169. https:\/\/doi.org\/10.1109\/trpms.2018.2890359","journal-title":"IEEE Trans Radiat Plasma Med Sci"},{"key":"11225_CR8","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1016\/j.media.2016.05.004","volume":"35","author":"M Havaei","year":"2017","unstructured":"Havaei M, Davy A, Warde-Farley D et al (2017) Brain tumor segmentation with deep neural networks. Med Image Anal 35:18\u201331. https:\/\/doi.org\/10.1016\/j.media.2016.05.004","journal-title":"Med Image Anal"},{"key":"11225_CR9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.123","author":"K He","year":"2015","unstructured":"He K, Zhang X, Ren S, Sun J (2015) Delving deep into rectifiers: surpassing human-level performance on imagenet classification. Proc IEEE Int Conf Comput Vis. https:\/\/doi.org\/10.1109\/ICCV.2015.123","journal-title":"Proc IEEE Int Conf Comput Vis"},{"key":"11225_CR10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90","author":"K He","year":"2016","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit. https:\/\/doi.org\/10.1109\/CVPR.2016.90","journal-title":"Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit"},{"key":"11225_CR11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745","volume-title":"Squeeze-and-excitation networks","author":"J Hu","year":"2018","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. IEEE, New Jersy"},{"key":"11225_CR12","first-page":"448","volume":"1","author":"S Ioffe","year":"2015","unstructured":"Ioffe S, Szegedy C (2015) Batch normalization: accelerating deep network training by reducing internal covariate shift. Int Conf Mach Learn ICML 1:448\u2013456","journal-title":"Int Conf Mach Learn ICML"},{"key":"11225_CR13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00934-2","author":"J Jiang","year":"2018","unstructured":"Jiang J, Hu Y, Tyagi N et al (2018) Tumor-aware adversarial domain adaptation from CT to MRI for lung cancer segmentation. 
Med Image Comput Comput Assist Interv. https:\/\/doi.org\/10.1007\/978-3-030-00934-2","journal-title":"Med Image Comput Comput Assist Interv"},{"key":"11225_CR14","unstructured":"Kingma DP, Ba JL (2015) Adam: a method for stochastic optimization. International Conference on Learning Representations (ICLR), 2015, Ithaca, NY,\u00a0abs\/1412.6980:1\u201315"},{"key":"11225_CR15","doi-asserted-by":"publisher","unstructured":"Li J, Chen H, Li Y, Peng Y (2019) A novel network based on densely connected fully convolutional networks for segmentation of lung tumors on multi-modal MR images. Proceedings of the 2019 International Conference on Artificial Intelligence and Advanced Manufacturing, 1\u20135. https:\/\/doi.org\/10.1145\/3358331.3358400","DOI":"10.1145\/3358331.3358400"},{"key":"11225_CR16","doi-asserted-by":"publisher","first-page":"2663","DOI":"10.1109\/TMI.2018.2845918","volume":"37","author":"X Li","year":"2018","unstructured":"Li X, Chen H, Qi X et al (2018) H-DenseUNet: hybrid densely connected UNet for liver and tumor segmentation from CT volumes. IEEE Trans Med Imaging 37:2663\u20132674. https:\/\/doi.org\/10.1109\/TMI.2018.2845918","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR17","doi-asserted-by":"publisher","first-page":"34029","DOI":"10.1109\/ACCESS.2020.2973707","volume":"8","author":"P Liu","year":"2020","unstructured":"Liu P, Dou QI, Wang Q (2020) An encoder-decoder neural network with 3D squeeze-and-excitation and deep supervision for brain tumor segmentation. IEEE Access 8:34029\u201334037. https:\/\/doi.org\/10.1109\/ACCESS.2020.2973707","journal-title":"IEEE Access"},{"key":"11225_CR18","doi-asserted-by":"publisher","first-page":"1993","DOI":"10.1109\/TMI.2014.2377694","volume":"34","author":"BH Menze","year":"2015","unstructured":"Menze BH, Jakab A, Bauer S et al (2015) The multimodal brain tumor image segmentation benchmark (BRATS). IEEE Trans Med Imaging 34:1993\u20132024. https:\/\/doi.org\/10.1109\/TMI.2014.2377694","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR19","first-page":"565","volume-title":"V-net: fully convolutional neural networks for volumetric medical image segmentation","author":"F Milletari","year":"2016","unstructured":"Milletari F, Navab N, Ahmadi SA (2016) V-net: fully convolutional neural networks for volumetric medical image segmentation. IEEE, New Jersy, pp 565\u2013571"},{"key":"11225_CR20","unstructured":"Nair V, Hinton GE (2010) Rectified linear units improve restricted Boltzmann machines, Vinod Nair. Proceedings of the 27th International Conference on Machine Learning (ICML-10), Haifa, Israel, 807\u2013814"},{"key":"11225_CR21","unstructured":"Oktay O, Schlemper J, Folgoc L et al (2018) Attention U-Net: Learning Where to Look for the Pancreas. 1st Conference on Medical Imaging with Deep Learning (MIDL)"},{"key":"11225_CR22","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TMI.2017.2725639","volume":"38","author":"A Pinto","year":"2019","unstructured":"Pinto A, Amorim J, Ribeiro A et al (2019) Adaptive feature recombination and recalibration for semantic segmentation with fully convolutional networks. IEEE Trans Med Imaging 38:1\u201312","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR23","doi-asserted-by":"publisher","first-page":"20160667","DOI":"10.1259\/bjr.20160667","volume":"90","author":"J Pollard","year":"2017","unstructured":"Pollard J, Wen Z, Sadagopan R et al (2017) The future of image-guided radiotherapy will be MR-guided. Br J Radiol 90:20160667. 
https:\/\/doi.org\/10.1259\/bjr.20160667","journal-title":"Br J Radiol"},{"issue":"2","key":"11225_CR24","doi-asserted-by":"publisher","first-page":"540","DOI":"10.1109\/TMI.2018.2867261","volume":"38","author":"AG Roy","year":"2018","unstructured":"Roy AG, Navab N, Wachinger C (2018) Recalibrating fully convolutional networks with spatial and channel \u2018squeeze & excitation\u2019 blocks. IEEE Trans Med Imaging 38(2):540\u2013549","journal-title":"IEEE Trans Med Imaging"},{"key":"11225_CR25","doi-asserted-by":"publisher","first-page":"31","DOI":"10.1016\/j.neucom.2019.07.006","volume":"365","author":"L Rundo","year":"2019","unstructured":"Rundo L, Han C, Nagano Y, Zhang J (2019) USE-Net\u202f: incorporating squeeze-and-excitation blocks into U-Net for prostate zonal segmentation of multi-institutional MRI datasets. Neurocomputing 365:31\u201343","journal-title":"Neurocomputing"},{"key":"11225_CR26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24223-1_1","volume-title":"Lung cancer statistics","author":"LA Torre","year":"2016","unstructured":"Torre LA, Siegel RL, Jemal A (2016) Lung cancer statistics. Springer International Publishing, Cham"},{"key":"11225_CR27","first-page":"448","volume":"37","author":"K Tseng","year":"2015","unstructured":"Tseng K, Lin Y, Hsu W, Huang C (2015) Joint sequence learning and cross-modality convolution for 3D biomedical segmentation. Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit 37:448\u2013456","journal-title":"Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit"},{"key":"11225_CR28","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr42600.2020.01155","author":"Q Wang","year":"2020","unstructured":"Wang Q, Wu B, Zhu P et al (2020) ECA-net: efficient channel attention for deep convolutional neural networks. IEEE\/CVF Conf Comput Vis Pattern Recognit. https:\/\/doi.org\/10.1109\/cvpr42600.2020.01155","journal-title":"IEEE\/CVF Conf Comput Vis Pattern Recognit"},{"key":"11225_CR29","doi-asserted-by":"publisher","unstructured":"Woo S, Park J, Lee JY, Kweon IS (2018) CBAM: Convolutional Block Attention Module. In: Ferrari V., Hebert M., Sminchisescu C., Weiss Y. (eds) Computer Vision \u2013 ECCV 2018. ECCV 2018. Lecture Notes in Computer Science,  Springer, Cham.\u00a011211. https:\/\/doi.org\/10.1007\/978-3-030-01234-2_1","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"11225_CR30","first-page":"1","volume-title":"Attention guided network for retinal image segmentation","author":"S Zhang","year":"2019","unstructured":"Zhang S, Fu H, Yan Y et al (2019) Attention guided network for retinal image segmentation. Springer International Publishing, Cham, pp 1\u20138"},{"key":"11225_CR31","doi-asserted-by":"publisher","first-page":"54","DOI":"10.1007\/978-3-030-32692-0_7","volume-title":"Machine learning in medical imaging","author":"S Zhang","year":"2019","unstructured":"Zhang S, Zhang C, Wang L et al (2019) MSAFusionNet: multiple subspace attention based deep multi-modal fusion network. In: Suk H-I, Liu M, Yan P, Lian C (eds) Machine learning in medical imaging. Springer International Publishing, Cham, pp 54\u201362"},{"key":"11225_CR32","doi-asserted-by":"publisher","DOI":"10.1016\/j.array.2019.100004","author":"T Zhou","year":"2019","unstructured":"Zhou T, Ruan S (2019) A review: deep learning for medical image segmentation using multi-modality fusion. Array. 
https:\/\/doi.org\/10.1016\/j.array.2019.100004","journal-title":"Array"},{"key":"11225_CR33","doi-asserted-by":"publisher","unstructured":"Zhou T, Ruan S, Guo Y, Canu S (2020) A multi-modality fusion network based on attention mechanism for brain tumor segmentation. 2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI). 377\u2013380.\u00a0https:\/\/doi.org\/10.1109\/ISBI45749.2020.9098392","DOI":"10.1109\/ISBI45749.2020.9098392"},{"key":"11225_CR34","doi-asserted-by":"publisher","unstructured":"Zhu W, Huang Y, Tang H et al (2018) AnatomyNet: Deep 3D Squeeze-and-excitation U-Nets for fast and fully automated whole-volume anatomical segmentation. bioRxiv. https:\/\/doi.org\/10.1101\/392969","DOI":"10.1101\/392969"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-021-11225-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-021-11225-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-021-11225-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,10,21]],"date-time":"2021-10-21T21:08:56Z","timestamp":1634850536000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-021-11225-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,26]]},"references-count":34,"journal-issue":{"issue":"25","published-print":{"date-parts":[[2021,10]]}},"alternative-id":["11225"],"URL":"https:\/\/doi.org\/10.1007\/s11042-021-11225-z","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"type":"print","value":"1380-7501"},{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2021,8,26]]},"assertion":[{"value":"6 October 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 January 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 July 2021","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 August 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
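
The record above is a standard Crossref "work" message: the bibliographic fields (title, authors, journal, DOI, reference list, citation count) all sit under the top-level "message" object. As a minimal sketch of how such a record can be retrieved and inspected, the following Python snippet queries the public Crossref REST API for this article's DOI and prints the same fields that appear in the record; it assumes network access and the third-party requests library, and the field names used are taken directly from the record shown above.

    # Sketch: fetch and inspect a Crossref work record (assumes `requests` is installed).
    import requests

    DOI = "10.1007/s11042-021-11225-z"  # DOI of the record shown above

    resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
    resp.raise_for_status()
    work = resp.json()["message"]  # the "message" object holds the bibliographic fields

    print(work["title"][0])               # article title
    print(work["container-title"][0])     # journal name ("Multimedia Tools and Applications")
    print(work["DOI"])                    # DOI as registered

    # Authors are a list of {"given": ..., "family": ...} objects.
    authors = ", ".join(
        f'{a.get("given", "")} {a.get("family", "")}'.strip()
        for a in work.get("author", [])
    )
    print(authors)

    print(work.get("is-referenced-by-count"))  # citation count at deposit time
    print(len(work.get("reference", [])))      # number of cited references (34 in this record)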