{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:11:40Z","timestamp":1777655500326,"version":"3.51.4"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2022]]},"DOI":"10.1109\/access.2022.3211501","type":"journal-article","created":{"date-parts":[[2022,10,3]],"date-time":"2022-10-03T20:20:19Z","timestamp":1664828419000},"page":"108205-108215","source":"Crossref","is-referenced-by-count":127,"title":["TransNorm: Transformer Provides a Strong Spatial Normalization Mechanism for a Deep Segmentation Model"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4772-2161","authenticated-orcid":false,"given":"Reza","family":"Azad","sequence":"first","affiliation":[{"name":"Institute of Imaging and Computer Vision, RWTH Aachen University, Aachen, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8820-2843","authenticated-orcid":false,"given":"Mohammad T.","family":"Al-Antary","sequence":"additional","affiliation":[{"name":"School of Computing and Mathematical Sciences, University of Greenwich, London, U.K."}]},{"given":"Moein","family":"Heidari","sequence":"additional","affiliation":[{"name":"School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1672-2185","authenticated-orcid":false,"given":"Dorit","family":"Merhof","sequence":"additional","affiliation":[{"name":"Faculty of Informatics and Data Science, University of Regensburg, Regensburg, Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-022-07859-1"},{"key":"ref2","article-title":"Pyramid medical transformer for medical image segmentation","volume-title":"arXiv:2104.14702","author":"Zhang","year":"2021"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00052"},{"key":"ref4","article-title":"TransUNet: Transformers make strong encoders for medical image segmentation","volume-title":"arXiv:2102.04306","author":"Chen","year":"2021"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-21014-3_39"},{"key":"ref6","article-title":"Multi-scale regional attention Deeplab3+: Multiple myeloma plasma cells segmentation in microscopic images","volume-title":"arXiv:2105.06238","author":"Bozorgpour","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.7303\/SYN3193805"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref10","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"arXiv:1810.04805","author":"Devlin","year":"2018"},{"key":"ref11","first-page":"1","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"key":"ref12","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","volume-title":"arXiv:2010.11929","author":"Dosovitskiy","year":"2020"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref14","first-page":"1691","article-title":"Generative pretraining from pixels","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Chen"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00181"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.isprsjprs.2020.01.013"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1049\/iet-ipr.2019.1527"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00889-5_1"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053405"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46723-8_49"},{"key":"ref21","article-title":"Attention U-Net: Learning where to look for the pancreas","volume-title":"arXiv:1804.03999","author":"Oktay","year":"2018"},{"key":"ref22","article-title":"Multi-scale context aggregation by dilated convolutions","volume-title":"arXiv:1511.07122","author":"Yu","year":"2015"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2017.660"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00326"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"ref28","first-page":"1","article-title":"Twins: Revisiting the design of spatial attention in vision transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Chu"},{"key":"ref29","article-title":"Swin-Unet: Unet-like pure transformer for medical image segmentation","volume-title":"arXiv:2105.05537","author":"Cao","year":"2021"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2021.102327"},{"key":"ref31","article-title":"Layer normalization","volume-title":"arXiv:1607.06450","author":"Ba","year":"2016"},{"key":"ref32","article-title":"Self-supervised pre-training of Swin transformers for 3D medical image analysis","volume-title":"arXiv:2111.14791","author":"Tang","year":"2021"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI.2018.8363547"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00366"},{"key":"ref35","article-title":"Recurrent residual convolutional neural network based on U-Net (R2U-Net) for medical image segmentation","volume-title":"arXiv:1802.06955","author":"Alom","year":"2018"},{"key":"ref36","article-title":"Skin lesion analysis toward melanoma detection 2018: A challenge hosted by the international skin imaging collaboration (ISIC)","volume-title":"arXiv:1902.03368","author":"Codella","year":"2019"},{"key":"ref37","article-title":"Multi-level context gating of embedded collective knowledge for medical image segmentation","volume-title":"arXiv:2003.05056","author":"Asadi-Aghbolaghi","year":"2020"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/EMBC.2013.6610779"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-66415-2_16"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0207908"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/3DV.2016.79"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2020.101716"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-87193-2_4"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i3.16374"},{"key":"ref45","article-title":"ResNeSt: Split-attention networks","volume-title":"arXiv:2004.08955","author":"Zhang","year":"2020"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/INISTA52262.2021.9548367"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9668973\/09908565.pdf?arnumber=9908565","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,24]],"date-time":"2024-01-24T02:08:13Z","timestamp":1706062093000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9908565\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/access.2022.3211501","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]}}}