{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:28:36Z","timestamp":1763191716534,"version":"3.45.0"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228110","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["CFPFormer: Cross Feature-Pyramid Transformer Decoder for Medical Image Segmentation"],"prefix":"10.1109","author":[{"given":"Hongyi","family":"Cai","sequence":"first","affiliation":[{"name":"Universiti Malaya,Faculty of Computer Science,Kuala Lumpur,Malaysia"}]},{"given":"Mohammad Mahdinur","family":"Rahman","sequence":"additional","affiliation":[{"name":"Universiti Malaya,Faculty of Computer Science,Kuala Lumpur,Malaysia"}]},{"given":"Wenzhen","family":"Dong","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Department of Mechanical and Automation Engineering,Hong Kong,China"}]},{"given":"Jingyu","family":"Wu","sequence":"additional","affiliation":[{"name":"Fuzhou University of International Studies and Trade,Fujian,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2021","author":"Dosovitskiy","key":"ref4"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.1109\/WACV56688.2023.00365","article-title":"The Fully Convolutional Transformer for Medical Image Segmentation","author":"Tragakis","year":"2023"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25066-8_9"},{"article-title":"Attention u-net: Learning where to look for the pancreas","year":"2018","author":"Oktay","key":"ref7"},{"key":"ref8","first-page":"197","article-title":"Attention gated networks: Learning to leverage salient regions in medical images","volume-title":"Medical Image Analysis","volume":"53","author":"Schlemper","year":"2019"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00889-5_1"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053405"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.3390\/app132111657"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2017.07.005"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1049\/iet-ipr.2019.1527"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LGRS.2018.2802944"},{"article-title":"Multi-scale context aggregation by dilated convolutions","year":"2016","author":"Yu","key":"ref15"},{"article-title":"Rethinking atrous convolution for semantic image segmentation","year":"2017","author":"Chen","key":"ref16"},{"article-title":"Attention is all you need","year":"2023","author":"Vaswani","key":"ref17"},{"article-title":"Training data-efficient image transformers & distillation through attention","year":"2021","author":"Touvron","key":"ref18"},{"article-title":"TransUNet: Transformers make strong encoders for medical image segmentation","year":"2021","author":"Chen","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/wacv51458.2022.00181"},{"article-title":"Efficientvit: Multi-scale linear attention for high-resolution dense prediction","year":"2024","author":"Cai","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-87199-4_16"},{"article-title":"nnFormer: Interleaved transformer for volumetric segmentation","year":"2022","author":"Zhou","key":"ref25"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"article-title":"EfficientFormer: Vision transformers at MobileNet speed","year":"2022","author":"Li","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01150"},{"key":"ref31","article-title":"Pyramid vision transformer: A versatile backbone for dense prediction without convolutions","volume-title":"CoRR","volume":"abs\/2102.12122","author":"Wang","year":"2021"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.62836\/jitp.v1i1.156"},{"article-title":"Enhancing efficiency in vision transformer networks: Design techniques and insights","year":"2024","author":"Heidari","key":"ref33"},{"article-title":"Rmt: Retentive networks meet vision transformers","year":"2023","author":"Fan","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2018.2837502"},{"key":"ref36","article-title":"Synapse | Sage Bionetworks"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"Kingma","key":"ref37"},{"article-title":"Adaptive t-vmf dice loss for multi-class medical image segmentation","year":"2022","author":"Kato","key":"ref38"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01118"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00616"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00365"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00503"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3235002"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228110.pdf?arnumber=11228110","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:26:44Z","timestamp":1763191604000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228110\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228110","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}