{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T09:51:26Z","timestamp":1774518686626,"version":"3.50.1"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Science and Technology Innovation (STI) 2030\u2013Major Projects","award":["2021ZD0204500"],"award-info":[{"award-number":["2021ZD0204500"]}]},{"name":"Science and Technology Innovation (STI) 2030\u2013Major Projects","award":["2021ZD0204503"],"award-info":[{"award-number":["2021ZD0204503"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["32171461"],"award-info":[{"award-number":["32171461"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Med. Imaging"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tmi.2024.3511599","type":"journal-article","created":{"date-parts":[[2024,12,5]],"date-time":"2024-12-05T19:06:48Z","timestamp":1733425608000},"page":"1624-1635","source":"Crossref","is-referenced-by-count":1,"title":["Re-Isotropic Segmentation for Subcellular Ultrastructure in Anisotropic EM Images"],"prefix":"10.1109","volume":"44","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-4042-8182","authenticated-orcid":false,"given":"Jinyue","family":"Guo","sequence":"first","affiliation":[{"name":"Key Laboratory of Brain Cognition and Brain-Inspired Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"given":"Zejin","family":"Wang","sequence":"additional","affiliation":[{"name":"Key Laboratory of Brain Cognition and Brain-Inspired Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4149-3131","authenticated-orcid":false,"given":"Hao","family":"Zhai","sequence":"additional","affiliation":[{"name":"Key Laboratory of Brain Cognition and Brain-Inspired Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-9313-5815","authenticated-orcid":false,"given":"Yanchao","family":"Zhang","sequence":"additional","affiliation":[{"name":"Key Laboratory of Brain Cognition and Brain-Inspired Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"given":"Jing","family":"Liu","sequence":"additional","affiliation":[{"name":"Key Laboratory of Brain Cognition and Brain-Inspired Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4713-4631","authenticated-orcid":false,"given":"Hua","family":"Han","sequence":"additional","affiliation":[{"name":"Key Laboratory of Brain Cognition and Brain-Inspired Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nmeth.4206"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI45749.2020.9098489"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.conb.2011.10.022"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102920"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/122"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR48806.2021.9411990"},{"key":"ref7","first-page":"1","article-title":"Recursive training of 2D-3D convolutional networks for neuronal boundary prediction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Lee"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00937-3_51"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015909"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32226-7_20"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/wacv57701.2024.00582"},{"key":"ref12","article-title":"Superhuman accuracy on the SNEMI3D connectomics challenge","author":"Lee","year":"2017","journal-title":"arXiv:1706.00120"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00934-2_36"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3389\/fnana.2018.00092"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32245-8_30"},{"issue":"2","key":"ref16","doi-asserted-by":"crossref","first-page":"203","DOI":"10.1038\/s41592-020-01008-z","article-title":"NnU-Net: A self-configuring method for deep learning-based biomedical image segmentation","volume":"18","author":"Isensee","year":"2021","journal-title":"Nature Methods"},{"key":"ref17","article-title":"NnFormer: Interleaved transformer for volumetric segmentation","author":"Zhou","year":"2021","journal-title":"arXiv:2109.03201"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2022.3176050"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/S0140-6736(18)31645-3"},{"key":"ref20","article-title":"Edge-gated CNNs for volumetric semantic segmentation of medical images","author":"Hatamizadeh","year":"2020","journal-title":"arXiv:2002.04207"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1017\/S1431927619007554"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref23","first-page":"424","article-title":"3D U-Net: Learning dense volumetric segmentation from sparse annotation","volume-title":"Proc. Int. Conf. Med. Image Comput. Comput.-Assist. Interv.","author":"\u00c7i\u00e7ek"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2021.3089547"},{"key":"ref25","article-title":"3D UX-net: A large kernel volumetric ConvNet modernizing hierarchical transformer for medical image segmentation","author":"Hin Lee","year":"2022","journal-title":"arXiv:2209.15076"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/3DV.2016.79"},{"key":"ref27","first-page":"348","article-title":"On the compactness, efficiency, and representation of 3D convolutional networks: brain parcellation as a pretext task","volume-title":"Proc. 25th Int. Conf.","author":"Li"},{"key":"ref28","first-page":"2286","article-title":"ConViT: Improving vision transformers with soft convolutional inductive biases","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"D\u2019Ascoli"},{"key":"ref29","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00181"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2024.3398728"},{"key":"ref32","first-page":"220","article-title":"Training vision transformers with only 2040 images","volume-title":"Proc. 17th Eur. Conf.","author":"Cao"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01393"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00207"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2022.3194984"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/EUVIP58404.2023.10323074"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref38","first-page":"234","article-title":"U-Net: Convolutional networks for biomedical image segmentation","volume-title":"Proc. Int. Conf. Med. Image Comput. Comput.-Assist. Intervent.","author":"Ronneberger"},{"key":"ref39","first-page":"1096","article-title":"Extracting and composing robust features with denoising autoencoders","volume-title":"Proc. 25th Int. Conf. Mach. Learn. (ICML)","author":"Vincent"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.cell.2015.06.054"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-59722-1_7"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2011.2171705"},{"key":"ref43","first-page":"111","article-title":"Fast mitochondria detection for connectomics","volume-title":"Proc. 3rd Conf. Med. Imag. Deep Learn.","author":"Casser"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2699184"}],"container-title":["IEEE Transactions on Medical Imaging"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/42\/10948536\/10778623.pdf?arnumber=10778623","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T19:57:49Z","timestamp":1743796669000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10778623\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":44,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tmi.2024.3511599","relation":{},"ISSN":["0278-0062","1558-254X"],"issn-type":[{"value":"0278-0062","type":"print"},{"value":"1558-254X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}