{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T16:14:40Z","timestamp":1774368880207,"version":"3.50.1"},"reference-count":60,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2025,1,23]],"date-time":"2025-01-23T00:00:00Z","timestamp":1737590400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0"},{"start":{"date-parts":[[2025,1,23]],"date-time":"2025-01-23T00:00:00Z","timestamp":1737590400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0"}],"funder":[{"DOI":"10.13039\/501100013348","name":"Innosuisse - Schweizerische Agentur f\u00fcr Innovationsf\u00f6rderung","doi-asserted-by":"publisher","award":["59519IP-LS"],"award-info":[{"award-number":["59519IP-LS"]}],"id":[{"id":"10.13039\/501100013348","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Digit Imaging. Inform. med."],"abstract":"<jats:title>Abstract<\/jats:title>\n                  <jats:p>Accurate wound segmentation is crucial for the precise diagnosis and treatment of various skin conditions through image analysis. In this paper, we introduce a novel dual attention U-Net model designed for precise wound segmentation. Our proposed architecture integrates two widely used deep learning models, VGG16 and U-Net, incorporating dual attention mechanisms to focus on relevant regions within the wound area. Initially trained on diabetic foot ulcer images, we fine-tuned the model to acute and chronic wound images and conducted a comprehensive comparison with other state-of-the-art models. The results highlight the superior performance of our proposed dual attention model, achieving a Dice coefficient and IoU of 94.1% and 89.3%, respectively, on the test set. This underscores the robustness of our method and its capacity to generalize effectively to new data.<\/jats:p>","DOI":"10.1007\/s10278-025-01386-w","type":"journal-article","created":{"date-parts":[[2025,1,23]],"date-time":"2025-01-23T13:10:24Z","timestamp":1737637824000},"page":"3351-3365","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Wound Segmentation with U-Net Using a Dual Attention Mechanism and Transfer Learning"],"prefix":"10.1007","volume":"38","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5231-3445","authenticated-orcid":false,"given":"Rania","family":"Niri","sequence":"first","affiliation":[]},{"given":"Sofia","family":"Zahia","sequence":"additional","affiliation":[]},{"given":"Alessio","family":"Stefanelli","sequence":"additional","affiliation":[]},{"given":"Kaushal","family":"Sharma","sequence":"additional","affiliation":[]},{"given":"Sebastian","family":"Probst","sequence":"additional","affiliation":[]},{"given":"Swann","family":"Pichon","sequence":"additional","affiliation":[]},{"given":"Guillaume","family":"Chanel","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,23]]},"reference":[{"key":"1386_CR1","doi-asserted-by":"crossref","unstructured":"C.\u00a0K. 
Sen, Human wound and its burden: updated 2020 compendium of estimates, advances in wound care, vol.\u00a010, no.\u00a05, pp.\u00a0281\u2013292, 2021.","DOI":"10.1089\/wound.2021.0026"},{"key":"1386_CR2","doi-asserted-by":"crossref","unstructured":"M.\u00a0Edmonds, C.\u00a0Manu, and P.\u00a0Vas, \u201cThe current burden of diabetic foot disease,\u201d Journal of clinical orthopaedics and trauma, vol.\u00a017, pp.\u00a088\u201393, 2021.","DOI":"10.1016\/j.jcot.2021.01.017"},{"key":"1386_CR3","doi-asserted-by":"crossref","unstructured":"G.\u00a0FrykbergRobert et\u00a0al., \u201cChallenges in the treatment of chronic wounds,\u201d Advances in wound care, 2015.","DOI":"10.1089\/wound.2015.0635"},{"key":"1386_CR4","unstructured":"E.\u00a0C. Montero, L.\u00a0Atkin, M.\u00a0Collier, A.\u00a0Hogh, J.\u00a0D. Ivory, K.\u00a0Kirketerp-Moller, S.\u00a0Meaume, H.\u00a0Ryan, E.\u00a0K. Stuermer, G.-S. Tiplica, et\u00a0al., \u201cLower leg ulcer diagnosis and principles of treatment,\u201d Journal of Wound Management, vol.\u00a024, no.\u00a02, pp.\u00a0S1\u2013S75, 2023."},{"key":"1386_CR5","doi-asserted-by":"crossref","unstructured":"C.\u00a0T. Hess, \u201cComprehensive patient and wound assessments,\u201d Advances in Skin & Wound Care, vol.\u00a032, no.\u00a06, pp.\u00a0287\u2013288, 2019.","DOI":"10.1097\/01.ASW.0000558514.64758.7f"},{"key":"1386_CR6","doi-asserted-by":"crossref","unstructured":"O.\u00a0Ronneberger, P.\u00a0Fischer, and T.\u00a0Brox, \u201cU-net: Convolutional networks for biomedical image segmentation,\u201d in International Conference on Medical image computing and computer-assisted intervention, pp.\u00a0234\u2013241, Springer, 2015.","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"1386_CR7","unstructured":"O.\u00a0Oktay, J.\u00a0Schlemper, L.\u00a0L. Folgoc, M.\u00a0Lee, M.\u00a0Heinrich, K.\u00a0Misawa, K.\u00a0Mori, S.\u00a0McDonagh, N.\u00a0Y. Hammerla, B.\u00a0Kainz, et\u00a0al., \u201cAttention u-net: Learning where to look for the pancreas,\u201d arXiv preprint arXiv:1804.03999, 2018."},{"key":"1386_CR8","doi-asserted-by":"crossref","unstructured":"J.\u00a0Hu, L.\u00a0Shen, and G.\u00a0Sun, \u201cSqueeze-and-excitation networks,\u201d in Proceedings of the IEEE conference on computer vision and pattern recognition, pp.\u00a07132\u20137141, 2018.","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1386_CR9","unstructured":"K.\u00a0Simonyan and A.\u00a0Zisserman, \u201cVery deep convolutional networks for large-scale image recognition,\u201d arXiv preprint arXiv:1409.1556, 2014."},{"key":"1386_CR10","doi-asserted-by":"crossref","unstructured":"D.\u00a0H. Keast, C.\u00a0K. Bowering, A.\u00a0W. Evans, G.\u00a0L. Mackean, C.\u00a0Burrows, and L.\u00a0D\u2019Souza, \u201cContents: Measure: A proposed assessment framework for developing best practice recommendations for wound assessment,\u201d Wound Repair and Regeneration, vol.\u00a012, pp.\u00a0s1\u2013s17, 2004.","DOI":"10.1111\/j.1067-1927.2004.0123S1.x"},{"key":"1386_CR11","unstructured":"C.\u00a0Wang, X.\u00a0Yan, M.\u00a0Smith, K.\u00a0Kochhar, M.\u00a0Rubin, S.\u00a0M. 
Warren, J.\u00a0Wrobel, and H.\u00a0Lee, \u201cA unified framework for automatic wound segmentation and analysis with deep convolutional neural networks,\u201d in 2015 37th annual international conference of the ieee engineering in medicine and biology society (EMBC), pp.\u00a02415\u20132418, IEEE, 2015."},{"key":"1386_CR12","doi-asserted-by":"crossref","unstructured":"N.\u00a0Silberman, D.\u00a0Hoiem, P.\u00a0Kohli, and R.\u00a0Fergus, \u201cIndoor segmentation and support inference from rgbd images,\u201d in European conference on computer vision, pp.\u00a0746\u2013760, Springer, 2012.","DOI":"10.1007\/978-3-642-33715-4_54"},{"key":"1386_CR13","doi-asserted-by":"crossref","unstructured":"M.\u00a0Goyal, M.\u00a0H. Yap, N.\u00a0D. Reeves, S.\u00a0Rajbhandari, and J.\u00a0Spragg, \u201cFully convolutional networks for diabetic foot ulcer segmentation,\u201d in 2017 IEEE international conference on systems, man, and cybernetics (SMC), pp.\u00a0618\u2013623, IEEE, 2017.","DOI":"10.1109\/SMC.2017.8122675"},{"key":"1386_CR14","doi-asserted-by":"crossref","unstructured":"F.\u00a0Li, C.\u00a0Wang, X.\u00a0Liu, Y.\u00a0Peng, and S.\u00a0Jin, \u201cA composite model of wound segmentation based on traditional methods and deep neural networks,\u201d Computational intelligence and neuroscience, vol.\u00a02018, 2018.","DOI":"10.1155\/2018\/4149103"},{"key":"1386_CR15","doi-asserted-by":"crossref","unstructured":"R.\u00a0Niri, E.\u00a0Gutierrez, H.\u00a0Douzi, Y.\u00a0Lucas, S.\u00a0Treuillet, B.\u00a0Castaneda, and I.\u00a0Hernandez, \u201cMulti-view data augmentation to improve wound segmentation on 3d surface model by deep learning,\u201d IEEE Access, vol.\u00a09, pp.\u00a0157628\u2013157638, 2021.","DOI":"10.1109\/ACCESS.2021.3130784"},{"key":"1386_CR16","doi-asserted-by":"crossref","unstructured":"A.\u00a0Mahbod, G.\u00a0Schaefer, R.\u00a0Ecker, and I.\u00a0Ellinger, \u201cAutomatic foot ulcer segmentation using an ensemble of convolutional neural networks,\u201d in 2022 26th International Conference on Pattern Recognition (ICPR), pp.\u00a04358\u20134364, IEEE, 2022.","DOI":"10.1109\/ICPR56361.2022.9956253"},{"key":"1386_CR17","doi-asserted-by":"crossref","unstructured":"H.\u00a0Gamage, W.\u00a0Wijesinghe, and I.\u00a0Perera, \u201cInstance-based segmentation for boundary detection of neuropathic ulcers through mask-rcnn,\u201d in International Conference on Artificial Neural Networks, pp.\u00a0511\u2013522, Springer, 2019.","DOI":"10.1007\/978-3-030-30493-5_49"},{"key":"1386_CR18","doi-asserted-by":"crossref","unstructured":"S.\u00a0Zahia, B.\u00a0Garcia-Zapirain, and A.\u00a0Elmaghraby, \u201cIntegrating 3d model representation for an accurate non-invasive assessment of pressure injuries with deep learning,\u201d Sensors, vol.\u00a020, no.\u00a010, p.\u00a02933, 2020.","DOI":"10.3390\/s20102933"},{"key":"1386_CR19","unstructured":"C.\u00a0Kendrick, B.\u00a0Cassidy, J.\u00a0M. Pappachan, C.\u00a0O\u2019Shea, C.\u00a0J. Fernandez, E.\u00a0Chacko, K.\u00a0Jacob, N.\u00a0D. Reeves, and M.\u00a0H. Yap, \u201cTranslating clinical delineation of diabetic foot ulcers into machine interpretable segmentation,\u201d arXiv preprint arXiv:2204.11618, 2022."},{"key":"1386_CR20","doi-asserted-by":"crossref","unstructured":"Y.-H. Chen, Y.-J. Ju, and J.-D. 
Huang, \u201cCapture the devil in the details via partition-then-ensemble on higher resolution images,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a052\u201364, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_5"},{"key":"1386_CR21","doi-asserted-by":"crossref","unstructured":"T.-Y. Liao, C.-H. Yang, Y.-W. Lo, K.-Y. Lai, P.-H. Shen, and Y.-L. Lin, \u201cHardnet-dfus: Enhancing backbone and decoder of hardnet-mseg for diabetic foot ulcer image segmentation,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a021\u201330, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_2"},{"key":"1386_CR22","unstructured":"C.-H. Huang, H.-Y. Wu, and Y.-L. Lin, \u201cHardnet-mseg: A simple encoder-decoder polyp segmentation neural network that achieves over 0.9 mean dice and 86 fps,\u201d arXiv preprint arXiv:2101.07172, 2021."},{"key":"1386_CR23","doi-asserted-by":"crossref","unstructured":"H.\u00a0Yi, W.\u00a0Xu, Z.\u00a0Jiang, J.\u00a0Gao, Q.\u00a0Kang, Q.\u00a0Lao, and K.\u00a0Li, \u201cOcrnet for diabetic foot ulcer segmentation combined with edge loss,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a031\u201339, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_3"},{"key":"1386_CR24","doi-asserted-by":"crossref","unstructured":"V.\u00a0Gupta, A.\u00a0Gupta, N.\u00a0Arora, and J.\u00a0Garg, \u201cOcrnet - light-weighted and efficient neural network for optical character recognition,\u201d in 2021 IEEE Bombay Section Signature Conference (IBSSC), pp.\u00a01\u20134, 2021.","DOI":"10.1109\/IBSSC53889.2021.9673254"},{"key":"1386_CR25","doi-asserted-by":"crossref","unstructured":"G.\u00a0Scebba, J.\u00a0Zhang, S.\u00a0Catanzaro, C.\u00a0Mihai, O.\u00a0Distler, M.\u00a0Berli, and W.\u00a0Karlen, \u201cDetect-and-segment: A deep learning approach to automate wound image segmentation,\u201d Informatics in Medicine Unlocked, vol.\u00a029, p.\u00a0100884, 2022.","DOI":"10.1016\/j.imu.2022.100884"},{"key":"1386_CR26","doi-asserted-by":"crossref","unstructured":"M.\u00a0Hassib, M.\u00a0Ali, A.\u00a0Mohamed, M.\u00a0Torki, and M.\u00a0Hussein, \u201cDiabetic foot ulcer segmentation using convolutional and transformer-based models,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a083\u201391, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_7"},{"key":"1386_CR27","doi-asserted-by":"crossref","unstructured":"D.\u00a0Kucharski, A.\u00a0Kostuch, F.\u00a0Noworolnik, A.\u00a0Brodzicki, and J.\u00a0Jaworek-Korjakowska, \u201cDfu-ens: End-to-end diabetic foot ulcer segmentation framework with vision transformer based detection,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a0101\u2013112, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_9"},{"key":"1386_CR28","doi-asserted-by":"crossref","unstructured":"B.\u00a0Cassidy, C.\u00a0Mcbride, C.\u00a0Kendrick, N.\u00a0D. Reeves, J.\u00a0M. Pappachan, C.\u00a0J. Fernandez, E.\u00a0Chacko, R.\u00a0Br\u00fcngel, C.\u00a0M. Friedrich, M.\u00a0Alotaibi, et\u00a0al., \u201cAn enhanced harmonic densely connected hybrid transformer network architecture for chronic wound segmentation utilising multi-colour space tensor merging,\u201d arXiv preprint arXiv:2410.03359, 2024.","DOI":"10.1016\/j.compbiomed.2025.110172"},{"key":"1386_CR29","doi-asserted-by":"crossref","unstructured":"A.\u00a0Galdran, G.\u00a0Carneiro, and M.\u00a0A.\u00a0G. 
Ballester, \u201cOn the optimal combination of cross-entropy and soft dice losses for lesion segmentation with out-of-distribution robustness,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a040\u201351, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_4"},{"key":"1386_CR30","doi-asserted-by":"crossref","unstructured":"D.\u00a0J. Hresko, J.\u00a0Vereb, V.\u00a0Krigovsky, M.\u00a0Gayova, and P.\u00a0Drotar, \u201cRefined mixup augmentation for diabetic foot ulcer segmentation,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a092\u2013100, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_8"},{"key":"1386_CR31","doi-asserted-by":"crossref","unstructured":"R.\u00a0Br\u00fcngel, S.\u00a0Koitka, and C.\u00a0M. Friedrich, \u201cUnconditionally generated and pseudo-labeled synthetic images for diabetic foot ulcer segmentation dataset extension,\u201d in Diabetic Foot Ulcers Grand Challenge, pp.\u00a065\u201379, Springer, 2022.","DOI":"10.1007\/978-3-031-26354-5_6"},{"key":"1386_CR32","doi-asserted-by":"crossref","unstructured":"M.-H. Guo, T.-X. Xu, J.-J. Liu, Z.-N. Liu, P.-T. Jiang, T.-J. Mu, S.-H. Zhang, R.\u00a0R. Martin, M.-M. Cheng, and S.-M. Hu, \u201cAttention mechanisms in computer vision: A survey,\u201d Computational visual media, vol.\u00a08, no.\u00a03, pp.\u00a0331\u2013368, 2022.","DOI":"10.1007\/s41095-022-0271-y"},{"key":"1386_CR33","doi-asserted-by":"crossref","unstructured":"S.\u00a0Wang, L.\u00a0Li, and X.\u00a0Zhuang, \u201cAttu-net: attention u-net for brain tumor segmentation,\u201d in International MICCAI Brainlesion Workshop, pp.\u00a0302\u2013311, Springer, 2021.","DOI":"10.1007\/978-3-031-09002-8_27"},{"key":"1386_CR34","doi-asserted-by":"crossref","unstructured":"Z.\u00a0Li, H.\u00a0Zhang, Z.\u00a0Li, and Z.\u00a0Ren, \u201cResidual-attention unet++: A nested residual-attention u-net for medical image segmentation,\u201d Applied Sciences, vol.\u00a012, no.\u00a014, p.\u00a07149, 2022.","DOI":"10.3390\/app12147149"},{"key":"1386_CR35","doi-asserted-by":"crossref","unstructured":"J.\u00a0Zhang, X.\u00a0Lv, H.\u00a0Zhang, and B.\u00a0Liu, \u201cAresu-net: Attention residual u-net for brain tumor segmentation,\u201d Symmetry, vol.\u00a012, no.\u00a05, p.\u00a0721, 2020.","DOI":"10.3390\/sym12050721"},{"key":"1386_CR36","doi-asserted-by":"crossref","unstructured":"J.\u00a0Wang, P.\u00a0Lv, H.\u00a0Wang, and C.\u00a0Shi, \u201cSar-u-net: Squeeze-and-excitation block and atrous spatial pyramid pooling based residual u-net for automatic liver segmentation in computed tomography,\u201d Computer Methods and Programs in Biomedicine, vol.\u00a0208, p.\u00a0106268, 2021.","DOI":"10.1016\/j.cmpb.2021.106268"},{"key":"1386_CR37","doi-asserted-by":"crossref","unstructured":"G.\u00a0Prasanna, J.\u00a0R. Ernest, S.\u00a0Narayanan, et\u00a0al., \u201cSqueeze excitation embedded attention unet for brain tumor segmentation,\u201d arXiv preprint arXiv:2305.07850, 2023.","DOI":"10.1007\/978-981-99-6855-8_9"},{"key":"1386_CR38","doi-asserted-by":"crossref","unstructured":"S.\u00a0Liang, T.\u00a0Wang, C.\u00a0Chen, H.\u00a0Liu, C.\u00a0Qin, and Y.\u00a0Feng, \u201cRsea-net: Residual squeeze and excitation attention network for medical image segmentation,\u201d BMC Medical Imaging, 2022.","DOI":"10.21203\/rs.3.rs-1419097\/v1"},{"key":"1386_CR39","unstructured":"J.\u00a0Chae, K.\u00a0Y. 
Hong, and J.\u00a0Kim, \u201cA pressure ulcer care system for remote medical assistance: residual u-net with an attention model based for wound area segmentation,\u201d arXiv preprint arXiv:2101.09433, 2021."},{"key":"1386_CR40","doi-asserted-by":"crossref","unstructured":"J.\u00a0Ma, Y.\u00a0He, F.\u00a0Li, L.\u00a0Han, C.\u00a0You, and B.\u00a0Wang, \u201cSegment anything in medical images,\u201d Nature Communications, vol.\u00a015, no.\u00a01, p.\u00a0654, 2024.","DOI":"10.1038\/s41467-024-44824-z"},{"key":"1386_CR41","doi-asserted-by":"crossref","unstructured":"A.\u00a0A. Pravitasari, N.\u00a0Iriawan, M.\u00a0Almuhayar, T.\u00a0Azmi, I.\u00a0Irhamah, K.\u00a0Fithriasari, S.\u00a0W. Purnami, and W.\u00a0Ferriastuti, \u201cUnet-vgg16 with transfer learning for mri-based brain tumor segmentation,\u201d TELKOMNIKA (Telecommunication Computing Electronics and Control), vol.\u00a018, no.\u00a03, pp.\u00a01310\u20131318, 2020.","DOI":"10.12928\/telkomnika.v18i3.14753"},{"key":"1386_CR42","doi-asserted-by":"crossref","unstructured":"A.\u00a0Huang, Q.\u00a0Wang, L.\u00a0Jiang, and J.\u00a0Zhang, \u201cAutomatic segmentation of median nerve in ultrasound image by a combined use of u-net and vgg16,\u201d in 2021 IEEE International Ultrasonics Symposium (IUS), pp.\u00a01\u20134, 2021.","DOI":"10.1109\/IUS52206.2021.9593861"},{"key":"1386_CR43","doi-asserted-by":"crossref","unstructured":"S.\u00a0Woo, J.\u00a0Park, J.-Y. Lee, and I.\u00a0S. Kweon, \u201cCbam: Convolutional block attention module,\u201d in Proceedings of the European conference on computer vision (ECCV), pp.\u00a03\u201319, 2018.","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"1386_CR44","doi-asserted-by":"crossref","unstructured":"J.\u00a0Fu, J.\u00a0Liu, H.\u00a0Tian, Y.\u00a0Li, Y.\u00a0Bao, Z.\u00a0Fang, and H.\u00a0Lu, \u201cDual attention network for scene segmentation,\u201d in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp.\u00a03146\u20133154, 2019.","DOI":"10.1109\/CVPR.2019.00326"},{"key":"1386_CR45","doi-asserted-by":"crossref","unstructured":"Z.\u00a0Gao, J.\u00a0Xie, Q.\u00a0Wang, and P.\u00a0Li, \u201cGlobal second-order pooling convolutional networks,\u201d in Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, pp.\u00a03024\u20133033, 2019.","DOI":"10.1109\/CVPR.2019.00314"},{"key":"1386_CR46","doi-asserted-by":"crossref","unstructured":"L.\u00a0Rundo, C.\u00a0Han, Y.\u00a0Nagano, J.\u00a0Zhang, R.\u00a0Hataya, C.\u00a0Militello, A.\u00a0Tangherloni, M.\u00a0S. Nobile, C.\u00a0Ferretti, D.\u00a0Besozzi, et\u00a0al., \u201cUse-net: Incorporating squeeze-and-excitation blocks into u-net for prostate zonal segmentation of multi-institutional mri datasets,\u201d Neurocomputing, vol.\u00a0365, pp.\u00a031\u201343, 2019.","DOI":"10.1016\/j.neucom.2019.07.006"},{"key":"1386_CR47","doi-asserted-by":"crossref","unstructured":"T.\u00a0B. 
Fitzpatrick, \u201cThe Validity and Practicality of Sun-Reactive Skin Types I Through VI,\u201d Archives of Dermatology, vol.\u00a0124, pp.\u00a0869\u2013871, 06 1988.","DOI":"10.1001\/archderm.1988.01670060015008"},{"key":"1386_CR48","doi-asserted-by":"crossref","unstructured":"M.\u00a0Kr\u0119cichwost, J.\u00a0Czajkowska, A.\u00a0Wijata, J.\u00a0Juszczyk, B.\u00a0Pyci\u0144ski, M.\u00a0Biesok, M.\u00a0Rudzki, J.\u00a0Majewski, J.\u00a0Kostecki, and E.\u00a0Pietka, \u201cChronic wounds multimodal image database,\u201d Computerized Medical Imaging and Graphics, vol.\u00a088, p.\u00a0101844, 2021.","DOI":"10.1016\/j.compmedimag.2020.101844"},{"issue":"9","key":"1386_CR49","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0163092","volume":"11","author":"S Yang","year":"2016","unstructured":"S.\u00a0Yang, J.\u00a0Park, H.\u00a0Lee, S.\u00a0Kim, B.-U. Lee, K.-Y. Chung, and B.\u00a0Oh, \u201cSequential change of wound calculated by image analysis using a color patch method during a secondary intention healing,\u201d PloS one, vol.\u00a011, no.\u00a09, p.\u00a0e0163092, 2016.","journal-title":"PloS one"},{"key":"1386_CR50","doi-asserted-by":"crossref","unstructured":"C.\u00a0Wang, A.\u00a0Mahbod, I.\u00a0Ellinger, A.\u00a0Galdran, S.\u00a0Gopalakrishnan, J.\u00a0Niezgoda, and Z.\u00a0Yu, \u201cFuseg: The foot ulcer segmentation challenge,\u201d Information, vol.\u00a015, no.\u00a03, p.\u00a0140, 2024.","DOI":"10.3390\/info15030140"},{"key":"1386_CR51","doi-asserted-by":"crossref","unstructured":"L.\u00a0R. Dice, \u201cMeasures of the amount of ecologic association between species,\u201d Ecology, vol.\u00a026, no.\u00a03, pp.\u00a0297\u2013302, 1945.","DOI":"10.2307\/1932409"},{"key":"1386_CR52","doi-asserted-by":"crossref","unstructured":"K.\u00a0Pearson, \u201cMathematical contributions to the theory of evolution.\u2014on a form of spurious correlation which may arise when indices are used in the measurement of organs,\u201d Proceedings of the royal society of london, vol.\u00a060, no.\u00a0359-367, pp.\u00a0489\u2013498, 1897.","DOI":"10.1098\/rspl.1896.0076"},{"key":"1386_CR53","doi-asserted-by":"crossref","unstructured":"K.\u00a0He, X.\u00a0Zhang, S.\u00a0Ren, and J.\u00a0Sun, \u201cDeep residual learning for image recognition,\u201d in Proceedings of the IEEE conference on computer vision and pattern recognition, pp.\u00a0770\u2013778, 2016.","DOI":"10.1109\/CVPR.2016.90"},{"key":"1386_CR54","doi-asserted-by":"crossref","unstructured":"C.\u00a0Szegedy, S.\u00a0Ioffe, V.\u00a0Vanhoucke, and A.\u00a0A. Alemi, \u201cInception-v4, inception-resnet and the impact of residual connections on learning,\u201d in Thirty-first AAAI conference on artificial intelligence, 2017.","DOI":"10.1609\/aaai.v31i1.11231"},{"key":"1386_CR55","unstructured":"A.\u00a0G. Howard, M.\u00a0Zhu, B.\u00a0Chen, D.\u00a0Kalenichenko, W.\u00a0Wang, T.\u00a0Weyand, M.\u00a0Andreetto, and H.\u00a0Adam, \u201cMobilenets: Efficient convolutional neural networks for mobile vision applications,\u201d arXiv preprint arXiv:1704.04861, 2017."},{"key":"1386_CR56","doi-asserted-by":"crossref","unstructured":"G.\u00a0Huang, Z.\u00a0Liu, L.\u00a0Van Der\u00a0Maaten, and K.\u00a0Q. 
Weinberger, \u201cDensely connected convolutional networks,\u201d in Proceedings of the IEEE conference on computer vision and pattern recognition, pp.\u00a04700\u20134708, 2017.","DOI":"10.1109\/CVPR.2017.243"},{"key":"1386_CR57","unstructured":"M.\u00a0Tan and Q.\u00a0Le, \u201cEfficientnet: Rethinking model scaling for convolutional neural networks,\u201d in International conference on machine learning, pp.\u00a06105\u20136114, PMLR, 2019."},{"key":"1386_CR58","doi-asserted-by":"crossref","unstructured":"L.-C. Chen, Y.\u00a0Zhu, G.\u00a0Papandreou, F.\u00a0Schroff, and H.\u00a0Adam, \u201cEncoder-decoder with atrous separable convolution for semantic image segmentation,\u201d in Proceedings of the European conference on computer vision (ECCV), pp.\u00a0801\u2013818, 2018.","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"1386_CR59","doi-asserted-by":"crossref","unstructured":"V.\u00a0Badrinarayanan, A.\u00a0Kendall, and R.\u00a0Cipolla, \u201cSegnet: A deep convolutional encoder-decoder architecture for image segmentation,\u201d IEEE transactions on pattern analysis and machine intelligence, vol.\u00a039, no.\u00a012, pp.\u00a02481\u20132495, 2017.","DOI":"10.1109\/TPAMI.2016.2644615"},{"key":"1386_CR60","doi-asserted-by":"crossref","unstructured":"A.\u00a0Chaurasia and E.\u00a0Culurciello, \u201cLinknet: Exploiting encoder representations for efficient semantic segmentation,\u201d in 2017 IEEE visual communications and image processing (VCIP), pp.\u00a01\u20134, IEEE, 2017.","DOI":"10.1109\/VCIP.2017.8305148"}],"container-title":["Journal of Imaging Informatics in Medicine"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10278-025-01386-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10278-025-01386-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10278-025-01386-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,29]],"date-time":"2025-10-29T22:48:47Z","timestamp":1761778127000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10278-025-01386-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1,23]]},"references-count":60,"journal-issue":{"issue":"5","published-online":{"date-parts":[[2025,10]]}},"alternative-id":["1386"],"URL":"https:\/\/doi.org\/10.1007\/s10278-025-01386-w","relation":{},"ISSN":["2948-2933"],"issn-type":[{"value":"2948-2933","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1,23]]},"assertion":[{"value":"28 June 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 December 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 December 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 January 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}
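
The record above is a standard Crossref REST API "work" message. As a minimal sketch of how such a record can be consumed, the Python snippet below fetches it from the public endpoint and reads out a few of the fields shown above; the DOI and the field names come from the record itself, everything else (variable names, the choice of printed fields) is illustrative.

import json
import urllib.request

# Crossref serves work metadata at https://api.crossref.org/works/<DOI>.
DOI = "10.1007/s10278-025-01386-w"
url = "https://api.crossref.org/works/" + DOI
with urllib.request.urlopen(url) as resp:
    work = json.load(resp)["message"]  # the payload sits under "message"

print(work["title"][0])                 # article title
print(work["container-title"][0])       # journal name
print(work["volume"], work["page"])     # volume 38, pages 3351-3365
print(len(work.get("reference", [])))   # 60 deposited references
for author in work["author"]:           # authors in deposited order
    print(author.get("given", ""), author["family"])

Note that the abstract field is JATS-tagged XML (the <jats:p> element above), so it needs XML-aware stripping rather than plain-text use.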
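The abstract describes a VGG16/U-Net hybrid with "dual attention mechanisms" but does not spell out the wiring here; the record's references to attention gates (1386_CR7) and squeeze-and-excitation (1386_CR8) suggest one plausible pairing of spatial and channel attention. The PyTorch sketch below is a hypothetical illustration of such a dual attention block on a skip connection, not the authors' implementation.

import torch
import torch.nn as nn

class SEBlock(nn.Module):
    # Channel attention: squeeze-and-excitation (reference 1386_CR8).
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.fc(x.mean(dim=(2, 3))).view(b, c, 1, 1)  # global average pool -> channel weights
        return x * w

class AttentionGate(nn.Module):
    # Spatial attention gate on a skip connection (reference 1386_CR7);
    # assumes the gating signal g was already upsampled to x's resolution.
    def __init__(self, g_ch, x_ch, inter_ch):
        super().__init__()
        self.wg = nn.Conv2d(g_ch, inter_ch, kernel_size=1)
        self.wx = nn.Conv2d(x_ch, inter_ch, kernel_size=1)
        self.psi = nn.Sequential(nn.Conv2d(inter_ch, 1, kernel_size=1), nn.Sigmoid())

    def forward(self, g, x):
        a = self.psi(torch.relu(self.wg(g) + self.wx(x)))  # per-pixel weights in [0, 1]
        return x * a

class DualAttentionSkip(nn.Module):
    # Hypothetical "dual attention": gate the encoder skip features
    # spatially, then reweight their channels with squeeze-and-excitation.
    def __init__(self, g_ch, x_ch):
        super().__init__()
        self.gate = AttentionGate(g_ch, x_ch, inter_ch=max(x_ch // 2, 1))
        self.se = SEBlock(x_ch)

    def forward(self, g, x):
        return self.se(self.gate(g, x))

In a U-Net with a VGG16 encoder, a block like this would sit on each skip connection, filtering the encoder features with the upsampled decoder features as the gating signal before concatenation.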