{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T15:34:02Z","timestamp":1774366442037,"version":"3.50.1"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2023,9,19]],"date-time":"2023-09-19T00:00:00Z","timestamp":1695081600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,9,19]],"date-time":"2023-09-19T00:00:00Z","timestamp":1695081600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62071267"],"award-info":[{"award-number":["62071267"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1007\/s00530-023-01184-w","type":"journal-article","created":{"date-parts":[[2023,9,19]],"date-time":"2023-09-19T17:12:10Z","timestamp":1695143530000},"page":"3819-3832","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["CTNet: hybrid architecture based on CNN and transformer for image inpainting 
detection"],"prefix":"10.1007","volume":"29","author":[{"given":"Fengjun","family":"Xiao","sequence":"first","affiliation":[]},{"given":"Zhuxi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Ye","family":"Yao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,19]]},"reference":[{"issue":"1","key":"1184_CR1","doi-asserted-by":"publisher","first-page":"241","DOI":"10.1007\/s00530-021-00812-7","volume":"28","author":"H Wang","year":"2022","unstructured":"Wang, H., Li, W., Hu, L., Zhang, C., He, Q.: Structural smoothness low-rank matrix recovery via outlier estimation for image denoising. Multimedia Syst 28(1), 241\u2013255 (2022)","journal-title":"Multimedia Syst"},{"key":"1184_CR2","doi-asserted-by":"publisher","first-page":"379","DOI":"10.1007\/s00530-005-0167-6","volume":"10","author":"W-Q Yan","year":"2005","unstructured":"Yan, W.-Q., Wang, J., Kankanhalli, M.S.: Automatic video logo detection and removal. Multimedia Syst. 10, 379\u2013391 (2005)","journal-title":"Multimedia Syst."},{"issue":"2","key":"1184_CR3","doi-asserted-by":"publisher","first-page":"556","DOI":"10.1109\/TIP.2017.2768180","volume":"27","author":"M Ghorai","year":"2017","unstructured":"Ghorai, M., Mandal, S., Chanda, B.: A group-based image inpainting using patch refinement in mrf framework. IEEE Trans. Image Process. 27(2), 556\u2013567 (2017)","journal-title":"IEEE Trans. Image Process."},{"issue":"6","key":"1184_CR4","doi-asserted-by":"publisher","first-page":"2023","DOI":"10.1109\/TVCG.2017.2702738","volume":"24","author":"Q Guo","year":"2017","unstructured":"Guo, Q., Gao, S., Zhang, X., Yin, Y., Zhang, C.: Patch-based image inpainting via two-stage low rank approximation. IEEE Trans. Visual Comput. Graphics 24(6), 2023\u20132036 (2017)","journal-title":"IEEE Trans. Visual Comput. 
Graphics"},{"issue":"12","key":"1184_CR5","doi-asserted-by":"publisher","first-page":"3050","DOI":"10.1109\/TIFS.2017.2730822","volume":"12","author":"H Li","year":"2017","unstructured":"Li, H., Luo, W., Huang, J.: Localization of diffusion-based inpainting in digital images. IEEE Trans. Inf. Forensics Secur. 12(12), 3050\u20133064 (2017)","journal-title":"IEEE Trans. Inf. Forensics Secur."},{"issue":"8","key":"1184_CR6","doi-asserted-by":"publisher","first-page":"3802","DOI":"10.1007\/s00034-019-01029-w","volume":"38","author":"G Sridevi","year":"2019","unstructured":"Sridevi, G., Srinivas Kumar, S.: Image inpainting based on fractional-order nonlinear diffusion for image reconstruction. Circuits Syst Signal Process. 38(8), 3802\u20133817 (2019)","journal-title":"Circuits Syst Signal Process."},{"key":"1184_CR7","doi-asserted-by":"crossref","unstructured":"Yu, J., Lin, Z., Yang, J., Shen, X., Lu, X., Huang, T.S.: Free-form image inpainting with gated convolution. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4471\u20134480 (2019)","DOI":"10.1109\/ICCV.2019.00457"},{"key":"1184_CR8","doi-asserted-by":"publisher","first-page":"1784","DOI":"10.1109\/TIP.2020.3048629","volume":"30","author":"N Wang","year":"2021","unstructured":"Wang, N., Zhang, Y., Zhang, L.: Dynamic selection network for image inpainting. IEEE Trans. Image Process. 30, 1784\u20131798 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"1184_CR9","doi-asserted-by":"crossref","unstructured":"Wang, W., Zhang, J., Niu, L., Ling, H., Yang, X., Zhang, L.: Parallel multi-resolution fusion network for image inpainting. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
14559\u201314568 (2021)","DOI":"10.1109\/ICCV48922.2021.01429"},{"key":"1184_CR10","doi-asserted-by":"publisher","first-page":"22884","DOI":"10.1109\/ACCESS.2020.2970169","volume":"8","author":"Y Jiang","year":"2020","unstructured":"Jiang, Y., Xu, J., Yang, B., Xu, J., Zhu, J.: Image inpainting based on generative adversarial networks. IEEE Access 8, 22884\u201322892 (2020)","journal-title":"IEEE Access"},{"issue":"6","key":"1184_CR11","doi-asserted-by":"publisher","first-page":"4777","DOI":"10.1109\/TIE.2018.2866043","volume":"66","author":"X Dong","year":"2018","unstructured":"Dong, X., Dong, J., Sun, G., Duan, Y., Qi, L., Yu, H.: Learning-based texture synthesis and automatic inpainting using support vector machines. IEEE Trans. Industr. Electron. 66(6), 4777\u20134787 (2018)","journal-title":"IEEE Trans. Industr. Electron."},{"issue":"3","key":"1184_CR12","doi-asserted-by":"publisher","first-page":"939","DOI":"10.1007\/s00530-021-00873-8","volume":"28","author":"ST Nabi","year":"2022","unstructured":"Nabi, S.T., Kumar, M., Singh, P., Aggarwal, N., Kumar, K.: A comprehensive survey of image and video forgery techniques: variants, challenges, and future directions. Multimedia Syst. 28(3), 939\u2013992 (2022)","journal-title":"Multimedia Syst."},{"key":"1184_CR13","doi-asserted-by":"crossref","unstructured":"Wu, Q., Sun, S.-J., Zhu, W., Li, G.-H., Tu, D.: Detection of digital doctoring in exemplar-based inpainted images. In: 2008 International Conference on Machine Learning and Cybernetics, vol. 3, pp. 1222\u20131226 (2008)","DOI":"10.1109\/ICMLC.2008.4620591"},{"key":"1184_CR14","doi-asserted-by":"crossref","unstructured":"Bacchuwar, K.S., Ramakrishnan, K., et al.: A jump patch-block match algorithm for multiple forgery detection. In: 2013 International Mutli-Conference on Automation, Computing, Communication, Control and Compressed Sensing (iMac4s), pp. 
723\u2013728 (2013)","DOI":"10.1109\/iMac4s.2013.6526502"},{"issue":"1","key":"1184_CR15","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.imavis.2012.09.002","volume":"31","author":"I-C Chang","year":"2013","unstructured":"Chang, I.-C., Yu, J.C., Chang, C.-C.: A forgery detection algorithm for exemplar-based inpainting images using multi-region relation. Image Vis. Comput. 31(1), 57\u201371 (2013)","journal-title":"Image Vis. Comput."},{"key":"1184_CR16","doi-asserted-by":"publisher","first-page":"90","DOI":"10.1016\/j.image.2018.05.015","volume":"67","author":"X Zhu","year":"2018","unstructured":"Zhu, X., Qian, Y., Zhao, X., Sun, B., Sun, Y.: A deep learning approach to patch-based image inpainting forensics. Signal Proces Image Comm 67, 90\u201399 (2018)","journal-title":"Signal Proces Image Comm"},{"key":"1184_CR17","unstructured":"Chu, X., Zhang, B., Tian, Z., Wei, X., Xia, H.: Do we really need explicit position encodings for vision transformers. arXiv preprint arXiv:2102.10882 (2021)"},{"key":"1184_CR18","doi-asserted-by":"crossref","unstructured":"Li, H., Huang, J.: Localization of deep inpainting using high-pass fully convolutional network. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8301\u20138310 (2019)","DOI":"10.1109\/ICCV.2019.00839"},{"key":"1184_CR19","doi-asserted-by":"publisher","first-page":"29","DOI":"10.1016\/j.ins.2021.04.042","volume":"572","author":"Y Zhang","year":"2021","unstructured":"Zhang, Y., Ding, F., Kwong, S., Zhu, G.: Feature pyramid network for diffusion-based image inpainting detection. Inf. Sci. 572, 29\u201342 (2021)","journal-title":"Inf. Sci."},{"issue":"1","key":"1184_CR20","doi-asserted-by":"publisher","first-page":"149","DOI":"10.1080\/02564602.2020.1782274","volume":"38","author":"X Wang","year":"2021","unstructured":"Wang, X., Niu, S., Wang, H.: Image inpainting detection based on multi-task deep learning network. IETE Tech. Rev. 
38(1), 149\u2013157 (2021)","journal-title":"IETE Tech. Rev."},{"key":"1184_CR21","doi-asserted-by":"crossref","unstructured":"Li, A., Ke, Q., Ma, X., Weng, H., Zong, Z., Xue, F., Zhang, R.: Noise doesn\u2019t lie: Towards universal detection of deep inpainting. arXiv preprint arXiv:2106.01532 (2021)","DOI":"10.24963\/ijcai.2021\/109"},{"key":"1184_CR22","doi-asserted-by":"crossref","unstructured":"Chen, X., Dong, C., Ji, J., Cao, J., Li, X.: Image manipulation detection by multi-view multi-scale supervision. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 14185\u201314193 (2021)","DOI":"10.1109\/ICCV48922.2021.01392"},{"key":"1184_CR23","doi-asserted-by":"crossref","unstructured":"Wu, Y., AbdAlmageed, W., Natarajan, P.: Mantra-net: Manipulation tracing network for detection and localization of image forgeries with anomalous features. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9543\u20139552 (2019)","DOI":"10.1109\/CVPR.2019.00977"},{"issue":"3","key":"1184_CR24","doi-asserted-by":"publisher","first-page":"1172","DOI":"10.1109\/TCSVT.2021.3075039","volume":"32","author":"H Wu","year":"2021","unstructured":"Wu, H., Zhou, J.: IID-Net: Image inpainting detection network via neural architecture search and attention. IEEE Trans. Circuits Syst. Video Technol. 32(3), 1172\u20131185 (2021)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"1184_CR25","doi-asserted-by":"crossref","unstructured":"Liu, K., Li, J., Hussain\u00a0Bukhari, S.S.: Overview of image inpainting and forensic technology. Security and Communication Networks 2022 (2022)","DOI":"10.1155\/2022\/9291971"},{"key":"1184_CR26","doi-asserted-by":"crossref","unstructured":"Bertalmio, M., Sapiro, G., Caselles, V., Ballester, C.: Image inpainting. In: Proceedings of the 27th Annual Conference on Computer Graphics and Interactive Techniques, pp. 
417\u2013424 (2000)","DOI":"10.1145\/344779.344972"},{"issue":"3","key":"1184_CR27","first-page":"1019","volume":"62","author":"T Chan","year":"2001","unstructured":"Chan, T.: Local inpainting models and tv inpainting. SIAM J. Appl. Math. 62(3), 1019\u20131043 (2001)","journal-title":"SIAM J. Appl. Math."},{"issue":"4","key":"1184_CR28","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1006\/jvci.2001.0487","volume":"12","author":"TF Chan","year":"2001","unstructured":"Chan, T.F., Shen, J.: Nontexture inpainting by curvature-driven diffusions. J. Vis. Commun. Image Represent. 12(4), 436\u2013449 (2001)","journal-title":"J. Vis. Commun. Image Represent."},{"issue":"5","key":"1184_CR29","doi-asserted-by":"publisher","first-page":"1153","DOI":"10.1109\/TIP.2010.2042098","volume":"19","author":"Z Xu","year":"2010","unstructured":"Xu, Z., Sun, J.: Image inpainting by patch propagation using patch sparsity. IEEE Trans. Image Process. 19(5), 1153\u20131165 (2010)","journal-title":"IEEE Trans. Image Process."},{"issue":"1","key":"1184_CR30","doi-asserted-by":"publisher","first-page":"444","DOI":"10.1109\/TIP.2014.2372479","volume":"24","author":"T Ruzic","year":"2015","unstructured":"Ruzic, T., Pizurica, A.: Context-aware patch-based image inpainting using markov random field modeling. IEEE Trans. Image Process. 24(1), 444\u2013456 (2015)","journal-title":"IEEE Trans. Image Process."},{"issue":"1","key":"1184_CR31","doi-asserted-by":"publisher","first-page":"23","DOI":"10.1080\/10867651.2004.10487596","volume":"9","author":"A Telea","year":"2004","unstructured":"Telea, A.: An image inpainting technique based on the fast marching method. J graph tools 9(1), 23\u201334 (2004)","journal-title":"J graph tools"},{"key":"1184_CR32","unstructured":"Bertalmio, M., Bertozzi, A.L., Sapiro, G.: Navier-stokes, fluid dynamics, and image and video inpainting. 
In: Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (2001)"},{"issue":"6","key":"1184_CR33","doi-asserted-by":"publisher","first-page":"866","DOI":"10.1109\/TVCG.2014.2298016","volume":"20","author":"J Herling","year":"2014","unstructured":"Herling, J., Broll, W.: High-quality real-time video inpainting with pixmix. IEEE Trans. Visual Comput. Graphics 20(6), 866\u2013879 (2014)","journal-title":"IEEE Trans. Visual Comput. Graphics"},{"issue":"4","key":"1184_CR34","first-page":"1","volume":"33","author":"J-B Huang","year":"2014","unstructured":"Huang, J.-B., Kang, S.B., Ahuja, N., Kopf, J.: Image completion using planar structure guidance. ACM Trans Graph (TOG) 33(4), 1\u201310 (2014)","journal-title":"ACM Trans Graph (TOG)"},{"key":"1184_CR35","doi-asserted-by":"crossref","unstructured":"Pathak, D., Krahenbuhl, P., Donahue, J., Darrell, T., Efros, A.A.: Context encoders: Feature learning by inpainting. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2536\u20132544 (2016)","DOI":"10.1109\/CVPR.2016.278"},{"key":"1184_CR36","doi-asserted-by":"crossref","unstructured":"Yang, C., Lu, X., Lin, Z., Shechtman, E., Wang, O., Li, H.: High-resolution image inpainting using multi-scale neural patch synthesis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6721\u20136729 (2017)","DOI":"10.1109\/CVPR.2017.434"},{"issue":"4","key":"1184_CR37","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073659","volume":"36","author":"S Iizuka","year":"2017","unstructured":"Iizuka, S., Simo-Serra, E., Ishikawa, H.: Globally and locally consistent image completion. ACM Trans Graph 36(4), 1\u201314 (2017)","journal-title":"ACM Trans Graph"},{"key":"1184_CR38","doi-asserted-by":"crossref","unstructured":"Zeng, Y., Fu, J., Chao, H., Guo, B.: Learning pyramid-context encoder network for high-quality image inpainting. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1486\u20131494 (2019)","DOI":"10.1109\/CVPR.2019.00158"},{"key":"1184_CR39","doi-asserted-by":"crossref","unstructured":"Yu, J., Lin, Z., Yang, J., Shen, X., Lu, X., Huang, T.S.: Generative image inpainting with contextual attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5505\u20135514 (2018)","DOI":"10.1109\/CVPR.2018.00577"},{"key":"1184_CR40","doi-asserted-by":"crossref","unstructured":"Yan, Z., Li, X., Li, M., Zuo, W., Shan, S.: Shift-net: Image inpainting via deep feature rearrangement. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 1\u201317 (2018)","DOI":"10.1007\/978-3-030-01264-9_1"},{"key":"1184_CR41","unstructured":"Nazeri, K., Ng, E., Joseph, T., Qureshi, F.Z., Ebrahimi, M.: Edgeconnect: Generative image inpainting with adversarial edge learning. arXiv preprint arXiv:1901.00212 (2019)"},{"key":"1184_CR42","unstructured":"Wu, H., Zhou, J., Li, Y.: Deep generative model for image inpainting with local binary pattern learning and spatial attention. arXiv preprint arXiv:2009.01031 (2020)"},{"key":"1184_CR43","doi-asserted-by":"crossref","unstructured":"Yu, T., Guo, Z., Jin, X., Wu, S., Chen, Z., Li, W., Zhang, Z., Liu, S.: Region normalization for image inpainting. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 12733\u201312740 (2020)","DOI":"10.1609\/aaai.v34i07.6967"},{"issue":"3","key":"1184_CR44","first-page":"1146","volume":"43","author":"C Xiao","year":"2022","unstructured":"Xiao, C., Li, F., Zhang, D., Huang, P., Ding, X., Sheng, V.S.: Image inpainting detection based on high-pass filter attention network. Comput. Syst. Sci. Eng. 43(3), 1146\u20131154 (2022)","journal-title":"Comput. Syst. Sci. Eng."},{"key":"1184_CR45","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask r-cnn. 
In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"1184_CR46","doi-asserted-by":"crossref","unstructured":"Yang, W., Cai, R., Kot, A.: Image inpainting detection via enriched attentive pattern with near original image augmentation. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 2816\u20132824 (2022)","DOI":"10.1145\/3503161.3547921"},{"key":"1184_CR47","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017)"},{"key":"1184_CR48","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"1184_CR49","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"1184_CR50","unstructured":"Krizhevsky, A.: Learning multiple layers of features from tiny images. The CIFAR-100 dataset https:\/\/www.cs.toronto.edu\/~kriz\/cifar.html (2009)"},{"key":"1184_CR51","unstructured":"Zhai, X., Puigcerver, J., Kolesnikov, A., Ruyssen, P., Riquelme, C., Lucic, M., Djolonga, J., Pinto, A.S., Neumann, M., Dosovitskiy, A., et al.: A large-scale study of representation learning with the visual task adaptation benchmark. arXiv preprint arXiv:1910.04867 (2019)"},{"key":"1184_CR52","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. 
In: International Conference on Machine Learning, pp. 10347\u201310357 (2021)"},{"key":"1184_CR53","doi-asserted-by":"crossref","unstructured":"Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Lu, T., Luo, P., Shao, L.: Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"1184_CR54","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1184_CR55","doi-asserted-by":"crossref","unstructured":"Wu, K., Peng, H., Chen, M., Fu, J., Chao, H.: Rethinking and improving relative position encoding for vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10033\u201310041 (2021)","DOI":"10.1109\/ICCV48922.2021.00988"},{"key":"1184_CR56","doi-asserted-by":"crossref","unstructured":"Yuan, L., Chen, Y., Wang, T., Yu, W., Shi, Y., Jiang, Z.-H., Tay, F.E., Feng, J., Yan, S.: Tokens-to-token vit: Training vision transformers from scratch on imagenet. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 558\u2013567 (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"1184_CR57","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"11","key":"1184_CR58","doi-asserted-by":"publisher","first-page":"2691","DOI":"10.1109\/TIFS.2018.2825953","volume":"13","author":"B Bayar","year":"2018","unstructured":"Bayar, B., Stamm, M.C.: Constrained convolutional neural networks: A new approach towards general purpose image manipulation detection. IEEE Trans. Inf. Forensics Secur. 13(11), 2691\u20132706 (2018)","journal-title":"IEEE Trans. Inf. Forensics Secur."},{"key":"1184_CR59","unstructured":"Camacho, I.C.: Initialization methods of convolutional neural networks for detection of image manipulations. PhD thesis, Universit\u00e9 Grenoble Alpes (2021)"},{"key":"1184_CR60","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2980\u20132988 (2017)","DOI":"10.1109\/ICCV.2017.324"},{"key":"1184_CR61","doi-asserted-by":"crossref","unstructured":"Gloe, T., B\u00f6hme, R.: The dresden image database for benchmarking digital image forensics. In: Proceedings of the 2010 ACM Symposium on Applied Computing, pp. 1584\u20131590 (2010)","DOI":"10.1145\/1774088.1774427"},{"issue":"6","key":"1184_CR62","doi-asserted-by":"publisher","first-page":"1452","DOI":"10.1109\/TPAMI.2017.2723009","volume":"40","author":"B Zhou","year":"2017","unstructured":"Zhou, B., Lapedriza, A., Khosla, A., Oliva, A., Torralba, A.: Places: A 10 million image database for scene recognition. IEEE Trans. Pattern Anal. Mach. Intell. 40(6), 1452\u20131464 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01184-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-023-01184-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01184-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T11:17:54Z","timestamp":1730114274000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-023-01184-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,19]]},"references-count":62,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2023,12]]}},"alternative-id":["1184"],"URL":"https:\/\/doi.org\/10.1007\/s00530-023-01184-w","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,9,19]]},"assertion":[{"value":"31 March 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 September 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 September 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}