{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T15:25:02Z","timestamp":1773156302850,"version":"3.50.1"},"reference-count":40,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2023,8]]},"DOI":"10.1007\/s00530-023-01118-6","type":"journal-article","created":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T13:01:30Z","timestamp":1685883690000},"page":"2399-2413","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["FDS_2D: rethinking magnitude-phase features for DeepFake detection"],"prefix":"10.1007","volume":"29","author":[{"given":"Gaoming","family":"Yang","sequence":"first","affiliation":[]},{"given":"Anxing","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Xianjin","family":"Fang","sequence":"additional","affiliation":[]},{"given":"Ji","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,6,4]]},"reference":[{"issue":"7","key":"1118_CR1","doi-asserted-by":"publisher","first-page":"1678","DOI":"10.1007\/s11263-022-01606-8","volume":"130","author":"F Juefei-Xu","year":"2022","unstructured":"Juefei-Xu, F., Wang, R., Huang, Y., et al.: Countering malicious deepfakes: Survey, battleground, and horizon. Int. 
J. Comput. Vision 130(7), 1678\u20131734 (2022). https:\/\/doi.org\/10.1007\/s11263-022-01606-8","journal-title":"Int. J. Comput. Vision"},{"key":"1118_CR2","doi-asserted-by":"publisher","first-page":"131","DOI":"10.1016\/j.inffus.2020.06.014","volume":"64","author":"R Tolosana","year":"2020","unstructured":"Tolosana, R., Vera-Rodriguez, R., Fierrez, J., et al.: Deepfakes and beyond: a survey of face manipulation and fake detection. Information Fusion 64, 131\u2013148 (2020). https:\/\/doi.org\/10.1016\/j.inffus.2020.06.014","journal-title":"Information Fusion"},{"issue":"11","key":"1118_CR3","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., et al.: Generative adversarial networks. Commun. ACM 63(11), 139\u2013144 (2020). https:\/\/doi.org\/10.1145\/3422622","journal-title":"Commun. ACM"},{"key":"1118_CR4","first-page":"1","volume":"1050","author":"DP Kingma","year":"2014","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. Stat 1050, 1 (2014)","journal-title":"Stat"},{"key":"1118_CR5","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"1118_CR6","doi-asserted-by":"publisher","unstructured":"Lin BS, Hsu DW, Shen CH, et al (2020) Using fully connected and convolutional net for GAN-based face swapping. In: 2020 IEEE Asia Pacific Conference on Circuits and Systems (APCCAS), IEEE, pp 185\u2013188, https:\/\/doi.org\/10.1109\/APCCAS50809.2020.9301665","DOI":"10.1109\/APCCAS50809.2020.9301665"},{"key":"1118_CR7","doi-asserted-by":"publisher","unstructured":"Choi Y, Choi M, Kim M, et al (2018) StarGAN: Unified generative adversarial networks for multi-domain image-to-image translation. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 8789\u20138797, https:\/\/doi.org\/10.1109\/CVPR.2018.00916","DOI":"10.1109\/CVPR.2018.00916"},{"key":"1118_CR8","doi-asserted-by":"publisher","unstructured":"Wang SY, Wang O, Zhang R, et al (2020) CNN-generated images are surprisingly easy to spot... for now. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 8695\u20138704, https:\/\/doi.org\/10.1109\/CVPR42600.2020.00872","DOI":"10.1109\/CVPR42600.2020.00872"},{"key":"1118_CR9","doi-asserted-by":"publisher","unstructured":"Marra F, Gragnaniello D, Verdoliva L, et al (2019) Do GANs leave artificial fingerprints? In: 2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR), IEEE, pp 506\u2013511, https:\/\/doi.org\/10.1109\/MIPR.2019.00103","DOI":"10.1109\/MIPR.2019.00103"},{"key":"1118_CR10","doi-asserted-by":"publisher","unstructured":"Matern F, Riess C, Stamminger M (2019) Exploiting visual artifacts to expose deepfakes and face manipulations. In: 2019 IEEE Winter Applications of Computer Vision Workshops (WACVW), IEEE, pp 83\u201392, https:\/\/doi.org\/10.1109\/WACVW.2019.00020","DOI":"10.1109\/WACVW.2019.00020"},{"key":"1118_CR11","doi-asserted-by":"publisher","unstructured":"Zhao H, Zhou W, Chen D, et al (2021) Multi-attentional deepfake detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 2185\u20132194, https:\/\/doi.org\/10.1109\/CVPR46437.2021.00222","DOI":"10.1109\/CVPR46437.2021.00222"},{"key":"1118_CR12","doi-asserted-by":"publisher","unstructured":"Bondi L, Cannas ED, Bestagini P, et al (2020) Training strategies and data augmentations in CNN-based deepfake video detection. 
In: 2020 IEEE International Workshop on Information Forensics and Security (WIFS), IEEE, pp 1\u20136, https:\/\/doi.org\/10.1109\/WIFS49906.2020.9360901","DOI":"10.1109\/WIFS49906.2020.9360901"},{"key":"1118_CR13","doi-asserted-by":"publisher","unstructured":"Coccomini DA, Messina N, Gennaro C, et al (2022) Combining efficientnet and vision transformers for video deepfake detection. In: Image Analysis and Processing\u2013ICIAP 2022: 21st International Conference, Lecce, Italy, May 23\u201327, 2022, Proceedings, Part III, Springer, pp 219\u2013229, https:\/\/doi.org\/10.1007\/978-3-031-06433-3 19","DOI":"10.1007\/978-3-031-06433-3"},{"key":"1118_CR14","unstructured":"Durall R, Keuper M, Pfreundt FJ, et al (2019) Unmasking deepfakes with simple features. CoRR abs\/1911.00686"},{"key":"1118_CR15","doi-asserted-by":"publisher","unstructured":"Liu H, Li X, Zhou W, et al (2021) Spatial-phase shallow learning: Rethinking face forgery detection in frequency domain. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 772\u2013781, https:\/\/doi.org\/10.1109\/CVPR46437.2021.00083","DOI":"10.1109\/CVPR46437.2021.00083"},{"key":"1118_CR16","doi-asserted-by":"publisher","unstructured":"Zhang X, Karaman S, Chang SF (2019) Detecting and simulating artifacts in GAN fake images. In: 2019 IEEE International Workshop on Information Forensics and Security (WIFS), IEEE, pp 1\u20136, https:\/\/doi.org\/10.1109\/WIFS47025.2019.9035107","DOI":"10.1109\/WIFS47025.2019.9035107"},{"key":"1118_CR17","doi-asserted-by":"publisher","unstructured":"Qian Y, Yin G, Sheng L, et al (2020) Thinking in frequency: Face forgery detection by mining frequency-aware clues. 
In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XII, Springer, pp 86\u2013103, https:\/\/doi.org\/10.1007\/978-3-030-58610-2 6","DOI":"10.1007\/978-3-030-58610-2"},{"issue":"10","key":"1118_CR18","doi-asserted-by":"publisher","first-page":"e3","DOI":"10.23915\/distill.00003","volume":"1","author":"A Odena","year":"2016","unstructured":"Odena, A., Dumoulin, V., Olah, C.: Deconvolution and checkerboard artifacts. Distill 1(10), e3 (2016)","journal-title":"Distill"},{"key":"1118_CR19","first-page":"1","volume":"20","author":"A Azulay","year":"2019","unstructured":"Azulay, A., Weiss, Y.: Why do deep convolutional networks generalize so poorly to small image transformations? J. Mach. Learn. Res. 20, 1\u201325 (2019)","journal-title":"J. Mach. Learn. Res."},{"key":"1118_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1155\/2022\/5169873","volume":"2022","author":"B Wang","year":"2022","unstructured":"Wang, B., Li, Y., Wu, X., et al.: Face forgery detection based on the improved siamese network. Secur Commun Net 2022, 1\u201313 (2022). https:\/\/doi.org\/10.1155\/2022\/5169873","journal-title":"Secur Commun Net"},{"key":"1118_CR21","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-022-02683-z","author":"G Yang","year":"2022","unstructured":"Yang, G., Xu, K., Fang, X., et al.: Video face forgery detection via facial motion-assisted capturing dense optical flow truncation. Visual Comput (2022). https:\/\/doi.org\/10.1007\/s00371-022-02683-z","journal-title":"Visual Comput"},{"key":"1118_CR22","doi-asserted-by":"publisher","unstructured":"Wang J, Wu Z, Ouyang W, et al (2022) M2tr: Multi-modal multi-scale transformers for deepfake detection. 
In: Proceedings of the 2022 International Conference on Multimedia Retrieval, pp 615\u2013623, https:\/\/doi.org\/10.1145\/3512527.3531415","DOI":"10.1145\/3512527.3531415"},{"key":"1118_CR23","unstructured":"Zhang R (2019) Making convolutional networks shift-invariant again. In: International Conference on Machine Learning, PMLR, pp 7324\u20137334, URL https:\/\/proceedings.mlr.press\/v97\/zhang19a.html"},{"key":"1118_CR24","unstructured":"Kaiser L, Gomez AN, Chollet F (2018) Depthwise separable convolutions for neural machine translation. In: International Conference on Learning Representations"},{"key":"1118_CR25","doi-asserted-by":"publisher","unstructured":"Sandler M, Howard A, Zhu M, et al (2018) Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 4510\u20134520, https:\/\/doi.org\/10.1109\/CVPR.2018.00474","DOI":"10.1109\/CVPR.2018.00474"},{"key":"1118_CR26","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, et al (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 770\u2013778, https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"1118_CR27","doi-asserted-by":"publisher","unstructured":"Luo Y, Zhang Y, Yan J, et al (2021) Generalizing face forgery detection with high-frequency features. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 16,317\u201316,326, https:\/\/doi.org\/10.1109\/CVPR46437.2021.01605","DOI":"10.1109\/CVPR46437.2021.01605"},{"key":"1118_CR28","doi-asserted-by":"publisher","unstructured":"Feichtenhofer C, Fan H, Malik J, et al (2019) Slowfast networks for video recognition. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 6202\u20136211, https:\/\/doi.org\/10.1109\/ICCV.2019.00630","DOI":"10.1109\/ICCV.2019.00630"},{"key":"1118_CR29","unstructured":"Vaswani A, Shazeer N, Parmar N, et al (2017) Attention is all you need. Advances in Neural Information Processing Systems 30"},{"key":"1118_CR30","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, et al (2020) An image is worth 16x16 words: Transformers for image recognition at scale. CoRR abs\/2010.11929"},{"key":"1118_CR31","doi-asserted-by":"publisher","unstructured":"Rossler A, Cozzolino D, Verdoliva L, et al (2019) Faceforensics++: Learning to detect manipulated facial images. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 1\u201311, https:\/\/doi.org\/10.1109\/ICCV.2019.00009","DOI":"10.1109\/ICCV.2019.00009"},{"key":"1118_CR32","doi-asserted-by":"publisher","unstructured":"Li Y, Yang X, Sun P, et al (2020) Celeb-df: A large-scale challenging dataset for deepfake forensics. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 3207\u20133216, https:\/\/doi.org\/10.1109\/CVPR42600.2020.00327","DOI":"10.1109\/CVPR42600.2020.00327"},{"key":"1118_CR33","doi-asserted-by":"publisher","unstructured":"Thies J, Zollhofer M, Stamminger M, et al (2016) Face2face: Real-time face capture and reenactment of RGB videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 2387\u20132395, https:\/\/doi.org\/10.1109\/CVPR.2016.262","DOI":"10.1109\/CVPR.2016.262"},{"issue":"4","key":"1118_CR34","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3306346.3323035","volume":"38","author":"J Thies","year":"2019","unstructured":"Thies, J., Zollh ofer, M., Nie\u00dfner, M.: Deferred neural rendering: Image synthesis using neural textures. ACM Transact Graph (TOG). 38(4), 1\u201312 (2019). 
https:\/\/doi.org\/10.1145\/3306346.3323035","journal-title":"ACM Transact Graph (TOG)."},{"key":"1118_CR35","doi-asserted-by":"publisher","unstructured":"Zhou P, Han X, Morariu VI, et al (2017) Two-stream neural networks for tampered face detection. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), IEEE, pp 1831\u20131839, https:\/\/doi.org\/10.1109\/CVPRW.2017.229","DOI":"10.1109\/CVPRW.2017.229"},{"key":"1118_CR36","doi-asserted-by":"publisher","unstructured":"Afchar D, Nozick V, Yamagishi J, et al (2018) Mesonet: a compact facial video forgery detection network. In: 2018 IEEE International Workshop on Information Forensics and Security (WIFS), IEEE, pp 1\u20137, https:\/\/doi.org\/10.1109\/WIFS.2018.8630761","DOI":"10.1109\/WIFS.2018.8630761"},{"key":"1118_CR37","doi-asserted-by":"publisher","unstructured":"Nguyen HH, Fang F, Yamagishi J, et al (2019) Multi-task learning for detecting and segmenting manipulated facial images and videos. In: 2019 IEEE 10th International Conference on Biometrics Theory, Applications and Systems (BTAS), IEEE, pp 1\u20138, https:\/\/doi.org\/10.1109\/BTAS46853.2019.9185974","DOI":"10.1109\/BTAS46853.2019.9185974"},{"key":"1118_CR38","doi-asserted-by":"publisher","unstructured":"Chollet F (2017) Xception: Deep learning with depthwise separable convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1251\u20131258, https:\/\/doi.org\/10.1109\/CVPR.2017.195","DOI":"10.1109\/CVPR.2017.195"},{"issue":"1","key":"1118_CR39","doi-asserted-by":"publisher","first-page":"39","DOI":"10.1109\/TBIOM.2022.3201887","volume":"5","author":"A Mehra","year":"2022","unstructured":"Mehra, A., Agarwal, A., Vatsa, M., et al.: Motion magnified 3-D residual-in-dense network for DeepFake Detection[J]. IEEE Transact Biomet Behav, Identity Sci 5(1), 39\u201352 (2022). 
https:\/\/doi.org\/10.1109\/TBIOM.2022.3201887","journal-title":"IEEE Transact Biomet Behav, Identity Sci"},{"key":"1118_CR40","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-022-13966-x","author":"D Zhang","year":"2022","unstructured":"Zhang, D., Zhu, W., Ding, X., et al.: SRTNet: a spatial and residual based two-stream neural network for DeepFakes detection. Multimed Tools App (2022). https:\/\/doi.org\/10.1007\/s11042-022-13966-x","journal-title":"Multimed Tools App"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01118-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-023-01118-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01118-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,7,14]],"date-time":"2023-07-14T06:33:05Z","timestamp":1689316385000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-023-01118-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":40,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,8]]}},"alternative-id":["1118"],"URL":"https:\/\/doi.org\/10.1007\/s00530-023-01118-6","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-2588635\/v1","asserted-by":"object"}]},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,6,4]]},"assertion":[{"value":"15 February 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"22 May 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 June 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}