{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T17:55:16Z","timestamp":1773510916848,"version":"3.50.1"},"publisher-location":"Cham","reference-count":39,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030582005","type":"print"},{"value":"9783030582012","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58201-2_28","type":"book-chapter","created":{"date-parts":[[2020,9,13]],"date-time":"2020-09-13T23:02:29Z","timestamp":1600038149000},"page":"416-430","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":44,"title":["FDFtNet: Facing Off Fake Images Using Fake Detection Fine-Tuning Network"],"prefix":"10.1007","author":[{"given":"Hyeonseong","family":"Jeon","sequence":"first","affiliation":[]},{"given":"Youngoh","family":"Bang","sequence":"additional","affiliation":[]},{"given":"Simon S.","family":"Woo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,9,14]]},"reference":[{"key":"28_CR1","unstructured":"R\u00f6ssler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nie\u00dfner, M.: This table lists the benchmark results for the binary classification scenario. (2019). http:\/\/kaldir.vc.in.tum.de\/faceforensics_benchmark\/"},{"key":"28_CR2","doi-asserted-by":"crossref","unstructured":"Chollet, F.: Xception: deep learning with depthwise separable convolutions. In: Proceedings of the IEEE Conference On Computer Vision and Pattern Recognition, pp. 1251\u20131258 (2017)","DOI":"10.1109\/CVPR.2017.195"},{"key":"28_CR3","unstructured":"DeVries, T., Taylor, G.W.: Improved regularization of convolutional neural networks with cutout (2017). arXiv preprint arXiv:1708.04552"},{"key":"28_CR4","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1016\/j.neunet.2017.12.012","volume":"107","author":"S Elfwing","year":"2018","unstructured":"Elfwing, S., Uchibe, E., Doya, K.: Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural Netw. 107, 3\u201311 (2018)","journal-title":"Neural Netw."},{"issue":"1","key":"28_CR5","doi-asserted-by":"publisher","first-page":"154","DOI":"10.1109\/TIFS.2008.2012215","volume":"4","author":"H Farid","year":"2009","unstructured":"Farid, H.: Exposing digital forgeries from jpeg ghosts. IEEE Trans. Inf. Forensics Secur. 4(1), 154\u2013160 (2009)","journal-title":"IEEE Trans. Inf. Forensics Secur."},{"key":"28_CR6","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, pp. 2672\u20132680 (2014)"},{"key":"28_CR7","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference On Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"28_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"630","DOI":"10.1007\/978-3-319-46493-0_38","volume-title":"Computer Vision \u2013 ECCV 2016","author":"K He","year":"2016","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Identity mappings in deep residual networks. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 630\u2013645. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_38"},{"key":"28_CR9","unstructured":"Hendrycks, D., Gimpel, K.: Bridging nonlinearities and stochastic regularizers with gaussian error linear units. ArXiv abs\/1606.08415 (2017)"},{"key":"28_CR10","unstructured":"Howard, A., et al.: Searching for mobilenetv3. arXiv preprint arXiv:1905.02244 (2019)"},{"key":"28_CR11","unstructured":"Howard, A.G., et al.: Mobilenets: efficient convolutional neural networks for mobile vision applications (2017). arXiv preprint arXiv:1704.04861"},{"key":"28_CR12","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"28_CR13","unstructured":"Iandola, F., Moskewicz, M., Karayev, S., Girshick, R., Darrell, T., Keutzer, K.: Densenet: implementing efficient convnet descriptor pyramids (2014). arXiv preprint arXiv:1404.1869"},{"key":"28_CR14","unstructured":"Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: Squeezenet: alexnet-level accuracy with 50x fewer parameters and$$<$$ 0.5 mb model size (2016). arXiv preprint arXiv:1602.07360"},{"key":"28_CR15","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift (2015). arXiv preprint arXiv:1502.03167"},{"key":"28_CR16","doi-asserted-by":"crossref","unstructured":"Jeon, H., Bang, Y., Woo, S.S.: Faketalkerdetect: effective and practical realistic neural talking head detection with a highly unbalanced dataset. In: Proceedings of the IEEE International Conference on Computer Vision Workshops (2019)","DOI":"10.1109\/ICCVW.2019.00163"},{"key":"28_CR17","unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation (2017). arXiv preprint arXiv:1710.10196"},{"key":"28_CR18","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"28_CR19","first-page":"2","volume":"6","author":"N Krawetz","year":"2007","unstructured":"Krawetz, N., Solutions, H.F.: A picture\u2019s worth. Hacker Factor Solutions 6, 2 (2007)","journal-title":"Hacker Factor Solutions"},{"key":"28_CR20","doi-asserted-by":"crossref","unstructured":"Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3730\u20133738 (2015)","DOI":"10.1109\/ICCV.2015.425"},{"issue":"4","key":"28_CR21","first-page":"174","volume":"5","author":"SK Mankar","year":"2015","unstructured":"Mankar, S.K., Gurjar, A.A.: Image forgery types and their detection: a review. Int. J. Adv. Res. Comput. Sci. Softw. Eng. 5(4), 174\u2013178 (2015)","journal-title":"Int. J. Adv. Res. Comput. Sci. Softw. Eng."},{"key":"28_CR22","unstructured":"Ramachandran, P., Zoph, B., Le, Q.V.: Swish: a self-gated activation function 7 (2017). arXiv preprint arXiv:1710.05941"},{"key":"28_CR23","unstructured":"R\u00f6ssler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nie\u00dfner, M.: Faceforensics: a large-scale video dataset for forgery detection in human faces (2018). arXiv preprint arXiv:1803.09179"},{"key":"28_CR24","doi-asserted-by":"crossref","unstructured":"R\u00f6ssler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nie\u00dfner, M.: Faceforensics++: learning to detect manipulated facial images (2019). arXiv preprint arXiv:1901.08971","DOI":"10.1109\/ICCV.2019.00009"},{"key":"28_CR25","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv 2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"28_CR26","doi-asserted-by":"crossref","unstructured":"Sheng, T., Feng, C., Zhuo, S., Zhang, X., Shen, L., Aleksic, M.: A quantization-friendly separable convolution for mobilenets. In: 2018 1st Workshop on Energy Efficient Machine Learning and Cognitive Computing for Embedded Applications (EMC2), pp. 14\u201318. IEEE (2018)","DOI":"10.1109\/EMC2.2018.00011"},{"key":"28_CR27","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition (2014). arXiv preprint arXiv:1409.1556"},{"key":"28_CR28","doi-asserted-by":"crossref","unstructured":"Sun, Q., Liu, Y., Chua, T.S., Schiele, B.: Meta-transfer learning for few-shot learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 403\u2013412 (2019)","DOI":"10.1109\/CVPR.2019.00049"},{"key":"28_CR29","doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1\u20139 (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"28_CR30","doi-asserted-by":"crossref","unstructured":"Tariq, S., Lee, S., Kim, H., Shin, Y., Woo, S.S.: Gan is a friend or foe?: a framework to detect various fake face images. In: Proceedings of the 34th ACM\/SIGAPP Symposium on Applied Computing, pp. 1296\u20131303. ACM (2019)","DOI":"10.1145\/3297280.3297410"},{"key":"28_CR31","doi-asserted-by":"crossref","unstructured":"Thies, J., Zollh\u00f6fer, M., Stamminger, M., Theobalt, C., Nie\u00dfner, M.: Face2Face: real-time Face Capture and Reenactment of RGB Videos. In: Proceedings of the Computer Vision and Pattern Recognition (CVPR). IEEE (2016)","DOI":"10.1109\/CVPR.2016.262"},{"key":"28_CR32","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in neural information processing systems, pp. 5998\u20136008 (2017)"},{"key":"28_CR33","unstructured":"Wikipedia: Deepfake. https:\/\/en.wikipedia.org\/wiki\/Deepfake (2019). Accessed 15 July 2019"},{"key":"28_CR34","doi-asserted-by":"crossref","unstructured":"Wu, J., et al.: Sliced wasserstein generative models. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019). https:\/\/arxiv.org\/pdf\/1706.02631.pdf","DOI":"10.1109\/CVPR.2019.00383"},{"issue":"3","key":"28_CR35","doi-asserted-by":"publisher","first-page":"653","DOI":"10.1111\/rode.12160","volume":"19","author":"X Yang","year":"2015","unstructured":"Yang, X.: Estimating distribution costs with the Eaton-Kortum model. Rev. Dev. Econ. 19(3), 653\u2013665 (2015)","journal-title":"Rev. Dev. Econ."},{"key":"28_CR36","unstructured":"Yin, C.: Altering faces via ai deepfake may be outlawed. China Daily, April 2019. http:\/\/global.chinadaily.com.cn\/a\/201904\/22\/WS5cbd15c4a3104842260b76c8.html"},{"key":"28_CR37","doi-asserted-by":"crossref","unstructured":"Yu, N., Davis, L.S., Fritz, M.: Attributing fake images to GANs: learning and analyzing Gan fingerprints. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 7556\u20137566 (2019)","DOI":"10.1109\/ICCV.2019.00765"},{"key":"28_CR38","doi-asserted-by":"crossref","unstructured":"Zakharov, E., Shysheya, A., Burkov, E., Lempitsky, V.: Few-shot adversarial learning of realistic neural talking head models (2019). arXiv preprint arXiv:1905.08233","DOI":"10.1109\/ICCV.2019.00955"},{"key":"28_CR39","unstructured":"Zhang, H., Goodfellow, I., Metaxas, D., Odena, A.: Self-attention generative adversarial networks (2018). arXiv preprint arXiv:1805.08318"}],"container-title":["IFIP Advances in Information and Communication Technology","ICT Systems Security and Privacy Protection"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58201-2_28","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,14]],"date-time":"2024-09-14T00:09:27Z","timestamp":1726272567000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58201-2_28"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030582005","9783030582012"],"references-count":39,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58201-2_28","relation":{},"ISSN":["1868-4238","1868-422X"],"issn-type":[{"value":"1868-4238","type":"print"},{"value":"1868-422X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"14 September 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"SEC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"IFIP International Conference on ICT Systems Security and Privacy Protection","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Maribor","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Slovenia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 September 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"35","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"sec2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sec2020.um.si\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"149","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"29","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"19% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.88","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5.14","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}