{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T03:40:13Z","timestamp":1771645213894,"version":"3.50.1"},"reference-count":49,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100003600","name":"Korea Institute of Police Technology (KIPoT) through the Korean National Police Agency and Ministry of Science and ICT","doi-asserted-by":"publisher","award":["210121M06"],"award-info":[{"award-number":["210121M06"]}],"id":[{"id":"10.13039\/501100003600","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Institute of Information and Communications Technology Planning and Evaluation"},{"name":"Korean Government through the MSIT","award":["2019-0-00421"],"award-info":[{"award-number":["2019-0-00421"]}]},{"name":"Korean Government through the MSIT","award":["2020-0-01821"],"award-info":[{"award-number":["2020-0-01821"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/tip.2023.3293767","type":"journal-article","created":{"date-parts":[[2023,7,13]],"date-time":"2023-07-13T17:34:28Z","timestamp":1689269668000},"page":"3949-3963","source":"Crossref","is-referenced-by-count":3,"title":["Frequency-Based Motion Representation for Video Generative Adversarial Networks"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4050-6896","authenticated-orcid":false,"given":"Sangeek","family":"Hyun","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence, Sungkyunkwan University, Suwon, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3934-2879","authenticated-orcid":false,"given":"Jaihyun","family":"Lew","sequence":"additional","affiliation":[{"name":"Interdisciplinary Program in Artificial Intelligence, Seoul National University, Seoul, South Korea"}]},{"given":"Jiwoo","family":"Chung","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence, Sungkyunkwan University, Suwon, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1079-6917","authenticated-orcid":false,"given":"Euiyeon","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence, Sungkyunkwan University, Suwon, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9684-7641","authenticated-orcid":false,"given":"Jae-Pil","family":"Heo","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, Sungkyunkwan University, Suwon, South Korea"}]}],"member":"263","reference":[{"key":"ref13","article-title":"InMoDeGAN: Interpretable motion decomposition generative adversarial network for video generation","author":"wang","year":"2021","journal-title":"arXiv 2101 03049"},{"key":"ref12","article-title":"Adversarial video generation on complex datasets","author":"clark","year":"2019","journal-title":"arXiv 1907 06571"},{"key":"ref15","article-title":"Fourier features let networks learn high frequency functions in low dimensional domains","author":"tancik","year":"2020","journal-title":"arXiv 2006 10739"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00531"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-020-01333-y"},{"key":"ref10","article-title":"A good image generator is what you need for high-resolution video synthesis","author":"tian","year":"2021","journal-title":"arXiv 2104 15069"},{"key":"ref17","first-page":"2672","article-title":"Generative adversarial nets","author":"goodfellow","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref16","first-page":"1","article-title":"Implicit neural representations with periodic activation functions","volume":"33","author":"sitzmann","year":"2020","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref19","first-page":"1","article-title":"Progressive growing of GANs for improved quality, stability, and variation","author":"karras","year":"2018","journal-title":"Proc 6th Int Conf Learn Represent (ICLR)"},{"key":"ref18","first-page":"1","article-title":"Spectral normalization for generative adversarial networks","author":"miyato","year":"2018","journal-title":"Proc 6th Int Conf Learn Represent (ICLR)"},{"key":"ref46","article-title":"Differentiable augmentation for data-efficient GAN training","author":"zhao","year":"2020","journal-title":"arXiv 2006 10738"},{"key":"ref45","first-page":"1","article-title":"The unusual effectiveness of averaging in GAN training","author":"yaz","year":"2018","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref48","first-page":"14042","article-title":"Ccvs: Context-aware controllable video synthesis","volume":"34","author":"le moing","year":"2021","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref47","article-title":"Towards high resolution video generation with progressive growing of sliced Wasserstein GANs","author":"acharya","year":"2018","journal-title":"arXiv 1810 02419"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00685"},{"key":"ref43","first-page":"1","article-title":"GANs trained by a two time-scale update rule converge to a local Nash equilibrium","volume":"30","author":"heusel","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref49","first-page":"1","article-title":"Neural discrete representation learning","volume":"30","author":"van den oord","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00232"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00581"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00165"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00518"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00091"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref35","article-title":"FaceForensics: A large-scale video dataset for forgery detection in human faces","author":"r\u00f6ssler","year":"2018","journal-title":"arXiv 1803 09179"},{"key":"ref34","article-title":"UCF101: A dataset of 101 human actions classes from videos in the wild","author":"soomro","year":"2012","journal-title":"arXiv 1212 0402"},{"key":"ref37","article-title":"VoxCeleb2: Deep speaker recognition","author":"son chung","year":"2018","journal-title":"arXiv 1806 05622"},{"key":"ref36","first-page":"344","article-title":"Self-supervised visual planning with temporal skip connections","author":"ebert","year":"2017","journal-title":"Proc CoRL"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01061"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00461"},{"key":"ref33","article-title":"Alias-free generative adversarial networks","author":"karras","year":"2021","journal-title":"arXiv 2106 12423"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01405"},{"key":"ref2","first-page":"7354","article-title":"Self-attention generative adversarial networks","author":"zhang","year":"2019","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref1","first-page":"1","article-title":"Large scale GAN training for high fidelity natural image synthesis","author":"brock","year":"2019","journal-title":"Int Conf Learn Represent (ICLR)"},{"key":"ref39","article-title":"Towards accurate generative models of video: A new metric & challenges","author":"unterthiner","year":"2018","journal-title":"arXiv 1812 01717"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00251"},{"key":"ref24","article-title":"GANSpace: Discovering interpretable GAN controls","author":"h\u00e4rk\u00f6nen","year":"2020","journal-title":"arXiv 2004 02546"},{"key":"ref23","first-page":"9786","article-title":"Unsupervised discovery of interpretable directions in the GAN latent space","author":"voynov","year":"2020","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref26","first-page":"1","article-title":"Generating videos with dynamics-aware implicit generative adversarial networks","author":"yu","year":"2022","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2020.09.016"},{"key":"ref20","first-page":"613","article-title":"Generating videos with scene dynamics","author":"vondrick","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01068"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.308"},{"key":"ref28","first-page":"8633","article-title":"Video diffusion models","author":"ho","year":"2022","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00361"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19790-1_7"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/83\/9991910\/10183834.pdf?arnumber=10183834","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,7]],"date-time":"2023-08-07T18:36:21Z","timestamp":1691433381000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10183834\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":49,"URL":"https:\/\/doi.org\/10.1109\/tip.2023.3293767","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}