{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,2]],"date-time":"2026-03-02T14:18:33Z","timestamp":1772461113559,"version":"3.50.1"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/access.2025.3571760","type":"journal-article","created":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T17:16:15Z","timestamp":1747761375000},"page":"92712-92729","source":"Crossref","is-referenced-by-count":3,"title":["AMT-Net: Adversarial Motion Transfer Network With Disentangled Shape and Pose for Realistic Image Animation"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6509-1367","authenticated-orcid":false,"given":"Nega","family":"Asebe Teka","sequence":"first","affiliation":[{"name":"School of Information and Communication Engineering, UESTC, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6458-1882","authenticated-orcid":false,"given":"Kumie","family":"Gedamu Alemu","sequence":"additional","affiliation":[{"name":"Sichuan Artificial Intelligence Research Institute, UESTC, Yibin, China"}]},{"given":"Maregu","family":"Assefa","sequence":"additional","affiliation":[{"name":"School of Computing and Mathematics, Khalifa University, Abu Dhabi, United Arab Emirates"}]},{"given":"Feidu","family":"Akmel","sequence":"additional","affiliation":[{"name":"School of Information and Communication Engineering, UESTC, Chengdu, China"}]},{"given":"Zhenting","family":"Zhou","sequence":"additional","affiliation":[{"name":"School of Information and Communication Engineering, UESTC, Chengdu, China"}]},{"given":"Weijie","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Information and Communication Engineering, UESTC, Chengdu, China"}]},{"given":"Jianwen","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Information and Communication Engineering, UESTC, Chengdu, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"First order motion model for image animation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Siarohin"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00991"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/113"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.128168"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2021.3100352"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00359"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01344"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00248"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01413"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6721"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00779"},{"key":"ref12","article-title":"Disco: Disentangled control for referring human dance generation in real world","author":"Wang","year":"2023","journal-title":"arXiv:2307.00040"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00364"},{"key":"ref14","article-title":"Image animation with keypoint mask","author":"Toledano","year":"2021","journal-title":"arXiv:2112.10457"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00363"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.cag.2023.12.009"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_24"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00600"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00018"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19787-1_40"},{"key":"ref21","first-page":"1","article-title":"Generative adversarial nets","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Goodfellow"},{"key":"ref22","article-title":"Dynamics transfer GAN: Generating video by transferring arbitrary temporal dynamics from a source video to a single target image","author":"Baddar","year":"2017","journal-title":"arXiv:1712.03534"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.580"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01261-8_41"},{"key":"ref25","article-title":"Latent image animator: Learning to animate images via latent space navigation","author":"Wang","year":"2022","journal-title":"arXiv:2203.09043"},{"key":"ref26","article-title":"PriorityCut: Occlusion-guided regularization for warp-based image animation","author":"Ting Cheung","year":"2021","journal-title":"arXiv:2103.11600"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.vrih.2024.04.002"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS46773.2023.10181664"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3160297"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2023.3318956"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-950"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.632"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.310"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01249-6_13"},{"key":"ref36","first-page":"2225","article-title":"Video prediction with appearance and motion conditions","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jang"},{"key":"ref37","article-title":"Deep multi-scale video prediction beyond mean square error","author":"Mathieu","year":"2015","journal-title":"arXiv:1511.05440"},{"key":"ref38","article-title":"Video-to-video synthesis","author":"Wang","year":"2018","journal-title":"arXiv:1808.06601"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_8"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/s00530-024-01499-2"},{"key":"ref41","first-page":"5932","article-title":"Everybody dance now","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis. (ICCV)","author":"Chan"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-024-02231-3"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.3018224"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3449075"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00602"},{"key":"ref46","first-page":"843","article-title":"Unsupervised learning of video representations using LSTMs","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Srivastava"},{"key":"ref47","first-page":"2863","article-title":"Action-conditional video prediction using deep networks in Atari games","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Oh"},{"key":"ref48","article-title":"Unsupervised learning for physical interaction through video prediction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Finn"},{"key":"ref49","article-title":"Transformation-based models of video sequences","author":"van Amersfoort","year":"2017","journal-title":"arXiv:1701.08435"},{"key":"ref50","first-page":"1771","article-title":"Video pixel networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kalchbrenner"},{"key":"ref51","article-title":"Stochastic variational video prediction","author":"Babaeizadeh","year":"2017","journal-title":"arXiv:1710.11252"},{"key":"ref52","first-page":"3560","article-title":"Learning to generate long-term future via hierarchical prediction","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Villegas"},{"key":"ref53","article-title":"Generating videos with scene dynamics","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Vondrick"},{"key":"ref54","doi-asserted-by":"crossref","DOI":"10.1145\/3240508.3240704","article-title":"GestureGAN for hand gesture-to-gesture translation in the wild","volume-title":"Proc. 26th ACM Int. Conf. Multimedia","author":"Tang"},{"issue":"2","key":"ref55","doi-asserted-by":"crossref","first-page":"523","DOI":"10.1111\/cgf.13382","article-title":"State of the art on monocular 3D face reconstruction, tracking, and applications","volume":"37","author":"Zollh\u00f6fer","year":"2018","journal-title":"Comput. Graph. Forum"},{"key":"ref56","first-page":"9458","article-title":"Few-shot adversarial learning of realistic neural talking head models","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis. (ICCV)","author":"Zakharov"},{"key":"ref57","first-page":"2382","article-title":"Textured neural avatars","volume-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR)","author":"Shysheya"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_29"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.5220\/0005355506350642"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00019"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1603.08155"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10820123\/11007652.pdf?arnumber=11007652","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,2]],"date-time":"2025-06-02T18:03:11Z","timestamp":1748887391000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11007652\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":61,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3571760","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}