{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T11:40:06Z","timestamp":1755862806046,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":25,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,2,2]],"date-time":"2024-02-02T00:00:00Z","timestamp":1706832000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,2,2]]},"DOI":"10.1145\/3651671.3651681","type":"proceedings-article","created":{"date-parts":[[2024,6,7]],"date-time":"2024-06-07T18:55:50Z","timestamp":1717786550000},"page":"355-362","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Generative-AI- and Optical-Flow-Based Aspect Ratio Enhancement of Videos"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4535-2774","authenticated-orcid":false,"given":"Tomasz Jan","family":"Palczewski","sequence":"first","affiliation":[{"name":"Visual Display Intelligence Lab, Samsung Research America, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-5307-8936","authenticated-orcid":false,"given":"Anirudh","family":"Rao","sequence":"additional","affiliation":[{"name":"Visual Display Intelligence Lab, Samsung Research America, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-5709-1335","authenticated-orcid":false,"given":"Yingnan","family":"Zhu","sequence":"additional","affiliation":[{"name":"Visual Display Intelligence Lab, Samsung Research America, USA"}]}],"member":"320","published-online":{"date-parts":[[2024,6,7]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Retrieved","author":"DeepFloyd","year":"2023","unstructured":"DeepFloyd. 2023. DeepFloyd-IF model. Retrieved Aug 24, 2023 from https:\/\/www.deepfloyd.ai\/deepfloyd-if"},{"volume-title":"2023. Align your Latents: High-Resolution Video Synthesis with Latent Diffusion Models. arXiv:2304.08818","year":"2023","key":"e_1_3_2_1_2_1","unstructured":"Andreas\u00a0Blattmann et al.2023. Align your Latents: High-Resolution Video Synthesis with Latent Diffusion Models. arXiv:2304.08818 (2023)."},{"volume-title":"2020. Flow-edge Guided Video Completion. arXiv:2009.01835v1","year":"2020","key":"e_1_3_2_1_3_1","unstructured":"Chen\u00a0Gao et al.2020. Flow-edge Guided Video Completion. arXiv:2009.01835v1 (2020)."},{"volume-title":"2023. Blind Video Deflickering by Neural Filtering with a Flawed Atlas. arXiv:2303.08120","year":"2023","key":"e_1_3_2_1_4_1","unstructured":"Chenyang\u00a0Lei et al.2023. Blind Video Deflickering by Neural Filtering with a Flawed Atlas. arXiv:2303.08120 (2023)."},{"volume-title":"2016. Context encoders: Feature learning by inpainting. CVPR","year":"2016","key":"e_1_3_2_1_5_1","unstructured":"D.\u00a0Pathak et al.2016. Context encoders: Feature learning by inpainting. CVPR (2016), 2536\u20132544."},{"volume-title":"2023. The RefinedWeb Dataset for Falcon LLM: Outperforming Curated Corpora with Web Data, and Web Data Only. arXiv:2306.01116v1","year":"2023","key":"e_1_3_2_1_6_1","unstructured":"Guilherme\u00a0Penedo et al.2023. The RefinedWeb Dataset for Falcon LLM: Outperforming Curated Corpora with Web Data, and Web Data Only. arXiv:2306.01116v1 (2023)."},{"volume-title":"2020. Rethinking image inpainting via a mutual encoderdecoder with feature equalizations. 
ECCV","year":"2020","key":"e_1_3_2_1_7_1","unstructured":"Hongyu\u00a0Liu et al.2020. Rethinking image inpainting via a mutual encoderdecoder with feature equalizations. ECCV (2020)."},{"volume-title":"2023. Deep learning for image inpainting: A survey. Pattern Recognition","year":"2023","key":"e_1_3_2_1_8_1","unstructured":"Hanyu\u00a0Xiang et al.2023. Deep learning for image inpainting: A survey. Pattern Recognition Volume 134 (2023)."},{"volume-title":"2014. Generative adversarial nets. Advances in neural information processing systems","year":"2014","key":"e_1_3_2_1_9_1","unstructured":"Ian\u00a0Goodfellow et al.2014. Generative adversarial nets. Advances in neural information processing systems (2014)."},{"volume-title":"2022. BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation. rXiv:2201.12086","year":"2022","key":"e_1_3_2_1_10_1","unstructured":"Junnan\u00a0Li et al.2022. BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation. rXiv:2201.12086 (2022)."},{"volume-title":"2022. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. arXiv:2201.11903","year":"2022","key":"e_1_3_2_1_11_1","unstructured":"Jason\u00a0Wei et al.2022. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. arXiv:2201.11903 (2022)."},{"volume-title":"2018. Generative image inpainting with contextual attention. CVPR","year":"2018","key":"e_1_3_2_1_12_1","unstructured":"J.\u00a0Yu et al.2018. Generative image inpainting with contextual attention. CVPR (2018)."},{"volume-title":"2023. Exploiting Optical Flow Guidance for Transformer-Based Video Inpainting. arXiv:2301.10048v1","year":"2023","key":"e_1_3_2_1_13_1","unstructured":"Kaidong\u00a0Zhang et al.2023. Exploiting Optical Flow Guidance for Transformer-Based Video Inpainting. arXiv:2301.10048v1 (2023)."},{"key":"e_1_3_2_1_14_1","volume-title":"Sig. Proces.: Image Comm.","volume":"30","year":"2015","unstructured":"Nikolay\u00a0Ponomarenko et al.2015. Image database tid2013: Peculiarities, results and perspectives. Sig. Proces.: Image Comm., vol. 30 (2015)."},{"volume-title":"2022. Make-A-Video: Text-to-Video Generation without Text-Video Data. arXiv:2209.14792","year":"2022","key":"e_1_3_2_1_15_1","unstructured":"Uriel\u00a0Singer et al.2022. Make-A-Video: Text-to-Video Generation without Text-Video Data. arXiv:2209.14792 (2022)."},{"volume-title":"2021. Aggregated contextual transformations for high-resolution image inpainting. arXiv:2104.01431","year":"2021","key":"e_1_3_2_1_16_1","unstructured":"Yanhong\u00a0Zeng et al.2021. Aggregated contextual transformations for high-resolution image inpainting. arXiv:2104.01431 (2021)."},{"volume-title":"2021. High-fidelity pluralistic image completion with transformers. arXiv:2103.14031","year":"2021","key":"e_1_3_2_1_17_1","unstructured":"Ziyu\u00a0Wan et al.2021. High-fidelity pluralistic image completion with transformers. arXiv:2103.14031 (2021)."},{"volume-title":"Retrieved","year":"2023","key":"e_1_3_2_1_18_1","unstructured":"google. 2023. Base ViT. Retrieved Aug 24, 2023 from google\/vit-base-patch16-224-in21k"},{"key":"e_1_3_2_1_19_1","volume-title":"GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium. arXiv:1706.08500","author":"\u00a0al Heusel","year":"2017","unstructured":"Martin et\u00a0al. Heusel. 2017. GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium. 
arXiv:1706.08500 (2017)."},{"key":"e_1_3_2_1_20_1","volume-title":"A survey on data-driven video completion. Computer Graphics Forum","author":"\u00a0et\u00a0al Ilan","year":"2015","unstructured":"S.\u00a0et\u00a0al. Ilan. 2015. A survey on data-driven video completion. Computer Graphics Forum. vol. 34 (2015)."},{"volume-title":"Retrieved","year":"2023","key":"e_1_3_2_1_21_1","unstructured":"lllyasviel. 2023. ControlNet inpainting. Retrieved Aug 24, 2023 from https:\/\/huggingface.co\/lllyasviel\/control_v11p_sd15_inpaint"},{"key":"e_1_3_2_1_22_1","volume-title":"A benchmark dataset and evaluation methodology for video object segmentation. CVPR","author":"\u00a0et\u00a0al Perazzi","year":"2016","unstructured":"F.\u00a0et\u00a0al. Perazzi. 2016. A benchmark dataset and evaluation methodology for video object segmentation. CVPR (2016)."},{"volume-title":"Retrieved","year":"2023","key":"e_1_3_2_1_23_1","unstructured":"pharmapsychotic. 2023. clip-interrogator. Retrieved Aug 24, 2023 from https:\/\/github.com\/pharmapsychotic\/clip-interrogator"},{"key":"e_1_3_2_1_24_1","volume-title":"TOG","volume":"36","author":"Ishikawa","year":"2017","unstructured":"E.\u00a0Simo-Serra S.\u00a0Iizuka and H. Ishikawa. 2017. Globally and locally consistent image completion. TOG, vol. 36 (2017)."},{"volume-title":"Retrieved","year":"2023","key":"e_1_3_2_1_25_1","unstructured":"stabilityai. 2023. stabilityai\/stable-diffusion-2-inpainting. Retrieved Aug 24, 2023 from https:\/\/huggingface.co\/stabilityai\/stable-diffusion-2-inpainting"}],"event":{"name":"ICMLC 2024: 2024 16th International Conference on Machine Learning and Computing","acronym":"ICMLC 2024","location":"Shenzhen China"},"container-title":["Proceedings of the 2024 16th International Conference on Machine Learning and Computing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3651671.3651681","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3651671.3651681","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T11:19:39Z","timestamp":1755861579000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3651671.3651681"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,2,2]]},"references-count":25,"alternative-id":["10.1145\/3651671.3651681","10.1145\/3651671"],"URL":"https:\/\/doi.org\/10.1145\/3651671.3651681","relation":{},"subject":[],"published":{"date-parts":[[2024,2,2]]},"assertion":[{"value":"2024-06-07","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}