{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T05:05:32Z","timestamp":1750309532038,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":6,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,12,3]]},"DOI":"10.1145\/3681756.3697974","type":"proceedings-article","created":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T07:11:12Z","timestamp":1733209872000},"page":"1-2","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Multimodal Learning for Autoencoders"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-3633-8783","authenticated-orcid":false,"given":"Wajahat Ali","family":"Khan","sequence":"first","affiliation":[{"name":"Kyung Hee University, Yongin, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9721-4093","authenticated-orcid":false,"given":"Seungkyu","family":"Lee","sequence":"additional","affiliation":[{"name":"Kyung Hee University, Yongin, South Korea"}]}],"member":"320","published-online":{"date-parts":[[2024,12,2]]},"reference":[{"key":"e_1_3_3_1_2_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly Jakob Uszkoreit and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. \narxiv:https:\/\/arXiv.org\/abs\/2010.11929\u00a0[cs.CV]"},{"key":"e_1_3_3_1_3_1","unstructured":"Xinyang Geng Hao Liu Lisa Lee Dale Schuurmans Sergey Levine and Pieter Abbeel. 2022. Multimodal Masked Autoencoders Learn Transferable Representations. arxiv:https:\/\/arXiv.org\/abs\/2205.14204\u00a0[cs.CV]"},{"key":"e_1_3_3_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACII.2017.8273601"},{"key":"e_1_3_3_1_5_1","unstructured":"Paul\u00a0Pu Liang Amir Zadeh and Louis-Philippe Morency. 2023. Foundations and Trends in Multimodal Machine Learning: Principles Challenges and Open Questions. arxiv:https:\/\/arXiv.org\/abs\/2209.03430\u00a0[cs.LG]"},{"key":"e_1_3_3_1_6_1","first-page":"8748","volume-title":"International conference on machine learning","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong\u00a0Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et\u00a0al. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. PMLR, 8748\u20138763."},{"key":"e_1_3_3_1_7_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et\u00a0al. 2019. Language models are unsupervised multitask learners. \nOpenAI blog 1 8 (2019) 9."}],"event":{"name":"SA '24: SIGGRAPH Asia 2024 Posters","sponsor":["SIGGRAPH ACM Special Interest Group on Computer Graphics and Interactive Techniques"],"location":"Tokyo Japan","acronym":"SA '24"},"container-title":["SIGGRAPH Asia 2024 Posters"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3681756.3697974","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3681756.3697974","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:15Z","timestamp":1750295895000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3681756.3697974"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"references-count":6,"alternative-id":["10.1145\/3681756.3697974","10.1145\/3681756"],"URL":"https:\/\/doi.org\/10.1145\/3681756.3697974","relation":{},"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"2024-12-02","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}