{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T03:38:34Z","timestamp":1771299514564,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":50,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,8,14]],"date-time":"2022-08-14T00:00:00Z","timestamp":1660435200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,8,14]]},"DOI":"10.1145\/3534678.3539151","type":"proceedings-article","created":{"date-parts":[[2022,8,12]],"date-time":"2022-08-12T19:06:12Z","timestamp":1660331172000},"page":"4433-4442","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":30,"title":["CommerceMM"],"prefix":"10.1145","author":[{"given":"Licheng","family":"Yu","sequence":"first","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]},{"given":"Jun","family":"Chen","sequence":"additional","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]},{"given":"Animesh","family":"Sinha","sequence":"additional","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]},{"given":"Mengjiao","family":"Wang","sequence":"additional","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]},{"given":"Yu","family":"Chen","sequence":"additional","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]},{"given":"Tamara L.","family":"Berg","sequence":"additional","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]},{"given":"Ning","family":"Zhang","sequence":"additional","affiliation":[{"name":"Meta AI, Menlo Park, CA, USA"}]}],"member":"320","published-online":{"date-parts":[[2022,8,14]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Vatt: Transformers for multimodal self-supervised learning from raw video, audio and text. NeurIPS","author":"Akbari Hassan","year":"2021","unstructured":"Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. 2021. Vatt: Transformers for multimodal self-supervised learning from raw video, audio and text. NeurIPS (2021)."},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"crossref","unstructured":"Peter Anderson Xiaodong He Chris Buehler Damien Teney Mark Johnson Stephen Gould and Lei Zhang. 2018. Bottom-up and top-down attention for image captioning and visual question answering. In CVPR.","DOI":"10.1109\/CVPR.2018.00636"},{"key":"e_1_3_2_2_3_1","volume-title":"Vqa: Visual question answering. In ICCV.","author":"Antol Stanislaw","year":"2015","unstructured":"Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. 2015. Vqa: Visual question answering. In ICCV."},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403311"},{"key":"e_1_3_2_2_5_1","volume-title":"Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu.","author":"Chen Yen-Chun","year":"2020","unstructured":"Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2020. Uniter: Learning universal image-text representations. In ECCV."},{"key":"e_1_3_2_2_6_1","volume-title":"Unsupervised cross-lingual representation learning at scale. 
arXiv preprint arXiv:1911.02116","author":"Conneau Alexis","year":"2019","unstructured":"Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116 (2019)."},{"key":"e_1_3_2_2_7_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL.","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL."},{"key":"e_1_3_2_2_8_1","volume-title":"M5product: A multi-modal pretraining benchmark for e-commercial product downstream tasks. arXiv preprint arXiv:2109.04275","author":"Dong Xiao","year":"2021","unstructured":"Xiao Dong, Xunlin Zhan, Yangxin Wu, Yunchao Wei, Xiaoyong Wei, Minlong Lu, and Xiaodan Liang. 2021. M5product: A multi-modal pretraining benchmark for e-commercial product downstream tasks. arXiv preprint arXiv:2109.04275 (2021)."},{"key":"e_1_3_2_2_9_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"crossref","unstructured":"Zi-Yi Dou Yichong Xu Zhe Gan Jianfeng Wang Shuohang Wang Lijuan Wang Chenguang Zhu Zicheng Liu Michael Zeng et al. 2021. An Empirical Study of Training End-to-End Vision-and-Language Transformers. arXiv preprint arXiv:2111.02387 (2021).","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"e_1_3_2_2_11_1","volume-title":"Fashionbert: Text and image matching with adaptive loss for cross-modal retrieval. In SIGIR.","author":"Gao Dehong","year":"2020","unstructured":"Dehong Gao, Linbo Jin, Ben Chen, Minghui Qiu, Peng Li, Yi Wei, Yi Hu, and Hao Wang. 2020. Fashionbert: Text and image matching with adaptive loss for cross-modal retrieval. In SIGIR."},{"key":"e_1_3_2_2_12_1","volume-title":"Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377","author":"He Kaiming","year":"2021","unstructured":"Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\u00e1r, and Ross Girshick. 2021. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377 (2021)."},{"key":"e_1_3_2_2_13_1","volume-title":"Vivo: Surpassing human performance in novel object captioning with visual vocabulary pre-training. In AAAI.","author":"Hu Xiaowei","year":"2021","unstructured":"Xiaowei Hu, Xi Yin, Kevin Lin, Lijuan Wang, Lei Zhang, Jianfeng Gao, and Zicheng Liu. 2021. Vivo: Surpassing human performance in novel object captioning with visual vocabulary pre-training. In AAAI."},{"key":"e_1_3_2_2_14_1","volume-title":"Pixel-bert: Aligning image pixels with text by deep multi-modal transformers. arXiv preprint arXiv:2004.00849","author":"Huang Zhicheng","year":"2020","unstructured":"Zhicheng Huang, Zhaoyang Zeng, Bei Liu, Dongmei Fu, and Jianlong Fu. 2020. Pixel-bert: Aligning image pixels with text by deep multi-modal transformers. arXiv preprint arXiv:2004.00849 (2020)."},{"key":"e_1_3_2_2_15_1","unstructured":"Chao Jia Yinfei Yang Ye Xia Yi-Ting Chen Zarana Parekh Hieu Pham Quoc Le Yun-Hsuan Sung Zhen Li and Tom Duerig. 2021. 
Scaling up visual and vision-language representation learning with noisy text supervision. In ICML."},{"key":"e_1_3_2_2_16_1","volume-title":"Vilt: Vision-and-language transformer without convolution or region supervision. In ICML.","author":"Kim Wonjae","year":"2021","unstructured":"Wonjae Kim, Bokyung Son, and Ildoo Kim. 2021. Vilt: Vision-and-language transformer without convolution or region supervision. In ICML."},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"crossref","unstructured":"Ranjay Krishna Yuke Zhu Oliver Groth Justin Johnson Kenji Hata Joshua Kravitz Stephanie Chen Yannis Kalantidis Li-Jia Li David A Shamma et al. 2017. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV (2017).","DOI":"10.1007\/s11263-016-0981-7"},{"key":"e_1_3_2_2_18_1","volume-title":"Unicoder-vl: A universal encoder for vision and language by cross-modal pre-training. In AAAI.","author":"Li Gen","year":"2020","unstructured":"Gen Li, Nan Duan, Yuejian Fang, Ming Gong, and Daxin Jiang. 2020. Unicoder-vl: A universal encoder for vision and language by cross-modal pre-training. In AAAI."},{"key":"e_1_3_2_2_19_1","volume-title":"Align before fuse: Vision and language representation learning with momentum distillation. NeurIPS","author":"Li Junnan","year":"2021","unstructured":"Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. 2021. Align before fuse: Vision and language representation learning with momentum distillation. NeurIPS (2021)."},{"key":"e_1_3_2_2_20_1","volume-title":"Hero: Hierarchical encoder for video+ language omni-representation pre-training. In EMNLP.","author":"Li Linjie","year":"2020","unstructured":"Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, and Jingjing Liu. 2020. Hero: Hierarchical encoder for video+ language omni-representation pre-training. In EMNLP."},{"key":"e_1_3_2_2_21_1","volume-title":"Visualbert: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557","author":"Li Liunian Harold","year":"2019","unstructured":"Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. 2019. Visualbert: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557 (2019)."},{"key":"e_1_3_2_2_22_1","volume-title":"Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. arXiv preprint arXiv:2012.15409","author":"Li Wei","year":"2020","unstructured":"Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. 2020. Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. arXiv preprint arXiv:2012.15409 (2020)."},{"key":"e_1_3_2_2_23_1","volume-title":"Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV.","author":"Li Xiujun","year":"2020","unstructured":"Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. 2020. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV."},{"key":"e_1_3_2_2_24_1","unstructured":"Tsung-Yi Lin Michael Maire Serge Belongie James Hays Pietro Perona Deva Ramanan Piotr Doll\u00e1r and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In ECCV."},{"key":"e_1_3_2_2_25_1","volume-title":"Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. 
NeurIPS","author":"Lu Jiasen","year":"2019","unstructured":"Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. 2019. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. NeurIPS (2019)."},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"crossref","unstructured":"Dhruv Mahajan Ross Girshick Vignesh Ramanathan Kaiming He Manohar Paluri Yixuan Li Ashwin Bharambe and Laurens Van Der Maaten. 2018. Exploring the limits of weakly supervised pretraining. In ECCV.","DOI":"10.1007\/978-3-030-01216-8_12"},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"crossref","unstructured":"Antoine Miech Jean-Baptiste Alayrac Ivan Laptev Josef Sivic and Andrew Zisserman. 2021. Thinking Fast and Slow: Efficient Text-to-Visual Retrieval with Transformers. In CVPR.","DOI":"10.1109\/CVPR46437.2021.00970"},{"key":"e_1_3_2_2_28_1","volume-title":"Im2text: Describing images using 1 million captioned photographs. NeurIPS","author":"Ordonez Vicente","year":"2011","unstructured":"Vicente Ordonez, Girish Kulkarni, and Tamara Berg. 2011. Im2text: Describing images using 1 million captioned photographs. NeurIPS (2011)."},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"crossref","unstructured":"Bryan A Plummer Liwei Wang Chris M Cervantes Juan C Caicedo Julia Hockenmaier and Svetlana Lazebnik. 2015. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In ICCV.","DOI":"10.1109\/ICCV.2015.303"},{"key":"e_1_3_2_2_30_1","volume-title":"Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al.","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In ICML."},{"key":"e_1_3_2_2_31_1","volume-title":"Fashion-gen: The generative fashion dataset and challenge. arXiv preprint arXiv:1806.08317","author":"Rostamzadeh Negar","year":"2018","unstructured":"Negar Rostamzadeh, Seyedarian Hosseini, Thomas Boquet, Wojciech Stokowiec, Ying Zhang, Christian Jauvin, and Chris Pal. 2018. Fashion-gen: The generative fashion dataset and challenge. arXiv preprint arXiv:1806.08317 (2018)."},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"crossref","unstructured":"Piyush Sharma Nan Ding Sebastian Goodman and Radu Soricut. 2018. Conceptual captions: A cleaned hypernymed image alt-text dataset for automatic image captioning. In ACL.","DOI":"10.18653\/v1\/P18-1238"},{"key":"e_1_3_2_2_33_1","volume-title":"Meet Shah, Marcus Rohrbach, Dhruv Batra, and Devi Parikh.","author":"Singh Amanpreet","year":"2020","unstructured":"Amanpreet Singh, Vedanuj Goswami, Vivek Natarajan, Yu Jiang, Xinlei Chen, Meet Shah, Marcus Rohrbach, Dhruv Batra, and Devi Parikh. 2020. MMF: A multimodal framework for vision and language research. https:\/\/github.com\/ facebookresearch\/mmf."},{"key":"e_1_3_2_2_34_1","volume-title":"Vl-bert: Pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530","author":"Su Weijie","year":"2019","unstructured":"Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. 2019. Vl-bert: Pre-training of generic visual-linguistic representations. 
arXiv preprint arXiv:1908.08530 (2019)."},{"key":"e_1_3_2_2_35_1","volume-title":"Carl Vondrick, Kevin Murphy, and Cordelia Schmid.","author":"Sun Chen","year":"2019","unstructured":"Chen Sun, Austin Myers, Carl Vondrick, Kevin Murphy, and Cordelia Schmid. 2019. Videobert: A joint model for video and language representation learning. In ICCV."},{"key":"e_1_3_2_2_36_1","volume-title":"Lxmert: Learning cross-modality encoder representations from transformers. In EMNLP.","author":"Tan Hao","year":"2019","unstructured":"Hao Tan and Mohit Bansal. 2019. Lxmert: Learning cross-modality encoder representations from transformers. In EMNLP."},{"key":"e_1_3_2_2_37_1","volume-title":"Attention is all you need. NeurIPS","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. NeurIPS (2017)."},{"key":"e_1_3_2_2_38_1","volume-title":"UFO: A UniFied TransfOrmer for VisionLanguage Representation Learning. arXiv preprint arXiv:2111.10023","author":"Wang Jianfeng","year":"2021","unstructured":"Jianfeng Wang, Xiaowei Hu, Zhe Gan, Zhengyuan Yang, Xiyang Dai, Zicheng Liu, Yumao Lu, and Lijuan Wang. 2021. UFO: A UniFied TransfOrmer for VisionLanguage Representation Learning. arXiv preprint arXiv:2111.10023 (2021)."},{"key":"e_1_3_2_2_39_1","volume-title":"VLMo: Unified Vision-Language Pre-Training with Mixture-of-Modality-Experts. arXiv preprint arXiv:2111.02358","author":"Wang Wenhui","year":"2021","unstructured":"Wenhui Wang, Hangbo Bao, Li Dong, and Furu Wei. 2021. VLMo: Unified Vision-Language Pre-Training with Mixture-of-Modality-Experts. arXiv preprint arXiv:2111.02358 (2021)."},{"key":"e_1_3_2_2_40_1","volume-title":"Zihang Dai, Yulia Tsvetkov, and Yuan Cao.","author":"Wang Zirui","year":"2021","unstructured":"Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. 2021. Simvlm: Simple visual language model pretraining with weak supervision. arXiv preprint arXiv:2108.10904 (2021)."},{"key":"e_1_3_2_2_41_1","volume-title":"Masked Feature Prediction for Self-Supervised Visual PreTraining. arXiv preprint arXiv:2112.09133","author":"Wei Chen","year":"2021","unstructured":"Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. 2021. Masked Feature Prediction for Self-Supervised Visual PreTraining. arXiv preprint arXiv:2112.09133 (2021)."},{"key":"e_1_3_2_2_42_1","volume-title":"Simmim: A simple framework for masked image modeling. arXiv preprint arXiv:2111.09886","author":"Xie Zhenda","year":"2021","unstructured":"Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. 2021. Simmim: A simple framework for masked image modeling. arXiv preprint arXiv:2111.09886 (2021)."},{"key":"e_1_3_2_2_43_1","volume-title":"Clusterfit: Improving generalization of visual representations. In CVPR.","author":"Yan Xueting","year":"2020","unstructured":"Xueting Yan, Ishan Misra, Abhinav Gupta, Deepti Ghadiyaram, and Dhruv Mahajan. 2020. Clusterfit: Improving generalization of visual representations. In CVPR."},{"key":"e_1_3_2_2_44_1","unstructured":"Haoxuan You Luowei Zhou Bin Xiao Noel C Codella Yu Cheng Ruochen Xu Shih-Fu Chang and Lu Yuan. 2021. MA-CLIP: Towards Modality-Agnostic Contrastive Language-Image Pre-training. (2021)."},{"key":"e_1_3_2_2_45_1","volume-title":"Ernie-vil: Knowledge enhanced vision-language representations through scene graph. 
arXiv preprint arXiv:2006.16934","author":"Yu Fei","year":"2020","unstructured":"Fei Yu, Jiji Tang, Weichong Yin, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. 2020. Ernie-vil: Knowledge enhanced vision-language representations through scene graph. arXiv preprint arXiv:2006.16934 (2020)."},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"crossref","unstructured":"Rowan Zellers Yonatan Bisk Ali Farhadi and Yejin Choi. 2019. From Recognition to Cognition: Visual Commonsense Reasoning. In CVPR.","DOI":"10.1109\/CVPR.2019.00688"},{"key":"e_1_3_2_2_47_1","volume-title":"Vinvl: Revisiting visual representations in vision-language models. In CVPR.","author":"Zhang Pengchuan","year":"2021","unstructured":"Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. 2021. Vinvl: Revisiting visual representations in vision-language models. In CVPR."},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"crossref","unstructured":"Luowei Zhou Hamid Palangi Lei Zhang Houdong Hu Jason Corso and Jianfeng Gao. 2020. Unified vision-language pre-training for image captioning and vqa. In AAAI.","DOI":"10.1609\/aaai.v34i07.7005"},{"key":"e_1_3_2_2_49_1","unstructured":"Yushan Zhu Huaixiao Zhao Wen Zhang Ganqiang Ye Hui Chen Ningyu Zhang and Huajun Chen. 2021. Knowledge perceived multi-modal pretraining in ecommerce. In ACM-MM."},{"key":"e_1_3_2_2_50_1","volume-title":"Kaleido-bert: Vision-language pretraining on fashion domain. In CVPR.","author":"Zhuge Mingchen","year":"2021","unstructured":"Mingchen Zhuge, Dehong Gao, Deng-Ping Fan, Linbo Jin, Ben Chen, Haoming Zhou, Minghui Qiu, and Ling Shao. 2021. Kaleido-bert: Vision-language pretraining on fashion domain. In CVPR."}],"event":{"name":"KDD '22: The 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Washington DC USA","acronym":"KDD '22","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3534678.3539151","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3534678.3539151","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:58Z","timestamp":1750186978000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3534678.3539151"}},"subtitle":["Large-Scale Commerce MultiModal Representation Learning with Omni Retrieval"],"short-title":[],"issued":{"date-parts":[[2022,8,14]]},"references-count":50,"alternative-id":["10.1145\/3534678.3539151","10.1145\/3534678"],"URL":"https:\/\/doi.org\/10.1145\/3534678.3539151","relation":{},"subject":[],"published":{"date-parts":[[2022,8,14]]},"assertion":[{"value":"2022-08-14","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
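The record above follows the Crossref REST API work schema (the full response arrives wrapped in a {"status": ..., "message-type": "work", "message": {...}} envelope). As a minimal sketch of how these fields can be pulled programmatically, the snippet below queries the public api.crossref.org endpoint for this DOI and prints a one-line citation; the field names come from the record itself, while the formatting of the output string is just an illustrative choice.

```python
# Minimal sketch: fetch this work's Crossref record and print the core
# bibliographic fields. Assumes the public Crossref REST API endpoint
# https://api.crossref.org/works/<DOI>, which returns the same
# {"status": ..., "message": {...}} envelope shown above.
import json
import urllib.request

DOI = "10.1145/3534678.3539151"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    record = json.load(resp)["message"]  # the work object itself

title = record["title"][0]
subtitle = record.get("subtitle", [""])[0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in record["author"])
venue = record["container-title"][0]
pages = record.get("page", "")

# Example output format (illustrative, not a Crossref-defined style):
print(f"{authors}. {title}: {subtitle}. {venue}, pp. {pages}. doi:{DOI}")
```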