{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T06:21:46Z","timestamp":1774678906605,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":42,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T00:00:00Z","timestamp":1705017600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,1,12]]},"DOI":"10.1145\/3647782.3647803","type":"proceedings-article","created":{"date-parts":[[2024,5,7]],"date-time":"2024-05-07T00:05:11Z","timestamp":1715040311000},"page":"133-143","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":36,"title":["A Literature Survey on Open Source Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-6787-5426","authenticated-orcid":false,"given":"Sanjay","family":"Kukreja","sequence":"first","affiliation":[{"name":"SP Jain School of Global Management, India"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-9595-5500","authenticated-orcid":false,"given":"Tarun","family":"Kumar","sequence":"additional","affiliation":[{"name":"eClerx Services Ltd., India"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5187-5396","authenticated-orcid":false,"given":"Amit","family":"Purohit","sequence":"additional","affiliation":[{"name":"eClerx Services Ltd., India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3591-6409","authenticated-orcid":false,"given":"Abhijit","family":"Dasgupta","sequence":"additional","affiliation":[{"name":"SP Jain School of Global Management, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9651-4676","authenticated-orcid":false,"given":"Debashis","family":"Guha","sequence":"additional","affiliation":[{"name":"SP Jain School of Global Management, India"}]}],"member":"320","published-online":{"date-parts":[[2024,5,6]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Jian-Yun Nie and Ji-Rong Wen","author":"Zhao Wayne Xin","year":"2023","unstructured":"Wayne Xin Zhao, Kun Zhou*, Junyi Li*, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, Yifan Du, Chen Yang, Yushuo Chen, Zhipeng Chen, Jinhao Jiang, Ruiyang Ren, Yifan Li, Xinyu Tang, Zikang Liu, Peiyu Liu, Jian-Yun Nie and Ji-Rong Wen, 2023, A Survey of Large Language Models, arXiv:2303.18223v11 [cs.CL], pg. 1"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/1034780.1034781"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.5555\/944919.944966"},{"key":"e_1_3_2_1_4_1","first-page":"03551","article-title":"Talking about large language models","volume":"2212","author":"Shanahan M.","year":"2022","unstructured":"M. Shanahan, 2022, \u201cTalking about large language models,\u201d CoRR, vol. abs\/2212.03551.","journal-title":"CoRR"},{"key":"e_1_3_2_1_5_1","first-page":"11903","article-title":"Chain of thought prompting elicits reasoning in large language models","volume":"2201","author":"Wei J.","year":"2022","unstructured":"J. Wei, X. Wang, D. Schuurmans, M. Bosma, E. H. Chi, Q. Le, and D. 
Zhou, 2022, \u201cChain of thought prompting elicits reasoning in large language models,\u201d CoRR, vol. abs\/2201.11903.","journal-title":"CoRR"},{"key":"e_1_3_2_1_6_1","volume-title":"Sifre","volume":"2203","author":"Hoffmann J.","year":"2022","unstructured":"J. Hoffmann, S. Borgeaud, A. Mensch, E. Buchatskaya, T. Cai, E. Rutherford, D. de Las Casas, L. A. Hendricks, J. Welbl, A. Clark, T. Hennigan, E. Noland, K. Millican, G. van den Driessche, B. Damoc, A. Guy, S. Osindero, K. Simonyan, E. Elsen, J. W. Rae, O. Vinyals, and L. Sifre, 2022, \u201cTraining compute-optimal large language models,\u201d vol. abs\/2203.15556."},{"key":"e_1_3_2_1_7_1","first-page":"09085","article-title":"Galactica: A large language model for science","volume":"2211","author":"Taylor R.","year":"2022","unstructured":"R. Taylor, M. Kardas, G. Cucurull, T. Scialom, A. Hartshorn, E. Saravia, A. Poulton, V. Kerkez, and R. Stojnic, 2022, \u201cGalactica: A large language model for science,\u201d CoRR, vol. abs\/2211.09085.","journal-title":"CoRR"},{"key":"e_1_3_2_1_8_1","unstructured":"S. Bubeck V. Chandrasekaran R. Eldan J. Gehrke E. Horvitz E. Kamar P. Lee Y. T. Lee Y. Li S. Lundberg H. Nori H. Palangi M. T. Ribeiro and Y. Zhang 2023 \u201cSparks of artificial general intelligence: Early experiments with GPT-4 \u201d vol. abs\/2303.12712."},{"key":"e_1_3_2_1_9_1","unstructured":"Y. Fu H. Peng and T. Khot 2022 \u201cHow does GPT obtain its ability? Tracing emergent abilities of language models to their sources \u201d Yao Fu's Notion."},{"key":"e_1_3_2_1_10_1","unstructured":"OpenAI 2023 \u201cGPT-4 technical report \u201d OpenAI."},{"key":"e_1_3_2_1_11_1","unstructured":"Dzmitry Bahdanau Kyunghyun Cho and Yoshua Bengio 2014 Neural machine translation by jointly learning to align and translate. CoRR abs\/1409.0473."},{"key":"e_1_3_2_1_12_1","unstructured":"Kyunghyun Cho Bart van Merrienboer Caglar Gulcehre Fethi Bougares Holger Schwenk and Yoshua Bengio 2014 Learning phrase representations using RNN encoder-decoder for statistical machine translation. CoRR abs\/1406.1078."},{"key":"e_1_3_2_1_13_1","first-page":"3112","volume-title":"Advances in Neural Information Processing Systems","author":"Sutskever Ilya","year":"2014","unstructured":"Ilya Sutskever, Oriol Vinyals, and Quoc V. Le, 2014, Sequence to sequence learning with neural networks. In Advances in Neural Information Processing Systems, pages 3104\u20133112."},{"key":"e_1_3_2_1_14_1","volume-title":"Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems.","author":"Brown T. B.","unstructured":"T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. Ramesh, D. M. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, and D. Amodei, 2020, \u201cLanguage models are few-shot learners,\u201d in Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems."},{"key":"e_1_3_2_1_15_1","first-page":"02311","article-title":"PaLM: Scaling language modeling with pathways","volume":"2204","author":"Chowdhery A.","year":"2022","unstructured":"A. Chowdhery, S. Narang, J. Devlin, M. Bosma, G. Mishra, A. Roberts, P. Barham, H. W. Chung, C. Sutton, S. Gehrmann, P. Schuh, K. Shi, S. Tsvyashchenko, J. Maynez, A. Rao, P. Barnes, Y. Tay, N. Shazeer, V. 
Prabhakaran, E. Reif, N. Du, B. Hutchinson, R. Pope, J. Bradbury, J. Austin, M. Isard, G. Gur-Ari, P. Yin, T. Duke, A. Levskaya, S. Ghemawat, S. Dev, H. Michalewski, X. Garcia, V. Misra, K. Robinson, L. Fedus, D. Zhou, D. Ippolito, D. Luan, H. Lim, B. Zoph, A. Spiridonov, R. Sepassi, D. Dohan, S. Agrawal, M. Omernick, A. M. Dai, T. S. Pillai, M. Pellat, A. Lewkowycz, E. Moreira, R. Child, O. Polozov, K. Lee, Z. Zhou, X. Wang, B. Saeta, M. Diaz, O. Firat, M. Catasta, J. Wei, K. Meier-Hellstern, D. Eck, J. Dean, S. Petrov, and N. Fiedel, 2022, \u201cPaLM: Scaling language modeling with pathways,\u201d CoRR, vol. abs\/2204.02311.","journal-title":"CoRR"},{"key":"e_1_3_2_1_16_1","unstructured":"Hugo Lauren\u00e7on Lucile Saulnier Thomas Wang Christopher Akiki Albert Villanova del Moral Teven Le Scao Leandro Von Werra Chenghao Mou Eduardo Gonz\u00e1lez Ponferrada Huu Nguyen J\u00f6rg Frohberg Mario \u0160a\u0161ko Quentin Lhoest Angelina McMillan-Major G\u00e9rard Dupont Stella Biderman Anna Rogers Loubna Ben allal Francesco De Toni Giada Pistilli Olivier Nguyen Somaieh Nikpoor Maraim Masoud Pierre Colombo Javier de la Rosa Paulo Villegas Tristan Thrush Shayne Longpre Sebastian Nagel Leon Weber Manuel Romero Mu\u00f1oz Jian Zhu Daniel Van Strien Zaid Alyafeai Khalid Almubarak Vu Minh Chien Itziar Gonzalez-Dios Aitor Soroa Kyle Lo Manan Dey Pedro Ortiz Suarez Aaron Gokaslan Shamik Bose David Ifeoluwa Adelani Long Phan Hieu Tran Ian Yu Suhas Pai Jenny Chim Violette Lepercq Suzana Ilic Margaret Mitchell Sasha Luccioni and Yacine Jernite 2022. The BigScience ROOTS corpus: A 1.6TB composite multilingual dataset. In Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track. URL https:\/\/openreview.net\/forum?id=UoEw6KigkUn."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","unstructured":"Quentin Lhoest Albert Villanova del Moral Yacine Jernite Abhishek Thakur Patrick von Platen Suraj Patil Julien Chaumond Mariama Drame Julien Plu Lewis Tunstall Joe Davison Mario \u0160a\u0161ko Gunjan Chhablani Bhavitvya Malik Simon Brandeis Teven Le Scao Victor Sanh Canwen Xu Nicolas Patry Angelina McMillan-Major Philipp Schmid Sylvain Gugger Cl\u00e9ment Delangue Th\u00e9o Matussi\u00e8re Lysandre Debut Stas Bekman Pierric Cistac Thibault Goehringer Victor Mustar Fran\u00e7ois Lagunas Alexander Rush and Thomas Wolf 2021. Datasets: A community library for natural language processing. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations pages 175\u2013184 Online and Punta Cana Dominican Republic. Association for Computational Linguistics. doi: 10.18653\/v1\/2021.emnlp-demo.21. URL https:\/\/aclanthology.org\/2021.emnlp-demo.21.","DOI":"10.18653\/v1\/2021.emnlp-demo.21"},{"key":"e_1_3_2_1_18_1","unstructured":"Christopher Akiki Giada Pistilli Margot Mieskes Matthias Gall\u00e9 Thomas Wolf Suzana Ili\u0107 and Yacine Jernite 2022. BigScience: A Case Study in the Social Construction of a Multilingual Large Language Model. 
URL https:\/\/arxiv.org\/abs\/2212.04960."},{"key":"e_1_3_2_1_19_1","volume-title":"A 176B-Parameter Open-Access Multilingual Language Model","author":"BLOOM","year":"2022","unstructured":"BLOOM: A 176B-Parameter Open-Access Multilingual Language Model, 2022, arXiv:2211.05100v4 [cs.CL]"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.14618\/ids-pub-9021"},{"key":"e_1_3_2_1_21_1","volume-title":"Atharva Naik, David Stap","author":"Wang Yizhong","year":"2022","unstructured":"Yizhong Wang, Swaroop Mishra, Pegah Alipoormolabashi, Yeganeh Kordi, Amirreza Mirzaei, Anjana Arunkumar, Arjun Ashok, Arut Selvan Dhanasekaran, Atharva Naik, David Stap, et al., 2022. Benchmarking generalization via in-context instructions on 1,600+ language tasks. arXiv preprint arXiv:2204.07705, 2022b."},{"key":"e_1_3_2_1_22_1","volume-title":"M Saiful Bari, Sheng Shen, Zheng-Xin Yong, Hailey Schoelkopf","author":"Muennighoff Niklas","year":"2022","unstructured":"Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng-Xin Yong, Hailey Schoelkopf, et al., 2022. Crosslingual generalization through multitask fine-tuning. arXiv preprint arXiv:2211.01786, 2022b."},{"key":"e_1_3_2_1_23_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel Ziegler Jeffrey Wu Clemens Winter Chris Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_24_1","unstructured":"Ilya Loshchilov and Frank Hutter 2016 SGDR: stochastic gradient descent with restarts. CoRR abs\/1608.03983 URL http:\/\/arxiv.org\/abs\/1608.03983."},{"key":"e_1_3_2_1_25_1","first-page":"00234","article-title":"A survey for in-context learning","volume":"2301","author":"Dong Q.","year":"2023","unstructured":"Q. Dong, L. Li, D. Dai, C. Zheng, Z. Wu, B. Chang, X. Sun, J. Xu, L. Li, and Z. Sui, 2023, \u201cA survey for in-context learning,\u201d CoRR, vol. abs\/2301.00234.","journal-title":"CoRR"},{"key":"e_1_3_2_1_26_1","first-page":"14876","article-title":"Dense text retrieval based on pretrained language models: A survey","volume":"2211","author":"Zhao W. X.","year":"2022","unstructured":"W. X. Zhao, J. Liu, R. Ren, and J. Wen, 2022, \u201cDense text retrieval based on pretrained language models: A survey,\u201d CoRR, vol. abs\/2211.14876.","journal-title":"CoRR"},{"key":"e_1_3_2_1_27_1","first-page":"11903","article-title":"Chain of thought prompting elicits reasoning in large language models","volume":"2201","author":"Wei J.","year":"2022","unstructured":"J. Wei, X. Wang, D. Schuurmans, M. Bosma, E. H. Chi, Q. Le, and D. Zhou, 2022, \u201cChain of thought prompting elicits reasoning in large language models,\u201d CoRR, vol. abs\/2201.11903.","journal-title":"CoRR"},{"key":"e_1_3_2_1_28_1","first-page":"10625","article-title":"Least-to-most prompting enables complex reasoning in large language models","volume":"2205","author":"Zhou D.","year":"2022","unstructured":"D. Zhou, N. Sch\u00e4rli, L. Hou, J. Wei, N. Scales, X. Wang, D. Schuurmans, O. Bousquet, Q. Le, and E. H. 
Chi, 2022, \u201cLeast-to-most prompting enables complex reasoning in large language models,\u201d CoRR, vol. abs\/2205.10625.","journal-title":"CoRR"},{"key":"e_1_3_2_1_29_1","volume-title":"Thibaut Lavril, Gautier Izacard, Xavier Martinet Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozi\u00e8re, Naman Goyal Eric Hambro, Faisal Azhar, Aurelien Rodriguez","author":"Llama","year":"2023","unstructured":"LLaMA: Open and Efficient Foundation Language Models, Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample, 2023, arXiv:2302.13971v1 [cs.CL]."},{"key":"e_1_3_2_1_30_1","unstructured":"Llama 2: Open Foundation and Fine-Tuned Chat Models Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian Canton Ferrer Moya Chen Guillem Cucurull David Esiobu Jude Fernandes Jeremy Fu Wenyin Fu Brian Fuller Cynthia Gao Vedanuj Goswami Naman Goyal Anthony Hartshorn Saghar Hosseini Rui Hou Hakan Inan Marcin Kardas Viktor Kerkez Madian Khabsa Isabel Kloumann Artem Korenev Punit Singh Koura Marie-Anne Lachaux Thibaut Lavril Jenya Lee Diana Liskovich Yinghai Lu Yuning Mao Xavier Martinet Todor Mihaylov Pushkar Mishra Igor Molybog Yixin Nie Andrew Poulton Jeremy Reizenstein Rashi Rungta Kalyan Saladi Alan Schelten Ruan Silva Eric Michael Smith Ranjan Subramanian Xiaoqing Ellen Tan Binh Tang Ross Taylor Adina Williams Jian Xiang Kuan Puxin Xu Zheng Yan Iliyan Zarov Yuchen Zhang Angela Fan Melanie Kambadur Sharan Narang Aurelien Rodriguez Robert Stojnic Sergey Edunov Thomas Scialom 2023 arXiv:2307.09288v2 [cs.CL]."},{"key":"e_1_3_2_1_31_1","volume-title":"GQA: Training generalized multi-query transformer models from multi-head checkpoints.","author":"Ainslie Joshua","year":"2023","unstructured":"Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebr\u00f3n, and Sumit Sanghai, 2023. GQA: Training generalized multi-query transformer models from multi-head checkpoints."},{"key":"e_1_3_2_1_32_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N. Gomez Lukasz Kaiser and Illia Polosukhin 2017. Attention is all you need."},{"key":"e_1_3_2_1_33_1","volume-title":"Xi Victoria Lin","author":"Zhang Susan","year":"2022","unstructured":"Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al., 2022. OPT: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068."},{"key":"e_1_3_2_1_34_1","unstructured":"Noam Shazeer, 2020. GLU variants improve transformer."},{"key":"e_1_3_2_1_35_1","volume-title":"RoFormer: Enhanced transformer with rotary position embedding.","author":"Su Jianlin","year":"2022","unstructured":"Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu, 2022. RoFormer: Enhanced transformer with rotary position embedding."},{"key":"e_1_3_2_1_36_1","unstructured":"Ilya Loshchilov and Frank Hutter 2017. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"crossref","unstructured":"Rico Sennrich Barry Haddow and Alexandra Birch 2016. 
Neural machine translation of rare words with subword units.","DOI":"10.18653\/v1\/P16-1162"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"crossref","unstructured":"Taku Kudo and John Richardson 2018. SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing.","DOI":"10.18653\/v1\/D18-2012"},{"key":"e_1_3_2_1_39_1","unstructured":"Guilherme Penedo Quentin Malartic Daniel Hesslow Ruxandra Cojocaru Alessandro Cappelli Hamza Alobeidli Baptiste Pannier Ebtesam Almazrouei Julien Launay 2023. The RefinedWeb Dataset for Falcon LLM: Outperforming Curated Corpora with Web Data and Web Data Only."},{"key":"e_1_3_2_1_40_1","first-page":"8445","volume-title":"Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","unstructured":"Lee, K., Ippolito, D., Nystrom, A., Zhang, C., Eck, D., Callison-Burch, C., and Carlini, N., 2022, Deduplicating training data makes language models better. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 8424\u20138445."},{"key":"e_1_3_2_1_41_1","first-page":"1305","volume-title":"Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing","year":"2021","unstructured":"Dodge, J., Sap, M., Marasovi\u0107, A., Agnew, W., Ilharco, G., Groeneveld, D., Mitchell, M., and Gardner, M., 2021, Documenting large webtext corpora: A case study on the colossal clean crawled corpus. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 1286\u20131305."},{"key":"e_1_3_2_1_42_1","volume-title":"GQA: Training Generalized Multi-Query Transformer Models from Multi-Head Checkpoints","author":"Ainslie Joshua","year":"2023","unstructured":"Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebr\u00f3n, Sumit Sanghai, 2023, GQA: Training Generalized Multi-Query Transformer Models from Multi-Head Checkpoints."}],"event":{"name":"ICCMB 2024: 2024 7th International Conference on Computers in Management and Business","location":"Singapore Singapore","acronym":"ICCMB 2024"},"container-title":["Proceedings of the 2024 7th International Conference on Computers in Management and Business"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3647782.3647803","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3647782.3647803","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T01:24:38Z","timestamp":1755912278000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3647782.3647803"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,12]]},"references-count":42,"alternative-id":["10.1145\/3647782.3647803","10.1145\/3647782"],"URL":"https:\/\/doi.org\/10.1145\/3647782.3647803","relation":{},"subject":[],"published":{"date-parts":[[2024,1,12]]},"assertion":[{"value":"2024-05-06","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
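
The record above is the JSON body returned by Crossref's public REST API for this DOI. Below is a minimal sketch of retrieving and reading the same record, assuming the documented works route (https://api.crossref.org/works/{DOI}) and the third-party requests library; the User-Agent contact address is a hypothetical placeholder. Every field name accessed appears verbatim in the record above.

# Minimal sketch: fetch this Crossref work record and read a few fields.
import requests

DOI = "10.1145/3647782.3647803"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    # Hypothetical polite contact header; replace with your own address.
    headers={"User-Agent": "example-client/0.1 (mailto:you@example.org)"},
    timeout=30,
)
resp.raise_for_status()
msg = resp.json()["message"]  # same shape as the "message" object above

print(msg["title"][0])           # A Literature Survey on Open Source Large Language Models
print(msg["DOI"], msg["page"])   # 10.1145/3647782.3647803 133-143
print(msg["references-count"])   # 42, matching the "reference" array length
for a in msg["author"]:          # five authors, each with ORCID and affiliation
    print(a["given"], a["family"])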