{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T16:42:12Z","timestamp":1773247332743,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":64,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,21]],"date-time":"2024-10-21T00:00:00Z","timestamp":1729468800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100001691","name":"Japan Society for the Promotion of Science","doi-asserted-by":"publisher","award":["23K17456, 23K25157, 23K28096"],"award-info":[{"award-number":["23K17456, 23K25157, 23K28096"]}],"id":[{"id":"10.13039\/501100001691","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002241","name":"Japan Science and Technology Agency","doi-asserted-by":"publisher","award":["CREST JPMJCR22M2"],"award-info":[{"award-number":["CREST JPMJCR22M2"]}],"id":[{"id":"10.13039\/501100002241","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,21]]},"DOI":"10.1145\/3627673.3679100","type":"proceedings-article","created":{"date-parts":[[2024,10,20]],"date-time":"2024-10-20T19:34:21Z","timestamp":1729452861000},"page":"5518-5521","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["On the Use of Large Language Models for Table Tasks"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7112-5212","authenticated-orcid":false,"given":"Yuyang","family":"Dong","sequence":"first","affiliation":[{"name":"NEC, Kawasaki, Kanagawa, 
Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4045-7350","authenticated-orcid":false,"given":"Masafumi","family":"Oyamada","sequence":"additional","affiliation":[{"name":"NEC, Kawasaki, Kanagawa, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7239-5134","authenticated-orcid":false,"given":"Chuan","family":"Xiao","sequence":"additional","affiliation":[{"name":"Osaka University, Nagoya University, Suita, Osaka, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-2537-4984","authenticated-orcid":false,"given":"Haochen","family":"Zhang","sequence":"additional","affiliation":[{"name":"Osaka University, Suita, Osaka, Japan"}]}],"member":"320","published-online":{"date-parts":[[2024,10,21]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Context quality matters in training fusion-in-decoder for extractive open-domain question answering. arXiv preprint arXiv:2403.14197","author":"Akimoto K.","year":"2024","unstructured":"K. Akimoto, K. Takeoka, and M. Oyamada. Context quality matters in training fusion-in-decoder for extractive open-domain question answering. arXiv preprint arXiv:2403.14197, 2024."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.14778\/3554821.3554890"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1109\/BigData59044.2023.10386199"},{"key":"e_1_3_2_1_4_1","volume-title":"Product attribute value extraction using large language models. arXiv preprint arXiv:2310.12537","author":"Brinkmann A.","year":"2023","unstructured":"A. Brinkmann, R. Shraga, and C. Bizer. Product attribute value extraction using large language models. arXiv preprint arXiv:2310.12537, 2023."},{"key":"e_1_3_2_1_5_1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown T.","year":"2020","unstructured":"T. Brown, B. Mann, N. Ryder, M. Subbiah, J.D. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. 
NeurIPS, 33:1877--1901, 2020.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_6_1","volume-title":"andW. Y.Wang. Tabfact: A large-scale dataset for table-based fact verification. arXiv preprint arXiv:1909.02164","author":"Chen W.","year":"2019","unstructured":"W. Chen, H.Wang, J. Chen, Y. Zhang, H.Wang, S. Li, X. Zhou, andW. Y.Wang. Tabfact: A large-scale dataset for table-based fact verification. arXiv preprint arXiv:1909.02164, 2019."},{"key":"e_1_3_2_1_7_1","volume-title":"Batch prompting: Efficient inference with large language model APIs. arXiv preprint arXiv:2301.08721","author":"Cheng Z.","year":"2023","unstructured":"Z. Cheng, J. Kasai, and T. Yu. Batch prompting: Efficient inference with large language model APIs. arXiv preprint arXiv:2301.08721, 2023."},{"key":"e_1_3_2_1_8_1","volume-title":"Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data. arXiv preprint arXiv:2402.12424","author":"Deng N.","year":"2024","unstructured":"N. Deng, Z. Sun, R. He, A. Sikka, Y. Chen, L. Ma, Y. Zhang, and R. Mihalcea. Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data. arXiv preprint arXiv:2402.12424, 2024."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3542700.3542709"},{"key":"e_1_3_2_1_10_1","volume-title":"BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin J.","year":"2018","unstructured":"J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. 
arXiv preprint arXiv:1810.04805, 2018."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531678"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDE51399.2021.00046"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.14778\/3603581.3603587"},{"key":"e_1_3_2_1_14_1","volume-title":"Large language models on tabular data--a survey. arXiv preprint arXiv:2402.17944","author":"Fang X.","year":"2024","unstructured":"X. Fang, W. Xu, F. A. Tan, J. Zhang, Z. Hu, Y. Qi, S. Nickleach, D. Socolinsky, S. Sengamedu, and C. Faloutsos. Large language models on tabular data--a survey. arXiv preprint arXiv:2402.17944, 2024."},{"key":"e_1_3_2_1_15_1","volume-title":"Chain-of-table: Evolving tables in the reasoning chain for table understanding","author":"Research Google","year":"2024","unstructured":"Google Research. Chain-of-table: Evolving tables in the reasoning chain for table understanding, 2024."},{"key":"e_1_3_2_1_16_1","volume-title":"guinea pig trials\" utilizing gpt: A novel smart agentbased modeling approach for studying firm competition and collusion. arXiv preprint arXiv:2308.10974","author":"Han X.","year":"2023","unstructured":"X. Han, Z.Wu, and C. Xiao. \"guinea pig trials\" utilizing gpt: A novel smart agentbased modeling approach for studying firm competition and collusion. arXiv preprint arXiv:2308.10974, 2023."},{"key":"e_1_3_2_1_17_1","first-page":"174","volume-title":"PAKDD","author":"Hayashi S.","year":"2023","unstructured":"S. Hayashi, Y. Dong, and M.Oyamada. Qa-matcher: Unsupervised entity matching using a question answering model. In PAKDD, pages 174--185, 2023."},{"key":"e_1_3_2_1_18_1","volume-title":"Anameta: A table understanding dataset of field metadata knowledge shared by multi-dimensional data analysis tasks. arXiv preprint arXiv:2209.00946","author":"He X.","year":"2022","unstructured":"X. He, M. Zhou, M. Zhou, J. Xu, X. Lv, T. Li, Y. Shao, S. Han, Z. Yuan, and D. Zhang. 
Anameta: A table understanding dataset of field metadata knowledge shared by multi-dimensional data analysis tasks. arXiv preprint arXiv:2209.00946, 2022."},{"key":"e_1_3_2_1_19_1","volume-title":"TaPas:Weakly supervised table parsing via pre-training. arXiv preprint arXiv:2004.02349","author":"Herzig J.","year":"2020","unstructured":"J. Herzig, P. K. Nowak, T. M\u00fcller, F. Piccinno, and J. M. Eisenschlos. TaPas:Weakly supervised table parsing via pre-training. arXiv preprint arXiv:2004.02349, 2020."},{"key":"e_1_3_2_1_20_1","volume-title":"MetaGPT: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352","author":"Hong S.","year":"2023","unstructured":"S. Hong, X. Zheng, J. Chen, Y. Cheng, C. Zhang, Z. Wang, S. K. S. Yau, Z. Lin, L. Zhou, C. Ran, et al. MetaGPT: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352, 2023."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3555041.3589411"},{"key":"e_1_3_2_1_22_1","volume-title":"Tabbie: Pretrained representations of tabular data. arXiv preprint arXiv:2105.02584","author":"Iida H.","year":"2021","unstructured":"H. Iida, D. Thai, V. Manjunatha, and M. Iyyer. Tabbie: Pretrained representations of tabular data. arXiv preprint arXiv:2105.02584, 2021."},{"key":"e_1_3_2_1_23_1","volume-title":"Structgpt: A general framework for large language model to reason over structured data. arXiv preprint arXiv:2305.09645","author":"Jiang J.","year":"2023","unstructured":"J. Jiang, K. Zhou, Z. Dong, K. Ye,W. X. Zhao, and J.-R.Wen. Structgpt: A general framework for large language model to reason over structured data. arXiv preprint arXiv:2305.09645, 2023."},{"key":"e_1_3_2_1_24_1","volume-title":"Large languagemodels are zero-shot reasoners. arXiv preprint arXiv:2205.11916","author":"Kojima T.","year":"2022","unstructured":"T. Kojima, S. S. Gu, M. Reid, Y. Matsuo, and Y. Iwasawa. Large languagemodels are zero-shot reasoners. 
arXiv preprint arXiv:2205.11916, 2022."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.14778\/3007263.3007314"},{"key":"e_1_3_2_1_26_1","volume-title":"Column type annotation using ChatGPT. arXiv preprint arXiv:2306.00745","author":"Korini K.","year":"2023","unstructured":"K. Korini and C. Bizer. Column type annotation using ChatGPT. arXiv preprint arXiv:2306.00745, 2023."},{"key":"e_1_3_2_1_27_1","volume-title":"Open-wikitable: Dataset for open domain question answering with complex reasoning over table. arXiv preprint arXiv:2305.07288","author":"Kweon S.","year":"2023","unstructured":"S. Kweon, Y. Kwon, S. Cho, Y. Jo, and E. Choi. Open-wikitable: Dataset for open domain question answering with complex reasoning over table. arXiv preprint arXiv:2305.07288, 2023."},{"key":"e_1_3_2_1_28_1","volume-title":"Benchmarking RAG on tables","year":"2023","unstructured":"LangChain. Benchmarking RAG on tables, 2023."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1017\/9781108684163"},{"key":"e_1_3_2_1_30_1","first-page":"9459","article-title":"Retrieval-augmented generation for knowledgeintensive nlp tasks","volume":"33","author":"Lewis P.","year":"2020","unstructured":"P. Lewis, E. Perez, A. Piktus, F. Petroni,V.Karpukhin, N. Goyal, H. K\u00fcttler, M. Lewis, W.-t. Yih, T. Rockt\u00e4schel, et al. Retrieval-augmented generation for knowledgeintensive nlp tasks. NeurIPS, 33:9459--9474, 2020.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDE55515.2023.00275"},{"key":"e_1_3_2_1_32_1","first-page":"36","article-title":"Bringing software productivity to the next level through large language models","author":"Li H.","year":"2024","unstructured":"H. Li, J. Su, Y. Chen, Q. Li, and Z.-X. Zhang. Sheetcopilot: Bringing software productivity to the next level through large language models. 
NeurIPS, 36, 2024.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_33_1","volume-title":"A survey on retrieval-augmented text generation. arXiv preprint arXiv:2202.01110","author":"Li H.","year":"2022","unstructured":"H. Li, Y. Su, D. Cai, Y.Wang, and L. Liu. A survey on retrieval-augmented text generation. arXiv preprint arXiv:2202.01110, 2022."},{"key":"e_1_3_2_1_34_1","volume-title":"Table-GPT: Table-tuned GPT for diverse table tasks. arXiv preprint arXiv:2310.09263","author":"Li P.","year":"2023","unstructured":"P. Li, Y. He, D. Yashar, W. Cui, S. Ge, H. Zhang, D. R. Fainman, D. Zhang, and S. Chaudhuri. Table-GPT: Table-tuned GPT for diverse table tasks. arXiv preprint arXiv:2310.09263, 2023."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.551"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-short.133"},{"key":"e_1_3_2_1_37_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692","author":"Liu Y.","year":"2019","unstructured":"Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis, L. Zettlemoyer, and V. Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019."},{"key":"e_1_3_2_1_38_1","volume-title":"Large language model for table processing: A survey. arXiv preprint arXiv:2402.05121","author":"Lu W.","year":"2024","unstructured":"W. Lu, J. Zhang, J. Zhang, and Y. Chen. Large language model for table processing: A survey. arXiv preprint arXiv:2402.05121, 2024."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.14778\/3574245.3574258"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2017.45"},{"key":"e_1_3_2_1_41_1","first-page":"36","article-title":"DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction","author":"Pourreza M.","year":"2023","unstructured":"M. Pourreza and D. Rafiei. 
DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction. NeurIPS, 36, 2023.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3470809"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.14778\/3415478.3415564"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3470811"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3616855.3635752"},{"key":"e_1_3_2_1_46_1","volume-title":"TAP4LLM: Table provider on sampling, augmenting, and packing semi-structured data for large language model reasoning. arXiv preprint arXiv:2312.09039","author":"Sui Y.","year":"2023","unstructured":"Y. Sui, J. Zou, M. Zhou, X. He, L. Du, S. Han, and D. Zhang. TAP4LLM: Table provider on sampling, augmenting, and packing semi-structured data for large language model reasoning. arXiv preprint arXiv:2312.09039, 2023."},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.217"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.3301281"},{"key":"e_1_3_2_1_49_1","volume-title":"Large languagemodels as urban residents: An llm agent framework for personal mobility generation. arXiv preprint arXiv:2402.14744","author":"Wang J.","year":"2024","unstructured":"J.Wang, R. Jiang, C. Yang, Z.Wu, M. Onizuka, R. Shibasaki, and C. Xiao. Large languagemodels as urban residents: An llm agent framework for personal mobility generation. arXiv preprint arXiv:2402.14744, 2024."},{"key":"e_1_3_2_1_50_1","volume-title":"A survey on large language model based autonomous agents. arXiv preprint arXiv:2308.11432","author":"Wang L.","year":"2023","unstructured":"L.Wang, C. Ma, X. Feng, Z. Zhang, H. Yang, J. Zhang, Z. Chen, J. Tang, X. Chen, Y. Lin, et al. A survey on large language model based autonomous agents. 
arXiv preprint arXiv:2308.11432, 2023."},{"key":"e_1_3_2_1_51_1","volume-title":"Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903","author":"Wei J.","year":"2022","unstructured":"J. Wei, X. Wang, D. Schuurmans, M. Bosma, E. Chi, Q. Le, and D. Zhou. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903, 2022."},{"key":"e_1_3_2_1_52_1","volume-title":"Smart agent-based modeling: On the use of large language models in computer simulations. arXiv preprint arXiv:2311.06330","author":"Wu Z.","year":"2023","unstructured":"Z. Wu, R. Peng, X. Han, S. Zheng, Y. Zhang, and C. Xiao. Smart agent-based modeling: On the use of large language models in computer simulations. arXiv preprint arXiv:2311.06330, 2023."},{"key":"e_1_3_2_1_53_1","volume-title":"Shall we talk: Exploring spontaneous collaborations of competing llm agents. arXiv preprint arXiv:2402.12327","author":"Wu Z.","year":"2024","unstructured":"Z. Wu, S. Zheng, Q. Liu, X. Han, B. I. Kwon, M. Onizuka, S. Tang, R. Peng, and C. Xiao. Shall we talk: Exploring spontaneous collaborations of competing llm agents. arXiv preprint arXiv:2402.12327, 2024."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/2000824.2000825"},{"key":"e_1_3_2_1_55_1","volume-title":"UnifiedSKG: Unifying and multi-tasking structured knowledge grounding with text-to-text languagemodels. arXiv preprint arXiv:2201.05966","author":"Xie T.","year":"2022","unstructured":"T. Xie, C. H.Wu, P. Shi, R. Zhong, T. Scholak, M. Yasunaga, C.-S.Wu, M. Zhong, P. Yin, S. I.Wang, et al. UnifiedSKG: Unifying and multi-tasking structured knowledge grounding with text-to-text languagemodels. arXiv preprint arXiv:2201.05966, 2022."},{"key":"e_1_3_2_1_56_1","volume-title":"TaBERT: Pretraining for joint understanding of textual and tabular data. arXiv preprint arXiv:2005.08314","author":"Yin P.","year":"2020","unstructured":"P. Yin, G. Neubig,W.-t. 
Yih, and S. Riedel. TaBERT: Pretraining for joint understanding of textual and tabular data. arXiv preprint arXiv:2005.08314, 2020."},{"key":"e_1_3_2_1_57_1","volume-title":"et al. Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-SQL task. arXiv preprint arXiv:1809.08887","author":"Yu T.","year":"2018","unstructured":"T. Yu, R. Zhang, K. Yang, M. Yasunaga, D.Wang, Z. Li, J. Ma, I. Li, Q. Yao, S. Roman, et al. Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-SQL task. arXiv preprint arXiv:1809.08887, 2018."},{"key":"e_1_3_2_1_58_1","volume-title":"https:\/\/huggingface.co\/ NECOUDBFM\/Jellyfish","author":"Zhang H.","year":"2023","unstructured":"H. Zhang, Y. Dong, C. Xiao, and M. Oyamada. Jellyfish. https:\/\/huggingface.co\/ NECOUDBFM\/Jellyfish, 2023."},{"key":"e_1_3_2_1_59_1","volume-title":"Jellyfish: A large language model for data preprocessing. arXiv preprint arXiv:2312.01678","author":"Zhang H.","year":"2023","unstructured":"H. Zhang, Y. Dong, C. Xiao, and M. Oyamada. Jellyfish: A large language model for data preprocessing. arXiv preprint arXiv:2312.01678, 2023."},{"key":"e_1_3_2_1_60_1","volume-title":"Large language models as data preprocessors. arXiv preprint arXiv:2308.16361","author":"Zhang H.","year":"2023","unstructured":"H. Zhang, Y. Dong, C. Xiao, and M. Oyamada. Large language models as data preprocessors. arXiv preprint arXiv:2308.16361, 2023."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/3331184.3331385"},{"key":"e_1_3_2_1_62_1","volume-title":"Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792","author":"Zhang S.","year":"2023","unstructured":"S. Zhang, L. Dong, X. Li, S. Zhang, X. Sun, S.Wang, J. Li, R. Hu, T. Zhang, F.Wu, et al. Instruction tuning for large language models: A survey. 
arXiv preprint arXiv:2308.10792, 2023."},{"key":"e_1_3_2_1_63_1","volume-title":"Tablellama: Towards open large generalist models for tables. arXiv preprint arXiv:2311.09206","author":"Zhang T.","year":"2023","unstructured":"T. Zhang, X. Yue, Y. Li, and H. Sun. Tablellama: Towards open large generalist models for tables. arXiv preprint arXiv:2311.09206, 2023."},{"key":"e_1_3_2_1_64_1","volume-title":"Reactable: Enhancing react for table question answering. arXiv preprint arXiv:2310.00815","author":"Zhang Y.","year":"2023","unstructured":"Y. Zhang, J. Henkel, A. Floratou, J. Cahoon, S. Deep, and J. M. Patel. Reactable: Enhancing react for table question answering. arXiv preprint arXiv:2310.00815, 2023."}],"event":{"name":"CIKM '24: The 33rd ACM International Conference on Information and Knowledge Management","location":"Boise ID USA","acronym":"CIKM '24","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 33rd ACM International Conference on Information and Knowledge 
Management"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3627673.3679100","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3627673.3679100","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:03:28Z","timestamp":1750291408000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3627673.3679100"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,21]]},"references-count":64,"alternative-id":["10.1145\/3627673.3679100","10.1145\/3627673"],"URL":"https:\/\/doi.org\/10.1145\/3627673.3679100","relation":{},"subject":[],"published":{"date-parts":[[2024,10,21]]},"assertion":[{"value":"2024-10-21","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}