{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,30]],"date-time":"2026-01-30T02:42:19Z","timestamp":1769740939061,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":73,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T00:00:00Z","timestamp":1701302400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,11,30]]},"DOI":"10.1145\/3611643.3616244","type":"proceedings-article","created":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T23:14:38Z","timestamp":1701386078000},"page":"1470-1482","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":9,"title":["On the Usage of Continual Learning for Out-of-Distribution Generalization in Pre-trained Language Models of Code"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5987-850X","authenticated-orcid":false,"given":"Martin","family":"Weyssow","sequence":"first","affiliation":[{"name":"Universit\u00e9 de Montr\u00e9al, Montreal, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4558-0622","authenticated-orcid":false,"given":"Xin","family":"Zhou","sequence":"additional","affiliation":[{"name":"Singapore Management University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4462-6916","authenticated-orcid":false,"given":"Kisub","family":"Kim","sequence":"additional","affiliation":[{"name":"Singapore Management University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4367-7201","authenticated-orcid":false,"given":"David","family":"Lo","sequence":"additional","affiliation":[{"name":"Singapore Management University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6304-9926","authenticated-orcid":false,"given":"Houari","family":"Sahraoui","sequence":"additional","affiliation":[{"name":"Universit\u00e9 de Montr\u00e9al, Montreal, Canada"}]}],"member":"320","published-online":{"date-parts":[[2023,11,30]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01219-9_9"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3359591.3359735"},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE-SEIP52600.2021.00022"},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/MS.2021.3070743"},{"key":"e_1_3_2_2_6_1","unstructured":"Chaitanya Baweja Ben Glocker and Konstantinos Kamnitsas. 2018. Towards continual learning in medical imaging. arXiv preprint arXiv:1811.02496."},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_8_1","volume-title":"Advances in Neural Information Processing Systems","author":"Brown Tom","year":"1877","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, and Amanda Askell. 2020. Language Models are Few-Shot Learners. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (Eds.). 33, Curran Associates, Inc., 1877\u20131901. 
https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2020\/file\/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf"},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01252-6_33"},{"key":"e_1_3_2_2_11_1","volume-title":"Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, and Greg Brockman.","author":"Chen Mark","year":"2021","unstructured":"Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, and Greg Brockman. 2021. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374."},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.325"},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/MSR52588.2021.00024"},{"key":"e_1_3_2_2_14_1","volume-title":"XNLI: Evaluating cross-lingual sentence representations. arXiv preprint arXiv:1809.05053.","author":"Conneau Alexis","year":"2018","unstructured":"Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. XNLI: Evaluating cross-lingual sentence representations. arXiv preprint arXiv:1809.05053."},{"key":"e_1_3_2_2_15_1","volume-title":"Continual learning: A comparative study on how to defy forgetting in classification tasks. arXiv preprint arXiv:1909.08383, 2, 6","author":"Lange Matthias De","year":"2019","unstructured":"Matthias De Lange, Rahaf Aljundi, Marc Masana, Sarah Parisot, Xu Jia, Ales Leonardis, Gregory Slabaugh, and Tinne Tuytelaars. 2019. Continual learning: A comparative study on how to defy forgetting in classification tasks. arXiv preprint arXiv:1909.08383, 2, 6 (2019), 2."},{"key":"e_1_3_2_2_16_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805."},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00907"},{"key":"e_1_3_2_2_18_1","volume-title":"Advances in Neural Information Processing Systems, S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (Eds.). 35, Curran Associates","author":"Ermis Beyza","year":"2022","unstructured":"Beyza Ermis, Giovanni Zappella, Martin Wistuba, Aditya Rawal, and Cedric Archambeau. 2022. Memory Efficient Continual Learning with Transformers. In Advances in Neural Information Processing Systems, S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (Eds.). 35, Curran Associates, Inc., 10629\u201310642. https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2022\/file\/4522de4178bddb36b49aa26efad537cf-Paper-Conference.pdf"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","unstructured":"Weyssow et al.. 2023. On the Usage of Continual Learning for Out-of-Distribution Generalization in Pre-trained Language Models of Code: Replication Package. https:\/\/doi.org\/10.5281\/zenodo.8272703 10.5281\/zenodo.8272703","DOI":"10.5281\/zenodo.8272703"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_21_1","volume-title":"Catastrophic forgetting in connectionist networks. 
Trends in cognitive sciences, 3, 4","author":"French Robert M","year":"1999","unstructured":"Robert M French. 1999. Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3, 4 (1999), 128\u2013135."},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","unstructured":"Shuzheng Gao Hongyu Zhang Cuiyun Gao and Chaozheng Wang. 2023. Keeping Pace with Ever-Increasing Data: Towards Continual Learning of Code Intelligence Models. arXiv preprint arXiv:2302.03482 https:\/\/doi.org\/10.48550\/arXiv.2302.03482 10.48550\/arXiv.2302.03482","DOI":"10.48550\/arXiv.2302.03482"},{"key":"e_1_3_2_2_23_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2020.09.004"},{"key":"e_1_3_2_2_26_1","unstructured":"Hossein Hajipour Ning Yu Cristian-Alexandru Staicu and Mario Fritz. 2022. SimSCOOD: Systematic Analysis of Out-of-Distribution Behavior of Source Code Models. arXiv preprint arXiv:2210.04802."},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE.2019.00101"},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3377811.3380361"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_31_1","unstructured":"Qiang Hu Yuejun Guo Xiaofei Xie Maxime Cordy Lei Ma Mike Papadakis and Yves Le Traon. 2022. CodeS: A Distribution Shift Benchmark Dataset for Source Code Learning. arXiv preprint arXiv:2206.05480."},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.5555\/3304889.3304975"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00426"},{"key":"e_1_3_2_2_34_1","volume-title":"Dynabench: Rethinking benchmarking in NLP. arXiv preprint arXiv:2104.14337.","author":"Kiela Douwe","year":"2021","unstructured":"Douwe Kiela, Max Bartolo, Yixin Nie, Divyansh Kaushik, Atticus Geiger, Zhengxuan Wu, Bertie Vidgen, Grusha Prasad, Amanpreet Singh, and Pratik Ringshia. 2021. Dynabench: Rethinking benchmarking in NLP. arXiv preprint arXiv:2104.14337."},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1611835114"},{"key":"e_1_3_2_2_36_1","volume-title":"International Conference on Machine Learning. 5637\u20135664","author":"Koh Pang Wei","year":"2021","unstructured":"Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, and Irena Gao. 2021. Wilds: A benchmark of in-the-wild distribution shifts. In International Conference on Machine Learning. 5637\u20135664."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2773081"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1907.11692"},{"key":"e_1_3_2_2_39_1","volume-title":"Proceedings of the 1st Annual Conference on Robot Learning, Sergey Levine, Vincent Vanhoucke, and Ken Goldberg (Eds.) (Proceedings of Machine Learning Research","volume":"26","author":"Lomonaco Vincenzo","year":"2017","unstructured":"Vincenzo Lomonaco and Davide Maltoni. 2017. CORe50: a New Dataset and Benchmark for Continuous Object Recognition. In Proceedings of the 1st Annual Conference on Robot Learning, Sergey Levine, Vincent Vanhoucke, and Ken Goldberg (Eds.) (Proceedings of Machine Learning Research, Vol. 78). PMLR, 17\u201326. 
https:\/\/proceedings.mlr.press\/v78\/lomonaco17a.html"},{"key":"e_1_3_2_2_40_1","volume-title":"Marc Masana, Jary Pomponi, Gido van de Ven, Martin Mundt, Qi She, Keiland Cooper, Jeremy Forest, Eden Belouadah, Simone Calderara, German I.","author":"Lomonaco Vincenzo","year":"2021","unstructured":"Vincenzo Lomonaco, Lorenzo Pellegrini, Andrea Cossu, Antonio Carta, Gabriele Graffieti, Tyler L. Hayes, Matthias De Lange, Marc Masana, Jary Pomponi, Gido van de Ven, Martin Mundt, Qi She, Keiland Cooper, Jeremy Forest, Eden Belouadah, Simone Calderara, German I. Parisi, Fabio Cuzzolin, Andreas Tolias, Simone Scardapane, Luca Antiga, Subutai Amhad, Adrian Popescu, Christopher Kanan, Joost van de Weijer, Tinne Tuytelaars, Davide Bacciu, and Davide Maltoni. 2021. Avalanche: an End-to-End Library for Continual Learning. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (2nd Continual Learning in Computer Vision Workshop)."},{"key":"e_1_3_2_2_41_1","first-page":"2346","article-title":"Learning under concept drift: A review","volume":"31","author":"Lu Jie","year":"2018","unstructured":"Jie Lu, Anjin Liu, Fan Dong, Feng Gu, Joao Gama, and Guangquan Zhang. 2018. Learning under concept drift: A review. IEEE Transactions on Knowledge and Data Engineering, 31, 12 (2018), 2346\u20132363.","journal-title":"IEEE Transactions on Knowledge and Data Engineering"},{"key":"e_1_3_2_2_42_1","volume-title":"CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1).","author":"Lu Shuai","year":"2021","unstructured":"Shuai Lu, Daya Guo, Shuo Ren, Junjie Huang, Alexey Svyatkovskiy, Ambrosio Blanco, Colin Clement, Dawn Drain, Daxin Jiang, and Duyu Tang. 2021. CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1)."},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2015.42"},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1016\/S0079-7421(08)60536-8"},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2019.00099"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/1595696.1595767"},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/1806799.1806832"},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.01.012"},{"key":"e_1_3_2_2_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/2970276.2970330"},{"key":"e_1_3_2_2_50_1","volume-title":"Language models are unsupervised multitask learners. OpenAI blog, 1, 8","author":"Radford Alec","year":"2019","unstructured":"Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1, 8 (2019), 9."},{"key":"e_1_3_2_2_51_1","unstructured":"Shuo Ren Daya Guo Shuai Lu Long Zhou Shujie Liu Duyu Tang Neel Sundaresan Ming Zhou Ambrosio Blanco and Shuai Ma. 2020. Codebleu: a method for automatic evaluation of code synthesis. arXiv preprint arXiv:2009.10297."},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_53_1","unstructured":"Andrei A Rusu Neil C Rabinowitz Guillaume Desjardins Hubert Soyer James Kirkpatrick Koray Kavukcuoglu Razvan Pascanu and Raia Hadsell. 2016. Progressive neural networks. 
arXiv preprint arXiv:1606.04671."},{"key":"e_1_3_2_2_54_1","unstructured":"Zheyan Shen Jiashuo Liu Yue He Xingxuan Zhang Renzhe Xu Han Yu and Peng Cui. 2021. Towards out-of-distribution generalization: A survey. arXiv preprint arXiv:2108.13624."},{"key":"e_1_3_2_2_55_1","volume-title":"ICML 2022: Workshop on Spurious Correlations, Invariance and Stability. https:\/\/openreview.net\/forum?id=zKDcZBVVEWm","author":"Shi Yuge","year":"2022","unstructured":"Yuge Shi, Imant Daunhawer, Julia E Vogt, Philip Torr, and Amartya Sanyal. 2022. How robust are pre-trained models to distribution shift? In ICML 2022: Workshop on Spurious Correlations, Invariance and Stability. https:\/\/openreview.net\/forum?id=zKDcZBVVEWm"},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01220"},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_58_1","volume-title":"\u0141 ukasz Kaiser, and Illia Polosukhin","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is All you Need. In Advances in Neural Information Processing Systems, I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (Eds.). 30, Curran Associates, Inc.."},{"key":"e_1_3_2_2_59_1","unstructured":"Max Vladymyrov Andrey Zhmoginov and Mark Sandler. 2023. Continual Few-Shot Learning Using HyperTransformers. arXiv preprint arXiv:2301.04584."},{"key":"e_1_3_2_2_60_1","volume-title":"Proceedings of the 30th International Conference on International Conference on Machine Learning -","volume":"28","author":"Wan Li","year":"2013","unstructured":"Li Wan, Matthew Zeiler, Sixin Zhang, Yann LeCun, and Rob Fergus. 2013. Regularization of Neural Networks Using Dropconnect. In Proceedings of the 30th International Conference on International Conference on Machine Learning - Volume 28 (ICML\u201913). JMLR.org, III\u20131058\u2013III\u20131066."},{"key":"e_1_3_2_2_61_1","volume-title":"GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.","author":"Wang Alex","year":"2018","unstructured":"Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461."},{"key":"e_1_3_2_2_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3549113"},{"key":"e_1_3_2_2_63_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_64_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485275"},{"key":"e_1_3_2_2_65_1","unstructured":"Martin Weyssow Xin Zhou Kisub Kim David Lo and Houari Sahraoui. 2023. Exploring Parameter-Efficient Fine-Tuning Techniques for Code Generation with Large Language Models. arxiv:2308.10462."},{"key":"e_1_3_2_2_66_1","doi-asserted-by":"publisher","DOI":"10.1023\/A:1018046501280"},{"key":"e_1_3_2_2_67_1","doi-asserted-by":"crossref","unstructured":"Adina Williams Nikita Nangia and Samuel R Bowman. 2017. A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.","DOI":"10.18653\/v1\/N18-1101"},{"key":"e_1_3_2_2_68_1","doi-asserted-by":"crossref","unstructured":"Thomas Wolf Lysandre Debut Victor Sanh Julien Chaumond Clement Delangue Anthony Moi Pierric Cistac Tim Rault R\u00e9mi Louf and Morgan Funtowicz. 2019. 
Huggingface\u2019s transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"e_1_3_2_2_69_1","doi-asserted-by":"publisher","DOI":"10.1145\/3520312.3534862"},{"key":"e_1_3_2_2_70_1","doi-asserted-by":"crossref","unstructured":"Linyi Yang Shuibai Zhang Libo Qin Yafu Li Yidong Wang Hanmeng Liu Jindong Wang Xing Xie and Yue Zhang. 2022. GLUE-X: Evaluating Natural Language Understanding Models from an Out-of-distribution Generalization Perspective. arXiv preprint arXiv:2211.08073.","DOI":"10.18653\/v1\/2023.findings-acl.806"},{"key":"e_1_3_2_2_71_1","doi-asserted-by":"publisher","DOI":"10.1145\/3533767.3534390"},{"key":"e_1_3_2_2_72_1","volume-title":"International Conference on Machine Learning. 3987\u20133995","author":"Zenke Friedemann","year":"2017","unstructured":"Friedemann Zenke, Ben Poole, and Surya Ganguli. 2017. Continual learning through synaptic intelligence. In International Conference on Machine Learning. 3987\u20133995."},{"key":"e_1_3_2_2_73_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSME52107.2021.00044"}],"event":{"name":"ESEC\/FSE '23: 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering","location":"San Francisco CA USA","acronym":"ESEC\/FSE '23","sponsor":["SIGSOFT ACM Special Interest Group on Software Engineering"]},"container-title":["Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3611643.3616244","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3611643.3616244","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:36:03Z","timestamp":1750178163000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3611643.3616244"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,30]]},"references-count":73,"alternative-id":["10.1145\/3611643.3616244","10.1145\/3611643"],"URL":"https:\/\/doi.org\/10.1145\/3611643.3616244","relation":{},"subject":[],"published":{"date-parts":[[2023,11,30]]},"assertion":[{"value":"2023-11-30","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
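The object above is a single Crossref "work" record. As a minimal sketch of how a record of this shape can be consumed programmatically, the Python snippet below fetches the same work through the public Crossref REST API and reads a few of the fields that appear in the JSON (title, author, container-title, page, and the primary resource URL). It assumes the standard https://api.crossref.org/works/{DOI} endpoint and uses only the Python standard library; it is illustrative, not part of the record itself.

# Sketch, assuming the public Crossref REST API serves this record at
# https://api.crossref.org/works/{DOI}; field names mirror the JSON above.
import json
import urllib.request

DOI = "10.1145/3611643.3616244"
with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]  # the "message" object shown above

print(work["title"][0])                          # paper title
print("; ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print(work["container-title"][0])                # proceedings title
print(work["page"], work["resource"]["primary"]["URL"])

Note that "title", "container-title", and "author" are arrays in the Crossref schema, which is why the snippet indexes or iterates over them; Crossref also recommends identifying your client via a User-Agent or mailto parameter for polite use of the API.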