{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T23:56:06Z","timestamp":1743033366127,"version":"3.40.3"},"publisher-location":"Cham","reference-count":41,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031808883"},{"type":"electronic","value":"9783031808890"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-80889-0_5","type":"book-chapter","created":{"date-parts":[[2025,1,24]],"date-time":"2025-01-24T08:43:42Z","timestamp":1737708222000},"page":"63-78","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["On the\u00a0Variations of\u00a0ChatGPT\u2019s Response Quality for\u00a0Generating Source Code Across Programming Languages"],"prefix":"10.1007","author":[{"given":"\u00c1ngela","family":"Gonz\u00e1lez de Diego","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0462-2283","authenticated-orcid":false,"given":"Franz","family":"Wotawa","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,25]]},"reference":[{"key":"5_CR1","unstructured":"TIOBE Index (2024). https:\/\/www.tiobe.com\/tiobe-index\/. Accessed 10 June 2024"},{"key":"5_CR2","doi-asserted-by":"publisher","unstructured":"Ahmad, W., Chakraborty, S., Ray, B., Chang, K.W.: A transformer-based approach for source code summarization. In: Jurafsky, D., Chai, J., Schluter, N., Tetreault, J. (eds.) Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 4998\u20135007. Association for Computational Linguistics (2020). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.449, https:\/\/aclanthology.org\/2020.acl-main.449","DOI":"10.18653\/v1\/2020.acl-main.449"},{"key":"5_CR3","unstructured":"Amazon Web Services: AWS CodeWhisperer. https:\/\/aws.amazon.com\/es\/codewhisperer\/. Accessed 04 Apr 2023"},{"key":"5_CR4","unstructured":"Austin, J., et al.: Program synthesis with large language models. arXiv:2108.07732 [cs] (2021)"},{"key":"5_CR5","doi-asserted-by":"publisher","unstructured":"Belgacem, A., Bradai, A., Beghdad\u00a0Bey, K.: ChatGPT backend: a comprehensive analysis. In: ChatGPT Backend: A Comprehensive Analysis, pp.\u00a01\u20136 (2023). https:\/\/doi.org\/10.1109\/ISNCC58260.2023.10323792","DOI":"10.1109\/ISNCC58260.2023.10323792"},{"key":"5_CR6","doi-asserted-by":"publisher","unstructured":"Bender, E.M., Gebru, T., McMillan-Major, A., Shmitchell, S.: On the dangers of stochastic parrots: can language models be too big? In: Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT 2021, pp. 610\u2013623. Association for Computing Machinery, New York (2021). https:\/\/doi.org\/10.1145\/3442188.3445922","DOI":"10.1145\/3442188.3445922"},{"key":"5_CR7","doi-asserted-by":"publisher","unstructured":"Cassano, F., et al.: MultiPL-E: a scalable and extensible approach to benchmarking neural code generation (2022). https:\/\/doi.org\/10.48550\/ARXIV.2208.08227, https:\/\/arxiv.org\/abs\/2208.08227","DOI":"10.48550\/ARXIV.2208.08227"},{"key":"5_CR8","doi-asserted-by":"publisher","unstructured":"Chen, J., et al.: Divide-and-conquer meets consensus: unleashing the power of functions in code generation (2024). https:\/\/doi.org\/10.48550\/ARXIV.2405.20092, https:\/\/arxiv.org\/abs\/2405.20092. Version Number: 1","DOI":"10.48550\/ARXIV.2405.20092"},{"key":"5_CR9","doi-asserted-by":"publisher","unstructured":"Chen, M., et al.: Evaluating large language models trained on code (2021). https:\/\/doi.org\/10.48550\/ARXIV.2107.03374, https:\/\/arxiv.org\/abs\/2107.03374","DOI":"10.48550\/ARXIV.2107.03374"},{"key":"5_CR10","doi-asserted-by":"publisher","unstructured":"Clark, L., et al.: What makes a good conversation?: challenges in designing truly conversational agents. In: Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 1\u201312. ACM, Glasgow (2019). https:\/\/doi.org\/10.1145\/3290605.3300705, https:\/\/dl.acm.org\/doi\/10.1145\/3290605.3300705","DOI":"10.1145\/3290605.3300705"},{"key":"5_CR11","unstructured":"Claude AI: Claude AI. https:\/\/claude.ai\/login. Accessed 04 Apr 2023"},{"key":"5_CR12","unstructured":"de\u00a0Diego, \u00c1.G.: Design and analysis of ChatGPT\u2019s response quality across programming languages. Master\u2019s thesis, Graz University of Technology, Universidad Politecnica de Madrid Escuela Tecnica Superior de Ingenieros de Telecomunicacion (2024)"},{"key":"5_CR13","doi-asserted-by":"publisher","unstructured":"Dong, Y., Ding, J., Jiang, X., Li, G., Li, Z., Jin, Z.: CodeScore: evaluating code generation by learning code execution (2023). https:\/\/doi.org\/10.48550\/ARXIV.2301.09043, https:\/\/arxiv.org\/abs\/2301.09043","DOI":"10.48550\/ARXIV.2301.09043"},{"key":"5_CR14","doi-asserted-by":"publisher","unstructured":"Feng, Z., et al.: CodeBERT: a pre-trained model for programming and natural languages. In: Cohn, T., He, Y., Liu, Y. (eds.) Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 1536\u20131547. Association for Computational Linguistics (2020). https:\/\/doi.org\/10.18653\/v1\/2020.findings-emnlp.139, https:\/\/aclanthology.org\/2020.findings-emnlp.139","DOI":"10.18653\/v1\/2020.findings-emnlp.139"},{"key":"5_CR15","unstructured":"GitHub: GitHub Copilot: Your AI Pair Programmer (2024). https:\/\/github.com\/features\/copilot. Accessed 04 Apr 2024"},{"key":"5_CR16","unstructured":"Google: Gemini. https:\/\/gemini.google.com\/. Accessed 04 Apr 2024"},{"key":"5_CR17","unstructured":"Innovation Graph: Global metrics on programming languages (2023). https:\/\/innovationgraph.github.com\/global-metrics\/programming-languages"},{"key":"5_CR18","doi-asserted-by":"publisher","unstructured":"Khan, M.A.M., Bari, M.S., Do, X.L., Wang, W., Parvez, M.R., Joty, S.: xCodeEval: a large scale multilingual multitask benchmark for code understanding, generation, translation and retrieval (2023). https:\/\/doi.org\/10.48550\/ARXIV.2303.03004, https:\/\/arxiv.org\/abs\/2303.03004. Version Number: 4","DOI":"10.48550\/ARXIV.2303.03004"},{"key":"5_CR19","doi-asserted-by":"publisher","unstructured":"Kulal, S., et al.: SPoC: search-based pseudocode to code (2019). https:\/\/doi.org\/10.48550\/ARXIV.1906.04908, https:\/\/arxiv.org\/abs\/1906.04908. Version Number: 1","DOI":"10.48550\/ARXIV.1906.04908"},{"key":"5_CR20","unstructured":"Lambert, K.A., Osborne, M.: Fundamentals of Python: From First Programs Through Data Structures, International edn. Course Technology, Cengage Learning, Boston (2010)"},{"key":"5_CR21","unstructured":"Lerner, R.: Python Workout: 50 Ten-Minute Exercises. Manning Publications Co., Shelter Island (2020). oCLC: on1121083840"},{"key":"5_CR22","doi-asserted-by":"publisher","unstructured":"Li, Y., et al.: Competition-level code generation with AlphaCode. Science 378(6624), 1092\u20131097 (2022). https:\/\/doi.org\/10.1126\/science.abq1158, arXiv:2203.07814 [cs]","DOI":"10.1126\/science.abq1158"},{"key":"5_CR23","unstructured":"Madnight: GitHub Pull Requests Statistics 2024 (2024). https:\/\/madnight.github.io\/githut\/#\/pull_requests\/2024\/1. Accessed 10 June 2024"},{"issue":"3","key":"5_CR24","doi-asserted-by":"publisher","first-page":"151","DOI":"10.1145\/362566.362568","volume":"14","author":"Z Manna","year":"1971","unstructured":"Manna, Z., Waldinger, R.J.: Toward automatic program synthesis. Commun. ACM 14(3), 151\u2013165 (1971). https:\/\/doi.org\/10.1145\/362566.362568","journal-title":"Commun. ACM"},{"key":"5_CR25","unstructured":"Microsoft: Microsoft Copilot. https:\/\/copilot.microsoft.com\/. Accessed 04 Apr 2023"},{"key":"5_CR26","unstructured":"Microsoft: CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation (2023). https:\/\/github.com\/microsoft\/CodeXGLUE\/tree\/main. Accessed 04 Apr 2023"},{"key":"5_CR27","unstructured":"Murdza, J.: Humaneval results (2023). https:\/\/github.com\/jamesmurdza\/humaneval-results\/tree\/main"},{"key":"5_CR28","doi-asserted-by":"publisher","unstructured":"Ouyang, L., et al.: Training language models to follow instructions with human feedback (2022). https:\/\/doi.org\/10.48550\/ARXIV.2203.02155, https:\/\/arxiv.org\/abs\/2203.02155. arXiv Version Number: 1","DOI":"10.48550\/ARXIV.2203.02155"},{"key":"5_CR29","doi-asserted-by":"publisher","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting on Association for Computational Linguistics - ACL 2002, p.\u00a0311. Association for Computational Linguistics, Philadelphia (2001). https:\/\/doi.org\/10.3115\/1073083.1073135, http:\/\/portal.acm.org\/citation.cfm?doid=1073083.1073135","DOI":"10.3115\/1073083.1073135"},{"key":"5_CR30","doi-asserted-by":"publisher","unstructured":"Parvez, M.R., Ahmad, W.U., Chakraborty, S., Ray, B., Chang, K.W.: Retrieval augmented code generation and summarization (2021). https:\/\/doi.org\/10.48550\/ARXIV.2108.11601, https:\/\/arxiv.org\/abs\/2108.11601","DOI":"10.48550\/ARXIV.2108.11601"},{"key":"5_CR31","doi-asserted-by":"publisher","unstructured":"Ren, S., et al.: CodeBLEU: a method for automatic evaluation of code synthesis (2020). https:\/\/doi.org\/10.48550\/ARXIV.2009.10297, https:\/\/arxiv.org\/abs\/2009.10297","DOI":"10.48550\/ARXIV.2009.10297"},{"key":"5_CR32","doi-asserted-by":"publisher","unstructured":"Stephenson, B.: The Python Workbook: A Brief Introduction with Exercises and Solutions, 1st edn., 2014 edn. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-14240-1","DOI":"10.1007\/978-3-319-14240-1"},{"key":"5_CR33","unstructured":"Sweigart, A.: python programming exercises, gently explained, 2nd printing edn. (2022). https:\/\/www.amazon.com\/Python-Programming-Exercises-Gently-Explained-ebook\/dp\/B0BGYJ7G6T. Licensed under Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"},{"key":"5_CR34","unstructured":"Tabnine: Tabnine: Your AI Coding Assistant. https:\/\/www.tabnine.com\/ai-coding-assistant. Accessed 04 Apr 2023"},{"key":"5_CR35","doi-asserted-by":"publisher","unstructured":"Teubner, T., Flath, C.M., Weinhardt, C., Van Der\u00a0Aalst, W., Hinz, O.: Welcome to the era of ChatGPT et al.: the prospects of large language models. Bus. Inf. Syst. Eng. 65(2), 95\u2013101 (2023). https:\/\/doi.org\/10.1007\/s12599-023-00795-x","DOI":"10.1007\/s12599-023-00795-x"},{"key":"5_CR36","doi-asserted-by":"publisher","unstructured":"Vaswani, A., et al.: Attention is all you need (2017). https:\/\/doi.org\/10.48550\/ARXIV.1706.03762, https:\/\/arxiv.org\/abs\/1706.03762. arXiv Version Number: 7","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"5_CR37","doi-asserted-by":"publisher","unstructured":"White, J., et al.: A prompt pattern catalog to enhance prompt engineering with ChatGPT (2023). https:\/\/doi.org\/10.48550\/ARXIV.2302.11382, https:\/\/arxiv.org\/abs\/2302.11382","DOI":"10.48550\/ARXIV.2302.11382"},{"key":"5_CR38","doi-asserted-by":"publisher","unstructured":"White, J., Hays, S., Fu, Q., Spencer-Smith, J., Schmidt, D.C.: ChatGPT prompt patterns for improving code quality, refactoring, requirements elicitation, and software design (2023). https:\/\/doi.org\/10.48550\/ARXIV.2303.07839, https:\/\/arxiv.org\/abs\/2303.07839","DOI":"10.48550\/ARXIV.2303.07839"},{"key":"5_CR39","doi-asserted-by":"crossref","unstructured":"Yadav, A., Singh, M.: PythonSaga: redefining the benchmark to evaluate code generating LLM. arXiv:2401.03855 [cs] (2024)","DOI":"10.18653\/v1\/2024.findings-emnlp.996"},{"key":"5_CR40","doi-asserted-by":"publisher","unstructured":"Yan, W., Tian, Y., Li, Y., Chen, Q., Wang, W.: CodeTransOcean: a comprehensive multilingual benchmark for code translation (2023). https:\/\/doi.org\/10.48550\/ARXIV.2310.04951, https:\/\/arxiv.org\/abs\/2310.04951","DOI":"10.48550\/ARXIV.2310.04951"},{"key":"5_CR41","doi-asserted-by":"publisher","unstructured":"Zhu, M., Jain, A., Suresh, K., Ravindran, R., Tipirneni, S., Reddy, C.K.: XLCoST: a benchmark dataset for cross-lingual code intelligence (2022). https:\/\/doi.org\/10.48550\/ARXIV.2206.08474, https:\/\/arxiv.org\/abs\/2206.08474","DOI":"10.48550\/ARXIV.2206.08474"}],"container-title":["Lecture Notes in Computer Science","Testing Software and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-80889-0_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,24]],"date-time":"2025-01-24T08:43:59Z","timestamp":1737708239000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-80889-0_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031808883","9783031808890"],"references-count":41,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-80889-0_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"25 January 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare relevant to this article\u2019s content.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"ICTSS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"IFIP International Conference on Testing Software and Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"London","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31 October 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 November 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"36","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"pts2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conf.researchr.org\/home\/ictss-2024","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}