{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,22]],"date-time":"2025-12-22T20:57:53Z","timestamp":1766437073430,"version":"3.48.0"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Learning Technol."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tlt.2025.3630117","type":"journal-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:49:55Z","timestamp":1763146195000},"page":"1074-1082","source":"Crossref","is-referenced-by-count":0,"title":["Benchmarking In-Context Learning Strategies of Large Language Models for Math Reasoning Tasks"],"prefix":"10.1109","volume":"18","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6031-3741","authenticated-orcid":false,"given":"Yao","family":"Rong","sequence":"first","affiliation":[{"name":"Technical University of Munich, Munich, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3380-4641","authenticated-orcid":false,"given":"Kathrin","family":"Se\u00dfler","sequence":"additional","affiliation":[{"name":"Technical University of Munich, Munich, Germany"}]},{"given":"Emek","family":"G\u00f6zl\u00fckl\u00fc","sequence":"additional","affiliation":[{"name":"Technical University of Munich, Munich, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3146-4484","authenticated-orcid":false,"given":"Enkelejda","family":"Kasneci","sequence":"additional","affiliation":[{"name":"Technical University of Munich, Munich, Germany"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","volume":"33","author":"Brown","year":"2020"},{"year":"2023","key":"ref2","article-title":"GPT-4 technical report"},{"article-title":"LLaMA: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref3"},{"year":"2023","key":"ref4","article-title":"Gemini: A family of highly capable multimodal models"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00632"},{"article-title":"News summarization and evaluation in the era of GPT-3","year":"2022","author":"Goyal","key":"ref7"},{"article-title":"Is ChatGPT a good translator? Yes with GPT-4 as the engine","year":"2023","author":"Jiao","key":"ref8"},{"issue":"240","key":"ref9","first-page":"1","article-title":"Palm: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"J. Mach. Learn. Res."},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.889"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.lindif.2023.102274"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.2318124121"},{"key":"ref13","article-title":"LEGO-prover: Neural theorem proving with growing libraries","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Wang","year":"2024"},{"article-title":"Challenges and applications of large language models","year":"2023","author":"Kaddour","key":"ref14"},{"key":"ref15","article-title":"Measuring mathematical problem solving with the math dataset","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Hendrycks","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-industry.4"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1257\/jel.20231736"},{"key":"ref18","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Wei","year":"2022"},{"key":"ref19","article-title":"Automatic chain of thought prompting in large language models","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Zhang","year":"2023"},{"key":"ref20","first-page":"22199","article-title":"Large language models are zero-shot reasoners","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Kojima","year":"2022"},{"key":"ref21","article-title":"Self-consistency improves chain of thought reasoning in language models","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Wang","year":"2023"},{"key":"ref22","article-title":"Complexity-based prompting for multi-step reasoning","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Fu","year":"2023"},{"key":"ref23","first-page":"10764","article-title":"PAL: Program-aided language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Gao","year":"2023"},{"key":"ref24","article-title":"Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks","author":"Chen","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.294"},{"key":"ref26","article-title":"WizardMath: Empowering mathematical reasoning for large language models via reinforced evol-instruct","volume-title":"Proc. 13th Int. Conf. Learn. Represent.","author":"Luo","year":"2025"},{"key":"ref27","first-page":"27699","article-title":"Mathematical capabilities of ChatGPT","volume-title":"Proc. Int. Conf. Inf. Process. Syst.","author":"Frieder","year":"2023"},{"key":"ref28","article-title":"NaturalProofs: Mathematical theorem proving in natural language","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Welleck","year":"2021"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.817"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.eacl-srw.17"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.67"},{"article-title":"How well do large language models perform in arithmetic tasks?","year":"2023","author":"Yuan","key":"ref32"},{"article-title":"Reasoning with reinforced functional token tuning","year":"2025","author":"Zhang","key":"ref33"},{"key":"ref34","first-page":"15476","article-title":"Star: Bootstrapping reasoning with reasoning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zelikman","year":"2022"},{"key":"ref35","article-title":"MetaMath: Bootstrap your own mathematical questions for large language models","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Yu","year":"2024"},{"key":"ref36","first-page":"47885","article-title":"MathScale: Scaling instruction tuning for mathematical reasoning","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","author":"Tang","year":"2024"},{"article-title":"Improving large language model fine-tuning for solving math problems","year":"2023","author":"Liu","key":"ref37"},{"article-title":"Evaluating large language models trained on code","year":"2021","author":"Chen","key":"ref38"},{"article-title":"Lamda: Language models for dialog applications","year":"2022","author":"Thoppilan","key":"ref39"},{"key":"ref40","first-page":"3843","article-title":"Solving quantitative reasoning problems with language models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Lewkowycz","year":"2022"},{"key":"ref41","article-title":"MathVista: Evaluating mathematical reasoning of foundation models in visual contexts","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lu","year":"2024"},{"year":"2023","key":"ref42","article-title":"Gpt-4v(ision) system card"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-1015"},{"article-title":"Training verifiers to solve math word problems","year":"2021","author":"Cobbe","key":"ref44"},{"year":"2024","key":"ref45","article-title":"Claude 4.5 sonnet"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-naacl.149"},{"article-title":"Mistral 7B","year":"2023","author":"Jiang","key":"ref47"}],"container-title":["IEEE Transactions on Learning Technologies"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/4620076\/10810756\/11249497.pdf?arnumber=11249497","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,22]],"date-time":"2025-12-22T18:41:53Z","timestamp":1766428913000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11249497\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/tlt.2025.3630117","relation":{},"ISSN":["1939-1382","2372-0050"],"issn-type":[{"type":"electronic","value":"1939-1382"},{"type":"electronic","value":"2372-0050"}],"subject":[],"published":{"date-parts":[[2025]]}}}