{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,20]],"date-time":"2026-04-20T20:54:52Z","timestamp":1776718492503,"version":"3.51.2"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T00:00:00Z","timestamp":1732579200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T00:00:00Z","timestamp":1732579200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,11,26]]},"DOI":"10.1109\/fllm63129.2024.10852493","type":"proceedings-article","created":{"date-parts":[[2025,1,28]],"date-time":"2025-01-28T18:35:23Z","timestamp":1738089323000},"page":"476-483","source":"Crossref","is-referenced-by-count":14,"title":["The Benefits of a Concise Chain of Thought on Problem-Solving in Large Language Models"],"prefix":"10.1109","author":[{"given":"Matthew","family":"Renze","sequence":"first","affiliation":[{"name":"Johns Hopkins University,Baltimore,MD,USA"}]},{"given":"Erhan","family":"Guven","sequence":"additional","affiliation":[{"name":"Johns Hopkins University,Baltimore,MD,USA"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Augmented language models: a survey","author":"Mialon","year":"2023"},{"key":"ref2","article-title":"A prompt pattern catalog to enhance prompt engineering with chatgpt","author":"White","year":"2023"},{"key":"ref3","first-page":"22 199","article-title":"Large language models are zero-shot reasoners","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Kojima","year":"2022"},{"key":"ref4","article-title":"Chain-of-thought prompting elicits reasoning in large language models","author":"Wei","year":"2022"},{"key":"ref5","article-title":"Large language models are human-level prompt engineers","volume-title":"The Eleventh International Conference on Learning Representations","author":"Zhou"},{"key":"ref6","article-title":"Agent instructs large language models to be general zero-shot reasoners","author":"Crispino","year":"2023"},{"key":"ref7","article-title":"Numbers every llm developer should know","author":"Kadous","year":"2023"},{"key":"ref8","article-title":"Pricing","year":"2023"},{"key":"ref9","article-title":"Pricing","year":"2023"},{"key":"ref10","article-title":"Text and patterns: For effective chain of thought, it takes two to tango","author":"Madaan","year":"2022"},{"key":"ref11","article-title":"Text and patterns: For effective chain of thought it takes two to tango","author":"Madaan","year":"2022"},{"key":"ref12","article-title":"Think you have solved question answering? try arc, the ai2 reasoning challenge","author":"Clark","year":"2018"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/P19-1472","article-title":"Hellaswag: Can a machine really finish your sentence?","volume-title":"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics","author":"Zellers"},{"key":"ref14","first-page":"248","article-title":"Medmcqa: A large-scale multi-subject multi-choice dataset for medical domain question answering","volume-title":"Proceedings of the Conference on Health, Inference, and Learning","author":"Pal"},{"key":"ref15","article-title":"Agieval: A human-centric benchmark for evaluating foundation models","author":"Zhong","year":"2023"},{"key":"ref16","doi-asserted-by":"crossref","DOI":"10.24963\/ijcai.2020\/501","article-title":"Logiqa: A challenge dataset for machine reading comprehension with logical reasoning","volume-title":"International Joint Conference on Artificial Intelligence","author":"Liu"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/taslp.2022.3164218"},{"key":"ref18","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Advances in Neural Information Processing Systems","volume":"33","author":"Brown","year":"2020"},{"key":"ref19","article-title":"Introducing chatgpt","year":"2022"},{"key":"ref20","article-title":"Introducing chatgpt","volume-title":"Gpt-4 technical report","year":"2023"},{"key":"ref21","article-title":"Introducing chatgpt","volume-title":"Gpt-4","year":"2023"},{"key":"ref22","article-title":"Azure openai service","year":"2023"},{"key":"ref23","article-title":"Models","year":"2023"},{"key":"ref24","article-title":"Models","volume-title":"Openai - api reference","year":"2023"},{"key":"ref25","article-title":"Models","volume-title":"Tokens","year":"2023"},{"key":"ref26","first-page":"50","article-title":"On a test of whether one of two random variables is stochastically larger than the other","volume-title":"The Annals of Mathematical Statistics","volume":"18","author":"Mann","year":"1947"},{"key":"ref27","article-title":"Scipy v1.11.4 manual- scipy.stats.mannwhitneyu","year":"2023"}],"event":{"name":"2024 2nd International Conference on Foundation and Large Language Models (FLLM)","location":"Dubai, United Arab Emirates","start":{"date-parts":[[2024,11,26]]},"end":{"date-parts":[[2024,11,29]]}},"container-title":["2024 2nd International Conference on Foundation and Large Language Models (FLLM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10852419\/10852420\/10852493.pdf?arnumber=10852493","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,29]],"date-time":"2025-01-29T06:47:17Z","timestamp":1738133237000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10852493\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,26]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/fllm63129.2024.10852493","relation":{},"subject":[],"published":{"date-parts":[[2024,11,26]]}}}