{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T09:00:43Z","timestamp":1775638843127,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":10,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,6,9]],"date-time":"2024-06-09T00:00:00Z","timestamp":1717891200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"BMBF and State of Hesse","award":["NHR2021HE"],"award-info":[{"award-number":["NHR2021HE"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,6,9]]},"DOI":"10.1145\/3626246.3654732","type":"proceedings-article","created":{"date-parts":[[2024,5,23]],"date-time":"2024-05-23T10:26:39Z","timestamp":1716459999000},"page":"472-475","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":15,"title":["Demonstrating CAESURA: Language Models as Multi-Modal Query Planners"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7418-6181","authenticated-orcid":false,"given":"Matthias","family":"Urban","sequence":"first","affiliation":[{"name":"Technical University of Darmstadt, Darmstadt, Hesse, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2744-7836","authenticated-orcid":false,"given":"Carsten","family":"Binnig","sequence":"additional","affiliation":[{"name":"Technical University of Darmstadt & DFKI, Darmstadt, Hesse, Germany"}]}],"member":"320","published-online":{"date-parts":[[2024,6,9]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Gemini: A Family of Highly Capable Multimodal Models. arxiv: 2312.11805 [cs]","author":"Google Gemini Team","year":"2023","unstructured":"Gemini Team at Google. 2023. Gemini: A Family of Highly Capable Multimodal Models. arxiv: 2312.11805 [cs]"},{"key":"e_1_3_2_2_2_1","volume-title":"Symphony: Towards Natural Language Query Answering over Multi-modal Data Lakes.","author":"Chen Zui","year":"2023","unstructured":"Zui Chen, Zihui Gu, Lei Cao, Ju Fan, Sam Madden, and Nan Tang. 2023. Symphony: Towards Natural Language Query Answering over Multi-modal Data Lakes. (2023)."},{"key":"e_1_3_2_2_3_1","unstructured":"Matthijs Douze Alexandr Guzhva Chengqi Deng Jeff Johnson Gergely Szilvasy Pierre-Emmanuel Mazar\u00e9 Maria Lomeli Lucas Hosseini and Herv\u00e9 J\u00e9gou. 2024. The Faiss library. (2024). arxiv: 2401.08281 [cs.LG]"},{"key":"e_1_3_2_2_4_1","volume-title":"Lin (Eds.)","volume":"33","author":"Lewis Patrick","year":"2020","unstructured":"Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rockt\u00e4schel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 9459--9474."},{"key":"e_1_3_2_2_5_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. https:\/\/doi.org\/10.48550\/arXiv.2303.08774 arxiv: 2303.08774 [cs]"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626246.3654732"},{"key":"e_1_3_2_2_7_1","volume-title":"Chi, Quoc Le, and Denny Zhou","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022. Chain of Thought Prompting Elicits Reasoning in Large Language Models. arXiv:2201.11903 [cs] (April 2022). arxiv: 2201.11903 [cs]"},{"key":"e_1_3_2_2_8_1","volume-title":"Rush","author":"Wiseman Sam","year":"2017","unstructured":"Sam Wiseman, Stuart M. Shieber, and Alexander M. Rush. 2017. Challenges in Data-to-Document Generation. arXiv:1707.08052 [cs] (July 2017). arxiv: 1707.08052 [cs]"},{"key":"e_1_3_2_2_9_1","unstructured":"Chenfei Wu Shengming Yin Weizhen Qi Xiaodong Wang Zecheng Tang and Nan Duan. 2023. Visual ChatGPT : Talking Drawing and Editing with Visual Foundation Models. arxiv: 2303.04671 [cs]"},{"key":"e_1_3_2_2_10_1","unstructured":"Shunyu Yao Jeffrey Zhao Dian Yu Nan Du Izhak Shafran Karthik Narasimhan and Yuan Cao. 2023. ReAct: Synergizing Reasoning and Acting in Language Models. arxiv: 2210.03629"}],"event":{"name":"SIGMOD\/PODS '24: International Conference on Management of Data","location":"Santiago AA Chile","acronym":"SIGMOD\/PODS '24","sponsor":["SIGMOD ACM Special Interest Group on Management of Data"]},"container-title":["Companion of the 2024 International Conference on Management of Data"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3626246.3654732","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3626246.3654732","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T11:29:11Z","timestamp":1755862151000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3626246.3654732"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,9]]},"references-count":10,"alternative-id":["10.1145\/3626246.3654732","10.1145\/3626246"],"URL":"https:\/\/doi.org\/10.1145\/3626246.3654732","relation":{},"subject":[],"published":{"date-parts":[[2024,6,9]]},"assertion":[{"value":"2024-06-09","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}