{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T08:38:33Z","timestamp":1771922313816,"version":"3.50.1"},"reference-count":46,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccvw69036.2025.00719","type":"proceedings-article","created":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T20:44:02Z","timestamp":1771879442000},"page":"6960-6969","source":"Crossref","is-referenced-by-count":0,"title":["Few-Shot Vision-Language Reasoning for Satellite Imagery via Verifiable Rewards"],"prefix":"10.1109","author":[{"given":"Aybora","family":"K\u00f6ksal","sequence":"first","affiliation":[{"name":"Middle East Technical University (METU),Center for Image Analysis (OGAM),Dept. of Electrical and Electronics Engineering,Ankara,Turkey"}]},{"given":"A. Aydin","family":"Alatan","sequence":"additional","affiliation":[{"name":"Middle East Technical University (METU),Center for Image Analysis (OGAM),Dept. of Electrical and Electronics Engineering,Ankara,Turkey"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023","journal-title":"arXiv preprint"},{"key":"ref2","volume-title":"Deep Agent. Open-r1-v","year":"2025"},{"key":"ref3","author":"Bai","year":"2023","journal-title":"Qwen technical report"},{"key":"ref4","article-title":"Qwen-vl: A frontier large vision-language model with versatile abilities","author":"Bai","year":"2023","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.3390\/rs16091477"},{"key":"ref6","article-title":"Paligemma: A versatile 3b vlm for transfer","author":"Beyer","year":"2024","journal-title":"arXiv preprint"},{"key":"ref7","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref8","article-title":"Fine-tuned \u2018small\u2019 llms (still) significantly outperform zero-shot generative ai models in text classification","author":"Juan Jos\u00e9 Bucher","year":"2024","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02283"},{"key":"ref10","first-page":"2252","article-title":"Patch n\u2019 pack: Navit, a vision transformer for any aspect ratio and resolution","volume":"36","author":"Dehghani","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref11","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint"},{"key":"ref12","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv preprint"},{"key":"ref13","article-title":"Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars","author":"Gandhi","year":"2025","journal-title":"arXiv preprint"},{"key":"ref14","article-title":"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning","author":"Guo","year":"2025","journal-title":"arXiv preprint"},{"key":"ref15","article-title":"Lora: Low-rank adaptation of large language models","author":"Hu","year":"2022","journal-title":"arXiv preprint"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.isprsjprs.2025.03.028"},{"key":"ref17","volume-title":"Huggingface. Open-r1","year":"2025"},{"key":"ref18","article-title":"Gpt-4o system card","author":"Hurst","year":"2024","journal-title":"arXiv preprint"},{"key":"ref19","article-title":"Milchat: Introducing chain of thought reasoning and grpo to a multimodal small language model for remote sensing","author":"Koksal","year":"2025","journal-title":"arXiv preprint"},{"key":"ref20","article-title":"Tinyrs-r1: Compact multimodal language model for remote sensing","author":"Koksal","year":"2025","journal-title":"arXiv preprint"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02629"},{"key":"ref22","article-title":"Lhrs-bot-nova: Improved multimodal large language model for remote sensing vision-language interpretation","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref23","author":"Liu","year":"2023","journal-title":"Visual instruction tuning"},{"key":"ref24","article-title":"Understanding r1-zero-like training: A critical perspective","author":"Liu","year":"2025","journal-title":"arXiv preprint"},{"key":"ref25","volume-title":"LMMs-Lab. Open-r1 multimodal","year":"2025"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72904-1_26"},{"key":"ref27","article-title":"Quality-driven curation of remote sensing vision-language data via learned scoring models","author":"Muhtar","year":"2025","journal-title":"arXiv preprint"},{"key":"ref28","year":"2023","journal-title":"OpenAI. Gpt-3.5 turbo fine-tuning and api updates"},{"key":"ref29","year":"2023","journal-title":"OpenAI. Gpt-4v(ision) system card"},{"key":"ref30","year":"2024","journal-title":"OpenAI. Introducing openai o1-preview"},{"key":"ref31","year":"2025","journal-title":"OpenAI. Openai o3-mini"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i6.32683"},{"key":"ref33","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018","journal-title":"Technical Report"},{"key":"ref34","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"arXiv preprint"},{"key":"ref35","article-title":"Rethinking reflection in pre-training","author":"Darsh","year":"2025","journal-title":"arXiv preprint"},{"key":"ref36","article-title":"Deepseekmath: Pushing the limits of mathematical reasoning in open language models","author":"Shao","year":"2024","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Paligemma 2: A family of versatile vlms for transfer","author":"Steiner","year":"2024","journal-title":"arXiv preprint"},{"key":"ref38","author":"Team","year":"2024","journal-title":"Gemma: Open models based on gemini research and technology"},{"key":"ref39","volume-title":"InternLM Team. Internlm: A multilingual language model with progressively enhanced capabilities","year":"2023"},{"key":"ref40","article-title":"Kimi k1.5: Scaling reinforcement learning with llms","author":"Team","year":"2025","journal-title":"arXiv preprint"},{"key":"ref41","author":"Touvron","year":"2023","journal-title":"Llama: Open and efficient foundation language models"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref43","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref44","article-title":"Reinforcement learning for reasoning in large language models with one training example","author":"Wang","year":"2025","journal-title":"arXiv preprint"},{"key":"ref45","article-title":"Does reinforcement learning really incentivize reasoning capacity in llms beyond the base model","author":"Yue","year":"2025","journal-title":"arXiv preprint"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2024.3409624"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,20]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11373940\/11374285\/11374637.pdf?arnumber=11374637","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T07:33:48Z","timestamp":1771918428000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11374637\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/iccvw69036.2025.00719","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}