{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,4]],"date-time":"2026-05-04T10:04:24Z","timestamp":1777889064210,"version":"3.51.4"},"reference-count":60,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key R&D Program of China","doi-asserted-by":"publisher","award":["2022ZD0160102"],"award-info":[{"award-number":["2022ZD0160102"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U21B2042,62320106010"],"award-info":[{"award-number":["U21B2042,62320106010"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccv51701.2025.00158","type":"proceedings-article","created":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T19:45:49Z","timestamp":1777491949000},"page":"1613-1623","source":"Crossref","is-referenced-by-count":0,"title":["UIPro: Unleashing Superior Interaction Capability for GUI Agents"],"prefix":"10.1109","author":[{"given":"Hongxin","family":"Li","sequence":"first","affiliation":[{"name":"University of Chinese Academy of Sciences (UCAS)"}]},{"given":"Jingran","family":"Su","sequence":"additional","affiliation":[{"name":"PolyU"}]},{"given":"Jingfan","family":"Chen","sequence":"additional","affiliation":[{"name":"PolyU"}]},{"given":"Zheng","family":"Ju","sequence":"additional","affiliation":[{"name":"University of Chinese Academy of Sciences (UCAS)"}]},{"given":"Yuntao","family":"Chen","sequence":"additional","affiliation":[{"name":"Hong Kong Institute of Science & Innovation,CASIA"}]},{"given":"Qing","family":"Li","sequence":"additional","affiliation":[{"name":"PolyU"}]},{"given":"Zhaoxiang","family":"Zhang","sequence":"additional","affiliation":[{"name":"University of Chinese Academy of Sciences (UCAS)"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"Palm 2 technical report","author":"Anil","year":"2023"},{"key":"ref2","first-page":"3058","article-title":"Screenai: a visionlanguage model for ui and infographics understanding","author":"Baechler","year":"2024","journal-title":"IJCAI"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/235"},{"key":"ref4","first-page":"12461","article-title":"Digirl: Training in-thewild device-control agents with autonomous reinforcement learning","volume":"37","author":"Bai","year":"2024","journal-title":"NIPS"},{"key":"ref5","volume-title":"Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond","author":"Bai","year":"2023"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2025.xxi.010"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20074-8_18"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.110"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3391613"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.1065"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02283"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.505"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3126594.3126651"},{"key":"ref14","article-title":"Mind2web: Towards a generalist agent for the web","volume":"36","author":"Deng","year":"2024","journal-title":"NIPS"},{"key":"ref15","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2021","journal-title":"ICLR"},{"key":"ref16","article-title":"Multimodal web navigation with instruction-finetuned foundation models","author":"Furuta","year":"2024","journal-title":"ICLR"},{"key":"ref17","volume-title":"Mobileviews: A large-scale mobile gui dataset","author":"Gao","year":"2024"},{"key":"ref18","article-title":"Navigating the digital world as humans do: Universal visual grounding for GUI agents","author":"Gou","year":"2025","journal-title":"ICLR"},{"key":"ref19","article-title":"A real-world webagent with planning, long context understanding, and program synthesis","volume-title":"ICLR","author":"Gur","year":"2024"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01354"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73113-6_10"},{"key":"ref22","article-title":"Autogui: Scaling gui grounding with automatic functionality annotations from LLMs","author":"Li","year":"2025","journal-title":"ACL"},{"key":"ref23","first-page":"92130","article-title":"On the effects of data scale on ui control agents","author":"Li","year":"2024","journal-title":"NIPS"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.729"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.443"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01816"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.52202\/075280-1516"},{"key":"ref28","volume-title":"Llava-next: Improved reasoning, ocr, and world knowledge","author":"Liu","year":"2024"},{"key":"ref29","article-title":"Visualwebbench: How far have multimodal LLMs evolved in web page understanding and grounding?","volume-title":"First Conference on Language Modeling","author":"Liu"},{"key":"ref30","article-title":"Harnessing webpage UIs for text-rich visual understanding","author":"Liu","year":"2025","journal-title":"ICLR"},{"key":"ref31","volume-title":"Gui odyssey: A comprehensive dataset for cross-app gui navigation on mobile devices","author":"Lu","year":"2024"},{"key":"ref32","first-page":"33007","article-title":"WebLINX: Real-world website navigation with multi-turn dialogue","author":"Lu","year":"2024","journal-title":"ICML"},{"key":"ref33","volume-title":"Omniparser for pure vision based gui agent","author":"Lu","year":"2024"},{"key":"ref34","article-title":"Webgpt: Browserassisted question-answering with human feedback","author":"Nakano","year":"2021","journal-title":"arxiv preprint arxiv"},{"key":"ref35","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","author":"Ouyang","year":"2022","journal-title":"NIPS"},{"key":"ref36","volume-title":"Ui-tars: Pioneering automated gui interaction with native agents","author":"Qin","year":"2025"},{"issue":"140","key":"ref37","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"JMLR"},{"key":"ref38","first-page":"59708","article-title":"Android in the wild: A large-scale dataset for android device control","author":"Rawles","year":"2023","journal-title":"NIPS"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref40","article-title":"LAION-5b: An open large-scale dataset for training next generation image-text models","author":"Schuhmann","year":"2022","journal-title":"NIPS"},{"key":"ref41","first-page":"34354","article-title":"From pixels to ui actions: Learning to follow instructions via graphical user interfaces","volume":"36","author":"Shaw","year":"2023","journal-title":"NIPS"},{"key":"ref42","first-page":"5636","article-title":"Towards better semantic understanding of mobile interfaces","volume-title":"Proceedings of the 29th International Conference on Computational Linguistics","author":"Sunkara"},{"key":"ref43","article-title":"OpenAI Team","year":"2024","journal-title":"Gpt-4 technical report"},{"key":"ref44","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arxiv preprint arxiv"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/3472749.3474765"},{"key":"ref46","volume-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024"},{"key":"ref47","article-title":"Emergent abilities of large language models","author":"Wei","year":"2022","journal-title":"TMLR"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/3544548.3581158"},{"key":"ref49","article-title":"OS-ATLAS: Foundation action model for generalist GUI agents","author":"Wu","year":"2025","journal-title":"ICLR"},{"key":"ref50","article-title":"Aguvis: Unified pure vision agents for autonomous GUI interaction","author":"Xu","year":"2025","journal-title":"ICML"},{"key":"ref51","article-title":"Gpt-4v in wonderland: Large multimodal models for zero-shot smartphone gui navigation","author":"Yan","year":"2023","journal-title":"arxiv preprint arxiv"},{"key":"ref52","volume-title":"Set-of-mark prompting unleashes extraordinary visual grounding in gpt-4v","author":"Yang","year":"2023"},{"key":"ref53","first-page":"20744","article-title":"Webshop: Towards scalable real-world web interaction with grounded language agents","volume":"35","author":"Yao","year":"2022","journal-title":"NIPS"},{"key":"ref54","first-page":"240","article-title":"Ferret-ui: Grounded mobile ui understanding with multimodal LLMs","volume-title":"ECCV","author":"You","year":"2024"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46475-6_5"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1145\/3706598.3713600"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.702"},{"key":"ref58","volume-title":"Beyond llavahd: Diving into high-resolution large multimodal models","author":"Zhang","year":"2024"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.186"},{"key":"ref60","article-title":"Synapse: Trajectory-as-exemplar prompting with memory for computer control","author":"Zheng","year":"2023","journal-title":"ICLR"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11443115\/11443287\/11444458.pdf?arnumber=11444458","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T05:06:31Z","timestamp":1777611991000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11444458\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":60,"URL":"https:\/\/doi.org\/10.1109\/iccv51701.2025.00158","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}