{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:27:26Z","timestamp":1761895646686,"version":"build-2065373602"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/icme59968.2025.11209200","type":"proceedings-article","created":{"date-parts":[[2025,10,30]],"date-time":"2025-10-30T17:57:42Z","timestamp":1761847062000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["G-TADS: GUI Task-Ability Decoupling Strategy for High-Adaptability Multimodal Intelligent Agents"],"prefix":"10.1109","author":[{"given":"Zhiqiang","family":"Xia","sequence":"first","affiliation":[{"name":"Xiaomi Corporation,Beijing,China"}]},{"given":"Xinyuan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Xiaomi Corporation,Beijing,China"}]},{"given":"Yang","family":"Li","sequence":"additional","affiliation":[{"name":"Xiaomi Corporation,Beijing,China"}]},{"given":"Yuchen","family":"Liu","sequence":"additional","affiliation":[{"name":"Xiaomi Corporation,Beijing,China"}]},{"given":"Runyu","family":"Shi","sequence":"additional","affiliation":[{"name":"Xiaomi Corporation,Beijing,China"}]},{"given":"Jiaming","family":"Xu","sequence":"additional","affiliation":[{"name":"Xiaomi Corporation,Beijing,China"}]}],"member":"263","reference":[{"year":"2024","author":"Wu","article-title":"Foundations and recent trends in multimodal mobile agents: A survey","key":"ref1"},{"year":"2024","author":"Zhang","article-title":"Large Language Model-Brained GUI Agents: A Survey","key":"ref2"},{"year":"2024","author":"Hurst","article-title":"Gpt-4o system card","key":"ref3"},{"year":"2024","author":"Wang","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","key":"ref4"},{"year":"2024","author":"Yao","article-title":"Minicpm-v: A gpt-4v level mllm on your phone","key":"ref5"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/cvpr52733.2024.02283"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.18653\/v1\/2024.acl-long.505"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.18653\/v1\/2024.findings-emnlp.702"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1109\/CVPR52733.2024.01354"},{"year":"2024","author":"Wang","article-title":"Mobile-Agent-v2: Mobile Device Operation Assistant with Effective Navigation via Multi-Agent Collaboration","key":"ref10"},{"year":"2024","author":"Lu","article-title":"Omniparser for pure vision based gui agent","key":"ref11"},{"year":"2023","author":"Hafner","article-title":"Mastering diverse domains through world models","key":"ref12"},{"year":"2024","author":"Li","article-title":"Appagent v2: Advanced agent for flexible mobile interactions","key":"ref13"},{"year":"2023","author":"Achiam","article-title":"Gpt-4 technical report","key":"ref14"},{"year":"2024","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context","key":"ref15"},{"year":"2024","author":"Song","article-title":"Mmac-copilot: Multi-modal agent collaboration operating system copilot","key":"ref16"},{"key":"ref17","first-page":"36","article-title":"Androidinthewild: A large-scale dataset for android device control","author":"Rawles","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref18","first-page":"36","article-title":"Mind2web: Towards a generalist agent for the web","author":"Deng","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.18653\/v1\/2024.findings-acl.186"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1109\/cvpr52734.2025.01816"}],"event":{"name":"2025 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2025,6,30]]},"location":"Nantes, France","end":{"date-parts":[[2025,7,4]]}},"container-title":["2025 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11208895\/11208897\/11209200.pdf?arnumber=11209200","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:55:14Z","timestamp":1761890114000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11209200\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/icme59968.2025.11209200","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}