{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T17:04:10Z","timestamp":1775667850163,"version":"3.50.1"},"reference-count":67,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012165","name":"Key Technologies Research and Development Program","doi-asserted-by":"publisher","award":["2021YFE0100100"],"award-info":[{"award-number":["2021YFE0100100"]}],"id":[{"id":"10.13039\/501100012165","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004731","name":"Natural Science Foundation of Zhejiang Province","doi-asserted-by":"publisher","award":["LZ24F030010"],"award-info":[{"award-number":["LZ24F030010"]}],"id":[{"id":"10.13039\/501100004731","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100006602","name":"Air Force Research Laboratory","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006602","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100021856","name":"Ministero dell&apos;Universit\u00e0 e della Ricerca","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100021856","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Information Fusion"],"published-print":{"date-parts":[[2026,7]]},"DOI":"10.1016\/j.inffus.2026.104193","type":"journal-article","created":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T07:44:09Z","timestamp":1769672649000},"page":"104193","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":11,"special_numbering":"C","title":["Information-theoretic graph fusion with vision-language-action model for 
policy reasoning and dual robotic control"],"prefix":"10.1016","volume":"131","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2872-4217","authenticated-orcid":false,"given":"Shunlei","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7993-7203","authenticated-orcid":false,"given":"Longsen","family":"Gao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7136-2517","authenticated-orcid":false,"given":"Jin","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-0124-1452","authenticated-orcid":false,"given":"Chang","family":"Che","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0931-6982","authenticated-orcid":false,"given":"Xi","family":"Xiao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6480-5794","authenticated-orcid":false,"given":"Jiuwen","family":"Cao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2452-3570","authenticated-orcid":false,"given":"Yingbai","family":"Hu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7629-3266","authenticated-orcid":false,"given":"Hamid Reza","family":"Karimi","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.inffus.2026.104193_bib0001","series-title":"Springer Handbook of Robotics","doi-asserted-by":"crossref","first-page":"841","DOI":"10.1007\/978-3-319-32552-1_34","article-title":"Visual servoing","author":"Chaumette","year":"2016"},{"key":"10.1016\/j.inffus.2026.104193_bib0002","series-title":"2024 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"4126","article-title":"Vfas-grasp: closed loop grasping with visual feedback and adaptive sampling","author":"Piacenza","year":"2024"},{"issue":"2\u20133","key":"10.1016\/j.inffus.2026.104193_bib0003","first-page":"263","article-title":"Learning the structure of robot manipulation tasks from demonstrations","volume":"39","author":"Manderson","year":"2020","journal-title":"Int. J. Rob. Res."},{"key":"10.1016\/j.inffus.2026.104193_bib0004","series-title":"2016 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"536","article-title":"Deep learning for tactile understanding from visual and haptic data","author":"Gao","year":"2016"},{"issue":"24","key":"10.1016\/j.inffus.2026.104193_bib0005","doi-asserted-by":"crossref","first-page":"27238","DOI":"10.1109\/JSEN.2021.3123638","article-title":"Six-axis force\/torque sensors for robotics applications: a review","volume":"21","author":"Cao","year":"2021","journal-title":"IEEE Sens. J."},{"key":"10.1016\/j.inffus.2026.104193_bib0006","series-title":"8th Annual Conference on Robot Learning","article-title":"HYPERmotion: learning hybrid behavior planning for autonomous loco-manipulation","author":"Wang","year":"2024"},{"key":"10.1016\/j.inffus.2026.104193_bib0007","series-title":"2021 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"6657","article-title":"Multi-step recurrent q-learning for robotic velcro peeling","author":"Yuan","year":"2021"},{"key":"10.1016\/j.inffus.2026.104193_bib0008","unstructured":"A. Brohan, N. Chen, D. Fu, et al., RT-2: vision-language-action models transfer web knowledge to robotic control, arXiv: 2307.15818(2023)."},{"key":"10.1016\/j.inffus.2026.104193_bib0009","unstructured":"M.J. Kim, K. Pertsch, S. Karamcheti, T. Xiao, A. Balakrishna, S. Nair, R. 
Rafailov, E. Foster, G. Lam, P. Sanketi, et al., Openvla: an open-source vision-language-action model, arXiv: 2406.09246. (2024)."},{"key":"10.1016\/j.inffus.2026.104193_bib0010","unstructured":"J. Black, A. Gokaslan, et al., Pi0: open-ended robotic manipulation with large language models and language-conditioned skills, arXiv: 2402.00100. (2024)."},{"issue":"1","key":"10.1016\/j.inffus.2026.104193_bib0011","article-title":"Invertible liquid neural network-based learning of inverse kinematics and dynamics for robotic manipulators","volume":"15","author":"Zhang","year":"2025","journal-title":"Sci. Rep."},{"key":"10.1016\/j.inffus.2026.104193_bib0012","series-title":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"3986","article-title":"Robonurse-vla: robotic scrub nurse system based on vision-language-action model","author":"Li","year":"2025"},{"key":"10.1016\/j.inffus.2026.104193_bib0013","series-title":"2024 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"13940","article-title":"Handnerf: learning to reconstruct hand-object interaction scene from a single rgb image","author":"Choi","year":"2024"},{"key":"10.1016\/j.inffus.2026.104193_bib0014","series-title":"Conference on Robot Learning (CoRL)","article-title":"CLIPort: what and where pathways for robotic manipulation","author":"Shridhar","year":"2022"},{"key":"10.1016\/j.inffus.2026.104193_bib0015","unstructured":"E. Jang, C. Lynch, Y. Duan, S. Schaal, S. Levine, M. Hessel, BC-Z: zero-shot task generalization with robotic imitation learning, arXiv: 2207.11355. (2022)."},{"key":"10.1016\/j.inffus.2026.104193_bib0016","unstructured":"R. Mendonca, J. Mu, B. Wang, et al., RT-X: a benchmark for language-conditioned multi-task robotic manipulation, arXiv: 2307.04752. (2023)."},{"key":"10.1016\/j.inffus.2026.104193_bib0017","unstructured":"S. Nair, E. Jang, Y. Lee, M. Khansari, C. Finn, BCT: benchmarking generalization in vision-language models for robotic manipulation, arXiv: 2207.11355. (2022)."},{"key":"10.1016\/j.inffus.2026.104193_bib0018","unstructured":"B. Huang, Y. Hong, Z. Liu, Y. Huang, Y. Zhu, Inner monologue: embodied reasoning through planning with language models, arXiv: 2305.14620. (2023)."},{"key":"10.1016\/j.inffus.2026.104193_bib0019","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102379","article-title":"Fusion dynamical systems with machine learning in imitation learning: a comprehensive overview","volume":"108","author":"Hu","year":"2024","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104193_bib0020","series-title":"The Thirty-ninth Annual Conference on Neural Information Processing Systems (NeurIPS)","article-title":"Contextual integrity in LLMs via reasoning and reinforcement learning","author":"Lan","year":"2025"},{"key":"10.1016\/j.inffus.2026.104193_bib0021","first-page":"13139","article-title":"Language-conditioned imitation learning for robot manipulation tasks","volume":"33","author":"Stepputtis","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104193_bib0022","first-page":"2327","article-title":"Deep imitation learning for bimanual robotic manipulation","volume":"33","author":"Xie","year":"2020","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"10.1016\/j.inffus.2026.104193_bib0023","series-title":"IEEE Infocom 2024 - IEEE Conference on Computer Communications","first-page":"1341","article-title":"Learning to decompose asymmetric channel kernels for generalized eigenwave multiplexing","author":"Zou","year":"2024"},{"key":"10.1016\/j.inffus.2026.104193_bib0024","unstructured":"Y. Li, Deep reinforcement learning: an overview, arXiv: 1701.07274. (2018)."},{"key":"10.1016\/j.inffus.2026.104193_bib0025","series-title":"Conference on Robot Learning","first-page":"255","article-title":"Learning complex dexterous manipulation with deep reinforcement learning and demonstrations","author":"Rajeswaran","year":"2018"},{"key":"10.1016\/j.inffus.2026.104193_bib0026","series-title":"Conference on Robot Learning","first-page":"651","article-title":"QT-Opt: scalable deep reinforcement learning for vision-based robotic manipulation","author":"Kalashnikov","year":"2018"},{"key":"10.1016\/j.inffus.2026.104193_bib0027","unstructured":"R. Yeh, S. Bahl, R. Girdhar, A. Gupta, ILoVI: imitation learning from visual instructions for skill generalization, arXiv: 2305.16220. (2023)."},{"issue":"24","key":"10.1016\/j.inffus.2026.104193_bib0028","doi-asserted-by":"crossref","first-page":"4843","DOI":"10.3390\/electronics14244843","article-title":"Liquid-augmented MPC in quadrupedal robot for disturbance learning","volume":"14","author":"Mao","year":"2025","journal-title":"Electronics"},{"key":"10.1016\/j.inffus.2026.104193_bib0029","series-title":"GLOBECOM 2023-2023 IEEE Global Communications Conference","first-page":"2536","article-title":"Capacity achieving by diagonal permutation for mu-mimo channels","author":"Zou","year":"2023"},{"key":"10.1016\/j.inffus.2026.104193_bib0030","series-title":"International Conference on Machine Learning","first-page":"2832","article-title":"Infobot: transfer and exploration via the information bottleneck","author":"Goyal","year":"2019"},{"key":"10.1016\/j.inffus.2026.104193_bib0031","doi-asserted-by":"crossref","DOI":"10.1109\/TETCI.2024.3424527","article-title":"Efficient online planning and robust optimal control for nonholonomic mobile robot in unstructured environments","author":"Hu","year":"2024","journal-title":"IEEE Trans. Emerg. Top. Comput. Intell."},{"key":"10.1016\/j.inffus.2026.104193_bib0032","series-title":"European Conference on Computer Vision","first-page":"88","article-title":"Maxmi: a maximal mutual information criterion for manipulation concept discovery","author":"Zhou","year":"2024"},{"key":"10.1016\/j.inffus.2026.104193_bib0033","doi-asserted-by":"crossref","unstructured":"A. Kruzliak, J. Hartvich, S.P. Patni, L. Rustler, J.K. Behrens, J.F. Abu-Dakka, K. Mikolajczyk, V. Kyrki, M. 
Hoffmann Interactive Learning of Physical Object Properties Through Robot Manipulation and Database of Object Measurements, 024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS) (2024) 7596\u2013760310.1109\/IROS58592.2024.10802249.","DOI":"10.1109\/IROS58592.2024.10802249"},{"key":"10.1016\/j.inffus.2026.104193_bib0034","series-title":"2018 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"4243","article-title":"Using simulation and domain adaptation to improve efficiency of deep robotic grasping","author":"Bousmalis","year":"2018"},{"key":"10.1016\/j.inffus.2026.104193_bib0035","series-title":"International Conference on Learning Representations (ICLR)","article-title":"Learning transferable robot skills with hierarchical latent variable models","author":"Peng","year":"2020"},{"issue":"3","key":"10.1016\/j.inffus.2026.104193_bib0036","doi-asserted-by":"crossref","first-page":"465","DOI":"10.1177\/02783649241273565","article-title":"Transfer learning in robotics: an upcoming breakthrough? a review of promises and challenges","volume":"44","author":"Jaquier","year":"2025","journal-title":"Int. J. Rob. Res."},{"key":"10.1016\/j.inffus.2026.104193_bib0037","unstructured":"A. Azzolini, J. Bai, H. Brandon, J. Cao, P. Chattopadhyay, H. Chen, J. Chu, Y. Cui, J. Diamond, Y. Ding, et al., Cosmos-reason1: from physical common sense to embodied reasoning, arXiv: 2503.15558. (2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0038","first-page":"1","article-title":"Multi-dimensional eigenwave multiplexing (MEM): a general modulation for LTV channels","author":"Zou","year":"2025","journal-title":"IEEE Trans. Veh. Technol."},{"key":"10.1016\/j.inffus.2026.104193_bib0039","unstructured":"G. Lan, S. Zhang, T. Wang, Y. Zhang, D. Zhang, X. Wei, X. Pan, H. Zhang, D.-J. Han, C.G. Brinton, MaPPO: maximum a posteriori preference optimization with prior knowledge, arXiv: 2507.21183. (2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0040","series-title":"Conference on Robot Learning","first-page":"2165","article-title":"Rt-2: vision-language-action models transfer web knowledge to robotic control","author":"Zitkovich","year":"2023"},{"key":"10.1016\/j.inffus.2026.104193_bib0041","series-title":"First Workshop on Vision-Language Models for Navigation and Manipulation at ICRA 2024","article-title":"Octo: an open-source generalist robot policy","author":"Mees","year":"2024"},{"key":"10.1016\/j.inffus.2026.104193_bib0042","unstructured":"J. Liu, H. Chen, P. An, Z. Liu, R. Zhang, C. Gu, X. Li, Z. Guo, S. Chen, M. Liu, et al., Hybridvla: collaborative diffusion and autoregression in a unified vision-language-action model, arXiv: 2503.10631. (2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0043","series-title":"Forty-second International Conference on Machine Learning","article-title":"DiffusionVLA: scaling robot foundation models via unified diffusion and autoregression","author":"Wen","year":"2025"},{"key":"10.1016\/j.inffus.2026.104193_bib0044","unstructured":"Q. Li, Y. Liang, Z. Wang, L. Luo, X. Chen, M. Liao, F. Wei, Y. Deng, S. Xu, Y. Zhang, et al., Cogact: a foundational vision-language-action model for synergizing cognition and action in robotic manipulation, arXiv: 2411.19650. (2024)."},{"key":"10.1016\/j.inffus.2026.104193_bib0045","doi-asserted-by":"crossref","unstructured":"K. Black, N. Brown, D. Driess, A. Esmail, M. Equi, C. Finn, N. Fusai, L. Groom, K. Hausman, B. 
Ichter, et al., pi_0: a vision-language-action flow model for general robot control, arXiv: 2410.24164(2024).","DOI":"10.15607\/RSS.2025.XXI.010"},{"key":"#cr-split#-10.1016\/j.inffus.2026.104193_bib0046.1","unstructured":"P. Intelligence, K. Black, N. Brown, J. Darpinian, K. Dhabalia, D. Driess, A. Esmail, M. Equi, C. Finn, N. Fusai, et al., \u03c00. 5: a vision-language-action model with open-world generalization, 2025, 1"},{"key":"#cr-split#-10.1016\/j.inffus.2026.104193_bib0046.2","unstructured":"(2) (2025) 3, arXiv: 2504.16054."},{"key":"10.1016\/j.inffus.2026.104193_bib0047","series-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"1702","article-title":"Cot-vla: visual chain-of-thought reasoning for vision-language-action models","author":"Zhao","year":"2025"},{"key":"10.1016\/j.inffus.2026.104193_bib0048","unstructured":"S. Ye, J. Jang, B. Jeon, S. Joo, J. Yang, B. Peng, A. Mandlekar, R. Tan, Y.-W. Chao, B.Y. Lin, et al., Latent action pretraining from videos, 2025https:\/\/openreview.net\/forum?id=VYOe2eBQehhttps:\/\/openreview.net\/forum?id=VYOe2eBQeh."},{"key":"10.1016\/j.inffus.2026.104193_bib0049","first-page":"27649","article-title":"Robotwin: dual-arm robot benchmark with generative digital twins","author":"Mu","year":"2025","journal-title":"Proc. Comput. Vis. Patt. Recognit. Conf."},{"issue":"3","key":"10.1016\/j.inffus.2026.104193_bib0050","doi-asserted-by":"crossref","first-page":"379","DOI":"10.1002\/j.1538-7305.1948.tb01338.x","article-title":"A mathematical theory of communication","volume":"27","author":"Shannon","year":"1948","journal-title":"Bell Syst. Tech. J."},{"key":"10.1016\/j.inffus.2026.104193_bib0051","series-title":"2024 International Conference on Automation and Computation (AUTOCOM)","first-page":"232","article-title":"Cloud-enabled neural networks for intelligent vehicle emissions tracking and analysis","author":"Anusha","year":"2024"},{"issue":"5","key":"10.1016\/j.inffus.2026.104193_bib0052","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3712003","article-title":"LLM-based multi-agent systems for software engineering: literature review, vision, and the road ahead","volume":"34","author":"He","year":"2025","journal-title":"ACM Trans. Softw. Eng. Method."},{"key":"10.1016\/j.inffus.2026.104193_bib0053","doi-asserted-by":"crossref","first-page":"101016","DOI":"10.1109\/ACCESS.2024.3422211","article-title":"Decentralized and distributed learning for AIoT: a comprehensive review, emerging challenges, and opportunities","volume":"12","author":"Xu","year":"2024","journal-title":"IEEE Access"},{"issue":"4","key":"10.1016\/j.inffus.2026.104193_bib0054","doi-asserted-by":"crossref","DOI":"10.1002\/smb2.70005","article-title":"Leveraging part-based NeRF for robot self-modeling and control","volume":"1","author":"Hu","year":"2025","journal-title":"SmartBot"},{"key":"10.1016\/j.inffus.2026.104193_bib0055","doi-asserted-by":"crossref","DOI":"10.1016\/j.birob.2026.100274","article-title":"Large language model-based task planning for service robots: a review","author":"Bian","year":"2026","journal-title":"Biomimet. Intell. Robot."},{"key":"10.1016\/j.inffus.2026.104193_bib0056","doi-asserted-by":"crossref","first-page":"1245","DOI":"10.1109\/TRO.2025.3530267","article-title":"Exploiting information theory for intuitive robot programming of manual activities","volume":"41","author":"Merlo","year":"2025","journal-title":"IEEE Trans. 
Rob."},{"issue":"6","key":"10.1016\/j.inffus.2026.104193_bib0057","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3660522","article-title":"Unsupervised social bot detection via structural information theory","volume":"42","author":"Peng","year":"2024","journal-title":"ACM Trans. Inf. Syst."},{"key":"10.1016\/j.inffus.2026.104193_bib0058","unstructured":"Z. Chen, M. Kang, B. Li, Shieldagent: Shielding agents via verifiable safety policy reasoning, arXiv: 2503.22738(2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0059","doi-asserted-by":"crossref","DOI":"10.1016\/j.neunet.2025.107315","article-title":"FingerPoseNet: a finger-level multitask learning network with residual feature sharing for 3D hand pose estimation","volume":"187","author":"Tewolde","year":"2025","journal-title":"Neural Netw."},{"key":"10.1016\/j.inffus.2026.104193_bib0060","series-title":"2024 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"6892","article-title":"Open x-embodiment: robotic learning datasets and rt-x models: open x-embodiment collaboration 0","author":"O\u2019Neill","year":"2024"},{"key":"10.1016\/j.inffus.2026.104193_bib0061","unstructured":"J. Wei, X. Wang, D. Schuurmans, M. Bosma, B. Ichter, F. Xia, E. Chi, Q. Le, D. Zhou, Chain-of-thought prompting elicits reasoning in large language models, 2023, arXiv: 2201.11903."},{"key":"10.1016\/j.inffus.2026.104193_bib0062","unstructured":"E.J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen, Lora: low-rank adaptation of large language models, arXiv: 2106.09685(2021)."},{"key":"10.1016\/j.inffus.2026.104193_bib0063","unstructured":"S. Deng, M. Yan, S. Wei, H. Ma, Y. Yang, J. Chen, Z. Zhang, T. Yang, X. Zhang, W. Zhang, et al., Graspvla: a grasping foundation model pre-trained on billion-scale synthetic action data, arXiv: 2505.03233(2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0064","unstructured":"C. Cui, P. Ding, W. Song, S. Bai, X. Tong, Z. Ge, R. Suo, W. Zhou, Y. Liu, B. Jia, et al., Openhelix: a short survey, empirical analysis, and open-source dual-system vla model for robotic manipulation, arXiv: 2505.03912(2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0065","unstructured":"J. Bjorck, F. Casta\u00f1eda, N. Cherniadev, X. Da, R. Ding, L. Fan, Y. Fang, D. Fox, F. Hu, S. Huang, et al., Gr00t n1: an open foundation model for generalist humanoid robots, arXiv: 2503.14734. 
(2025)."},{"key":"10.1016\/j.inffus.2026.104193_bib0066","series-title":"Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing","first-page":"5377","article-title":"Chatvla: unified multimodal understanding and robot control with vision-language-action model","author":"Zhou","year":"2025"}],"container-title":["Information Fusion"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253526000722?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253526000722?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T20:45:24Z","timestamp":1774039524000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1566253526000722"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,7]]},"references-count":67,"alternative-id":["S1566253526000722"],"URL":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104193","relation":{},"ISSN":["1566-2535"],"issn-type":[{"value":"1566-2535","type":"print"}],"subject":[],"published":{"date-parts":[[2026,7]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Information-theoretic graph fusion with vision-language-action model for policy reasoning and dual robotic control","name":"articletitle","label":"Article Title"},{"value":"Information Fusion","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104193","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"104193"}}