{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T06:59:03Z","timestamp":1769065143939,"version":"3.49.0"},"reference-count":69,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Natural Science Fund for Distinguished Young Scholars","award":["62025304"],"award-info":[{"award-number":["62025304"]}]},{"name":"National Natural Science Foundation of China Young Scientists Fund","award":["62306163"],"award-info":[{"award-number":["62306163"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/lra.2026.3653273","type":"journal-article","created":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T23:58:44Z","timestamp":1768262324000},"page":"2466-2473","source":"Crossref","is-referenced-by-count":0,"title":["FlowDreamer: A RGB-D World Model With Flow-Based Motion Representations for Robot Manipulation"],"prefix":"10.1109","volume":"11","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6626-4135","authenticated-orcid":false,"given":"Jun","family":"Guo","sequence":"first","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence (BIGAI), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5609-3822","authenticated-orcid":false,"given":"Xiaojian","family":"Ma","sequence":"additional","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence (BIGAI), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1341-6235","authenticated-orcid":false,"given":"Yikai","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Beijing Normal University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-5841-0176","authenticated-orcid":false,"given":"Min","family":"Yang","sequence":"additional","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence (BIGAI), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4042-6044","authenticated-orcid":false,"given":"Huaping","family":"Liu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1185-5365","authenticated-orcid":false,"given":"Qing","family":"Li","sequence":"additional","affiliation":[{"name":"State Key Laboratory of General Artificial Intelligence (BIGAI), Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Recurrent world models facilitate policy evolution","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ha","year":"2018"},{"key":"ref2","article-title":"Learning interactive real-world simulators","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Yang","year":"2024"},{"key":"ref3","article-title":"IRASim: Learning interactive real-robot action simulators","author":"Zhu","year":"2024"},{"key":"ref4","first-page":"68082","article-title":"iVideoGPT: Interactive videoGPTs are scalable world models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wu","year":"2024"},{"key":"ref5","first-page":"4912","article-title":"Automated creation of digital cousins for robust policy learning","volume-title":"Proc. Conf. Robot Learn.","author":"Dai","year":"2025"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00553"},{"key":"ref7","article-title":"Visual foresight: Model-based deep reinforcement learning for vision-based robotic control","author":"Ebert","year":"2018"},{"key":"ref8","article-title":"Model based reinforcement learning for atari","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kaiser","year":"2020"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-025-08744-2"},{"key":"ref10","first-page":"8387","article-title":"Temporal difference learning for model predictive control","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hansen","year":"2022"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref12","article-title":"Denoising diffusion implicit models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Song","year":"2021"},{"key":"ref13","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ho","year":"2020"},{"key":"ref14","first-page":"2256","article-title":"Deep unsupervised learning using nonequilibrium thermodynamics","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sohl-Dickstein","year":"2015"},{"key":"ref15","first-page":"4603","article-title":"Genie: Generative interactive environments","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Bruce","year":"2024"},{"key":"ref16","article-title":"Cosmos world foundation model platform for physical AI","author":"Agarwal","year":"2025"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1873"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.1999.790293"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2023.xix.025"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/lra.2023.3295255"},{"key":"ref21","article-title":"A control-centric benchmark for video prediction","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Tian","year":"2023"},{"key":"ref22","article-title":"Robodesk: A multi-task reinforcement learning benchmark","author":"Kannan","year":"2021"},{"key":"ref23","article-title":"Robosuite: A modular simulation framework and benchmark for robot learning","author":"Zhu","year":"2020"},{"key":"ref24","article-title":"Unsupervised learning for physical interaction through video prediction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Finn","year":"2016"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989324"},{"issue":"3","key":"ref26","first-page":"56","article-title":"A survey of intelligent sensing technologies in autonomous driving","volume":"19","author":"Shao","year":"2021","journal-title":"ZTE Commun."},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/122344.122377"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-03051-4"},{"key":"ref29","first-page":"47234","article-title":"Learning latent dynamic robust representations for world models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sun","year":"2024"},{"issue":"3","key":"ref30","first-page":"22","article-title":"A practical reinforcement learning framework for automatic radar detection","volume":"21","author":"Yu","year":"2023","journal-title":"ZTE Commun."},{"issue":"3","key":"ref31","first-page":"29","article-title":"Boundary data augmentation for offline reinforcement learning","volume":"21","author":"Shen","year":"2023","journal-title":"ZTE Commun."},{"key":"ref32","article-title":"TD-MPC2: Scalable, robust world models for continuous control","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hansen","year":"2024"},{"key":"ref33","article-title":"Action-conditional video prediction using deep networks in atari games","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Oh","year":"2015"},{"key":"ref34","article-title":"Enerverse: Envisioning embodied future space for robotics manipulation","author":"Huang","year":"2025"},{"key":"ref35","first-page":"2023","article-title":"Learning to act from actionless videos through dense correspondences","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ko"},{"key":"ref36","first-page":"61885","article-title":"Robodreamer: Learning compositional world models for robot imagination","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhou","year":"2024"},{"key":"ref37","article-title":"Zero-shot robotic manipulation with pre-trained image-editing diffusion models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Black","year":"2024"},{"key":"ref38","first-page":"9156","article-title":"Learning universal policies via text-guided video generation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Du","year":"2024"},{"key":"ref39","first-page":"61229","article-title":"3D-VLA: A 3D vision-language-action generative world model","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhen","year":"2024"},{"key":"ref40","article-title":"GR-2: A generative video-language-action model with web-scale knowledge for robot manipulation","author":"Cheang","year":"2024"},{"key":"ref41","article-title":"Video language planning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Du","year":"2024"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00819"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00365"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01769"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00010"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1145\/3641519.3657497"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/iros47612.2022.9981187"},{"key":"ref48","first-page":"126","article-title":"Learning 3D dynamic scene representations for robot manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Xu","year":"2020"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989023"},{"key":"ref50","first-page":"2475","article-title":"Flow as the cross-domain manipulation interface","volume-title":"Proc. Conf. Robot Learn.","author":"Xu","year":"2024"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1016\/0004-3702(81)90024-2"},{"key":"ref52","first-page":"674","article-title":"An iterative image registration technique with an application to stereo vision","volume-title":"Proc. Int. Joint Conf. Artif. Intell.","author":"Lucas","year":"1981"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00827"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02126"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0688"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01196"},{"key":"ref58","first-page":"3705","article-title":"Evaluating real-world robot manipulation policies in simulation","volume-title":"Conf. Robot Learn.","author":"Li","year":"2025"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00319"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1049\/el:20080522"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref63","first-page":"6629","article-title":"GANs trained by a two time-scale update rule converge to a local nash equilibrium","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Heusel","year":"2017"},{"key":"ref64","article-title":"Towards accurate generative models of video: A new metric & challenges","author":"Unterthiner","year":"2018"},{"key":"ref65","article-title":"FitVid: Overfitting in pixel-level video prediction","author":"Babaeizadeh","year":"2021"},{"key":"ref66","first-page":"81","article-title":"High fidelity video prediction with large stochastic recurrent neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Villegas","year":"2019"},{"key":"ref67","first-page":"23371","article-title":"MCVD-masked conditional video diffusion for prediction, generation, and interpolation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Voleti","year":"2022"},{"key":"ref68","first-page":"92","article-title":"Unsupervised learning of object structure and dynamics from videos","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Minderer","year":"2019"},{"key":"ref69","article-title":"MaskVit: Masked visual pre-training for video prediction","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gupta","year":"2023"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/11359420\/11345941.pdf?arnumber=11345941","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T21:11:59Z","timestamp":1769029919000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11345941\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":69,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2026.3653273","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}