{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T22:23:02Z","timestamp":1774045382108,"version":"3.50.1"},"reference-count":89,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,9,1]],"date-time":"2026-09-01T00:00:00Z","timestamp":1788220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022 YFB3903404"],"award-info":[{"award-number":["2022 YFB3903404"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Information Fusion"],"published-print":{"date-parts":[[2026,9]]},"DOI":"10.1016\/j.inffus.2026.104252","type":"journal-article","created":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T16:22:49Z","timestamp":1771863769000},"page":"104252","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["MuBe4D: A mutual benefit framework for generalizable motion segmentation and geometry-first 4D 
reconstruction"],"prefix":"10.1016","volume":"133","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-5002-4577","authenticated-orcid":false,"given":"Shuo","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0172-1582","authenticated-orcid":false,"given":"Wei","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5901-8932","authenticated-orcid":false,"given":"Xin","family":"Su","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8943-079X","authenticated-orcid":false,"given":"Jun","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-4347-2545","authenticated-orcid":false,"given":"Xinrui","family":"Zeng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3040-3500","authenticated-orcid":false,"given":"Bin","family":"Luo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9207-2076","authenticated-orcid":false,"given":"Chenjie","family":"Wang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"1","key":"10.1016\/j.inffus.2026.104252_sbref0001","article-title":"A survey of efficient deep learning models for moving object segmentation","volume":"12","author":"Hou","year":"2023","journal-title":"APSIPA Trans. Sign. Inf. Process."},{"issue":"2","key":"10.1016\/j.inffus.2026.104252_bib0002","doi-asserted-by":"crossref","first-page":"252","DOI":"10.1109\/5.265351","article-title":"Motion and structure from feature correspondences: a review","volume":"82","author":"Huang","year":"1994","journal-title":"Proc. IEEE"},{"issue":"2","key":"10.1016\/j.inffus.2026.104252_sbref0003","doi-asserted-by":"crossref","first-page":"550","DOI":"10.1109\/LRA.2020.3045647","article-title":"DymSLAM: 4D dynamic scene reconstruction based on geometrical motion segmentation","volume":"6","author":"Wang","year":"2021","journal-title":"IEEE Rob. Autom. Lett."},{"key":"10.1016\/j.inffus.2026.104252_sbref0004","series-title":"2016 19th International Conference on Information Fusion (FUSION)","first-page":"1743","article-title":"Motion segmentation and appearance change detection based 2D hand tracking","author":"Hammer","year":"2016"},{"key":"10.1016\/j.inffus.2026.104252_bib0005","series-title":"Embodied Intelligence","first-page":"697","author":"Cangelosi","year":"2015"},{"key":"10.1016\/j.inffus.2026.104252_bib0006","unstructured":"L. Goli, S. Sabour, M. Matthews, M. Brubaker, D. Lagun, A. Jacobson, D.J. Fleet, S. Saxena, A. Tagliasacchi, RoMo: Robust Motion Segmentation Improves Structure from Motion, 2024. 2411.18650 [cs]."},{"key":"10.1016\/j.inffus.2026.104252_sbref0007","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/TPAMI.2022.3198480","article-title":"EM-driven unsupervised learning for efficient motion segmentation","author":"Meunier","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104252_bib0008","series-title":"2018 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"7510","article-title":"Robust dense mapping for large-scale dynamic environments","author":"Barsan","year":"2018"},{"issue":"2","key":"10.1016\/j.inffus.2026.104252_bib0009","doi-asserted-by":"crossref","first-page":"1803","DOI":"10.1109\/LRA.2020.2969183","article-title":"Track to reconstruct and reconstruct to track","volume":"5","author":"Luiten","year":"2020","journal-title":"IEEE Rob. Autom. 
Lett."},{"key":"10.1016\/j.inffus.2026.104252_bib0010","unstructured":"J. Zhang, C. Herrmann, J. Hur, V. Jampani, T. Darrell, F. Cole, D. Sun, M.-H. Yang, MonST3R: A Simple Approach for Estimating Geometry in the Presence of Motion, 2024. 10.48550\/arXiv.2410.03825."},{"key":"10.1016\/j.inffus.2026.104252_bib0011","doi-asserted-by":"crossref","unstructured":"J. Lu, T. Huang, P. Li, Z. Dou, C. Lin, Z. Cui, Z. Dong, S.-K. Yeung, W. Wang, Y. Liu, Align3R: Aligned Monocular Depth Estimation for Dynamic Videos, 2024. 10.48550\/arXiv.2412.03079.","DOI":"10.1109\/CVPR52734.2025.02125"},{"key":"10.1016\/j.inffus.2026.104252_bib0012","series-title":"2023 IEEE\/CVF International Conference on Computer Vision (ICCV)","first-page":"3992","article-title":"Segment anything","author":"Kirillov","year":"2023"},{"key":"10.1016\/j.inffus.2026.104252_bib0013","unstructured":"N. Ravi, V. Gabeur, Y.-T. Hu, R. Hu, C. Ryali, T. Ma, H. Khedr, R. R\u00e4dle, C. Rolland, L. Gustafson, E. Mintun, J. Pan, K.V. Alwala, N. Carion, C.-Y. Wu, R. Girshick, P. Doll\u00e1r, C. Feichtenhofer, SAM 2: Segment Anything in Images and Videos, 2024. 10.48550\/arXiv.2408.00714."},{"key":"10.1016\/j.inffus.2026.104252_bib0014","unstructured":"A. Bochkovskii, A. Delaunoy, H. Germain, M. Santos, Y. Zhou, S.R. Richter, V. Koltun, Depth Pro: Sharp Monocular Metric Depth in Less Than a Second, 2024. arXiv: 2410.02073 [cs], 10.48550\/arXiv.2410.02073."},{"key":"10.1016\/j.inffus.2026.104252_sbref0015","series-title":"Depth anything: unleashing the power of large-scale unlabeled data","first-page":"10371","author":"Yang","year":"2024"},{"key":"10.1016\/j.inffus.2026.104252_bib0016","doi-asserted-by":"crossref","unstructured":"W. Hu, X. Gao, X. Li, S. Zhao, X. Cun, Y. Zhang, L. Quan, Y. Shan, DepthCrafter: Generating Consistent Long Depth Sequences for Open-world Videos, 2024. 10.48550\/arXiv.2409.02095.","DOI":"10.1109\/CVPR52734.2025.00193"},{"key":"10.1016\/j.inffus.2026.104252_bib0017","doi-asserted-by":"crossref","unstructured":"J. Shao, Y. Yang, H. Zhou, Y. Zhang, Y. Shen, V. Guizilini, Y. Wang, M. Poggi, Y. Liao, Learning Temporally Consistent Video Depth from Video Diffusion Priors, 2024. 10.48550\/arXiv.2406.01493.","DOI":"10.1109\/CVPR52734.2025.02127"},{"key":"10.1016\/j.inffus.2026.104252_bib0018","doi-asserted-by":"crossref","unstructured":"J. Xie, W. Xie, A. Zisserman, Segmenting Moving Objects via an Object-Centric Layered Representation, 2022. 10.48550\/arXiv.2207.02206.","DOI":"10.52202\/068431-2032"},{"key":"10.1016\/j.inffus.2026.104252_bib0019","doi-asserted-by":"crossref","unstructured":"J. Xie, W. Xie, A. Zisserman, Appearance-Based Refinement for Object-Centric Motion Segmentation, 2024. 10.48550\/arXiv.2312.11463.","DOI":"10.1007\/978-3-031-72933-1_14"},{"key":"10.1016\/j.inffus.2026.104252_sbref0020","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102863","article-title":"3D-Guided Multi-Feature semantic enhancement network for person re-ID","volume":"117","author":"Ning","year":"2025","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104252_sbref0021","doi-asserted-by":"crossref","first-page":"46","DOI":"10.1016\/j.inffus.2021.05.002","article-title":"2D-3D geometric fusion network using multi-Neighbourhood graph convolution for RGB-D indoor scene classification","volume":"76","author":"Mosella-Montoro","year":"2021","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.inffus.2026.104252_bib0022","series-title":"2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"4104","article-title":"Structure-from-motion revisited","author":"Sch\u00f6nberger","year":"2016"},{"issue":"2","key":"10.1016\/j.inffus.2026.104252_sbref0023","doi-asserted-by":"crossref","first-page":"249","DOI":"10.1109\/TRO.2016.2623335","article-title":"SVO: semidirect visual odometry for monocular and multicamera systems","volume":"33","author":"Forster","year":"2017","journal-title":"IEEE Trans. Rob."},{"issue":"6","key":"10.1016\/j.inffus.2026.104252_sbref0024","doi-asserted-by":"crossref","first-page":"1874","DOI":"10.1109\/TRO.2021.3075644","article-title":"ORB-SLAM3: an accurate open-source library for visual, visual-inertial, and multimap SLAM","volume":"37","author":"Campos","year":"2021","journal-title":"IEEE Trans. Rob."},{"key":"10.1016\/j.inffus.2026.104252_sbref0025","series-title":"LEAP-VO: long-term effective any point tracking for visual odometry","first-page":"19844","author":"Chen","year":"2024"},{"key":"10.1016\/j.inffus.2026.104252_bib0026","series-title":"Computer Vision - ECCV 2022","first-page":"523","article-title":"ParticleSfM: exploiting dense point trajectories for localizing moving cameras in the wild","author":"Zhao","year":"2022"},{"key":"10.1016\/j.inffus.2026.104252_bib0027","unstructured":"X. Yu, W. Ye, X. Guo, Y. Ming, J. Li, H. Bao, Z. Cui, G. Zhang, D3FlowSLAM: Self-Supervised Dynamic SLAM with Flow Motion Decomposition and DINO Guidance, 2024. https:\/\/arxiv.org\/abs\/2207.08794."},{"key":"10.1016\/j.inffus.2026.104252_bib0028","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"1611","article-title":"Robust consistent video depth estimation","author":"Kopf","year":"2021"},{"key":"10.1016\/j.inffus.2026.104252_bib0029","series-title":"Computer Vision - ECCV 2022","first-page":"20","article-title":"Structure and motion from casual videos","author":"Zhang","year":"2022"},{"key":"10.1016\/j.inffus.2026.104252_bib0030","unstructured":"R. Ranftl, K. Lasinger, D. Hafner, K. Schindler, V. Koltun, Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer, 2020. arXiv: 1907.01341 [cs], 10.48550\/arXiv.1907.01341."},{"key":"10.1016\/j.inffus.2026.104252_bib0031","unstructured":"L. Yang, B. Kang, Z. Huang, Z. Zhao, X. Xu, J. Feng, H. Zhao, Depth Anything V2, 2024. 10.48550\/arXiv.2406.09414."},{"issue":"4","key":"10.1016\/j.inffus.2026.104252_bib0032","doi-asserted-by":"crossref","first-page":"71:71:1","DOI":"10.1145\/3386569.3392377","article-title":"Consistent video depth estimation","volume":"39","author":"Luo","year":"2020","journal-title":"ACM Trans. Graph."},{"issue":"4","key":"10.1016\/j.inffus.2026.104252_bib0033","doi-asserted-by":"crossref","DOI":"10.1145\/3450626.3459871","article-title":"Consistent depth of moving objects in video","volume":"40","author":"Zhang","year":"2021","journal-title":"ACM Trans. Graph."},{"key":"10.1016\/j.inffus.2026.104252_bib0034","doi-asserted-by":"crossref","unstructured":"S. Wang, V. Leroy, Y. Cabon, B. Chidlovskii, J. Revaud, DUSt3R: Geometric 3D Vision Made Easy, 2024a. 10.48550\/arXiv.2312.14132.","DOI":"10.1109\/CVPR52733.2024.01956"},{"key":"10.1016\/j.inffus.2026.104252_bib0035","doi-asserted-by":"crossref","unstructured":"R. Wang, S. Xu, C. Dai, J. Xiang, Y. Deng, X. Tong, J. 
Yang, MoGe: Unlocking Accurate Monocular Geometry Estimation for Open-Domain Images with Optimal Training Supervision, 2024b. arXiv: 2410.19115 [cs].","DOI":"10.1109\/CVPR52734.2025.00496"},{"key":"10.1016\/j.inffus.2026.104252_bib0036","doi-asserted-by":"crossref","unstructured":"Z. Tang, Y. Fan, D. Wang, H. Xu, R. Ranjan, A. Schwing, Z. Yan, MV-DUSt3R+: Single-Stage Scene Reconstruction from Sparse Views In 2 Seconds, 2024. arXiv: 2412.06974 [cs].","DOI":"10.1109\/CVPR52734.2025.00498"},{"key":"10.1016\/j.inffus.2026.104252_bib0037","doi-asserted-by":"crossref","unstructured":"Y. Liu, S. Dong, S. Wang, Y. Yang, Q. Fan, B. Chen, SLAM3R: Real-Time Dense Scene Reconstruction from Monocular RGB Videos, 2024. arXiv: 2412.09401 [cs], 10.48550\/arXiv.2412.09401.","DOI":"10.1109\/CVPR52734.2025.01552"},{"key":"10.1016\/j.inffus.2026.104252_bib0038","doi-asserted-by":"crossref","unstructured":"J. Yang, A. Sax, K.J. Liang, M. Henaff, H. Tang, A. Cao, J. Chai, F. Meier, M. Feiszli, Fast3R: Towards 3D Reconstruction of 1000+ Images in One Forward Pass, 2025. arXiv: 2501.13928 [cs], 10.48550\/arXiv.2501.13928.","DOI":"10.1109\/CVPR52734.2025.02042"},{"key":"10.1016\/j.inffus.2026.104252_bib0039","series-title":"2025 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"5294","article-title":"VGGT: visual geometry grounded transformer","author":"Wang","year":"2025"},{"key":"10.1016\/j.inffus.2026.104252_bib0040","doi-asserted-by":"crossref","unstructured":"S. Dong, S. Wang, S. Liu, L. Cai, Q. Fan, J. Kannala, Y. Yang, Reloc3r: Large-Scale Training of Relative Camera Pose Regression for Generalizable, Fast, and Accurate Visual Localization, 2024. arXiv: 2412.08376 [cs].","DOI":"10.1109\/CVPR52734.2025.01560"},{"key":"10.1016\/j.inffus.2026.104252_bib0041","doi-asserted-by":"crossref","unstructured":"V. Leroy, Y. Cabon, J. Revaud, Grounding Image Matching in 3D with MASt3R, 2024. arXiv: 2406.09756.","DOI":"10.1007\/978-3-031-73220-1_5"},{"key":"10.1016\/j.inffus.2026.104252_bib0042","unstructured":"B. Smart, C. Zheng, I. Laina, V.A. Prisacariu, Splatt3R: Zero-shot Gaussian Splatting from Uncalibrated Image Pairs, 2024. arXiv: 2408.13912 [cs]."},{"key":"10.1016\/j.inffus.2026.104252_bib0043","unstructured":"Z. Fan, K. Wen, W. Cong, K. Wang, J. Zhang, X. Ding, D. Xu, B. Ivanovic, M. Pavone, G. Pavlakos, Z. Wang, Y. Wang, InstantSplat: Sparse-view SfM-free Gaussian Splatting in Seconds, 2024. arXiv: 2403.20309 [cs]."},{"key":"10.1016\/j.inffus.2026.104252_bib0044","series-title":"Proceedings of the 2008 Conference on Artificial Intelligence Research and Development: Proceedings of the 11th International Conference of the Catalan Association for Artificial Intelligence","first-page":"398","article-title":"Motion segmentation: a review","author":"Zappella","year":"2008"},{"issue":"2","key":"10.1016\/j.inffus.2026.104252_bib0045","doi-asserted-by":"crossref","first-page":"157","DOI":"10.1007\/BF01420735","article-title":"Motion segmentation and qualitative dynamic scene analysis from an image sequence","volume":"10","author":"Bouthemy","year":"1993","journal-title":"Int. J. Comput. 
Vis."},{"key":"10.1016\/j.inffus.2026.104252_bib0046","series-title":"Proceedings., International Conference on Image Processing","first-page":"628","article-title":"MRF-based motion segmentation exploiting a 2D motion model robust estimation","volume":"3","author":"Odobez","year":"1995"},{"key":"10.1016\/j.inffus.2026.104252_bib0047","series-title":"Proceedings of the Seventh IEEE International Conference on Computer Vision","first-page":"566","article-title":"Direct identification of moving objects and background from 2D motion models","volume":"1","author":"Csurka","year":"1999"},{"key":"10.1016\/j.inffus.2026.104252_bib0048","series-title":"Computer Vision - ECCV 2012","first-page":"860","article-title":"Detection of independently moving objects in non-planar scenes via multi-frame monocular epipolar constraint","author":"Dey","year":"2012"},{"key":"10.1016\/j.inffus.2026.104252_sbref0049","series-title":"Procedings of the British Machine Vision Conference 2017","first-page":"96","article-title":"Video segmentation with background motion models","author":"Wehrwein","year":"2017"},{"key":"10.1016\/j.inffus.2026.104252_sbref0050","series-title":"Unsupervised space-time network for temporally-consistent segmentation of multiple motions","first-page":"22139","author":"Meunier","year":"2023"},{"key":"10.1016\/j.inffus.2026.104252_bib0051","series-title":"2021 IEEE\/CVF International Conference on Computer Vision (ICCV)","first-page":"7157","article-title":"Self-supervised video object segmentation by motion grouping","author":"Yang","year":"2021"},{"key":"10.1016\/j.inffus.2026.104252_sbref0052","series-title":"DyStaB: unsupervised object segmentation via dynamic-static bootstrapping","first-page":"2826","author":"Yang","year":"2021"},{"key":"10.1016\/j.inffus.2026.104252_bib0053","series-title":"2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"7417","article-title":"Primary object segmentation in videos based on region augmentation and reduction","author":"Koh","year":"2017"},{"key":"10.1016\/j.inffus.2026.104252_bib0054","unstructured":"H. Lamdouar, W. Xie, A. Zisserman, Segmenting invisible moving objects(2021). Publisher: British Machine Vision Association,https:\/\/ora.ox.ac.uk\/objects\/uuid:0195e3ca-245c-40b9-bc77-a4a270c81cb1."},{"issue":"3","key":"10.1016\/j.inffus.2026.104252_bib0055","doi-asserted-by":"crossref","first-page":"908","DOI":"10.1109\/TCSVT.2021.3069094","article-title":"Learning clustering for motion segmentation","volume":"32","author":"Xu","year":"2022","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.inffus.2026.104252_bib0056","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV)","first-page":"9650","article-title":"Emerging properties in self-supervised vision transformers","author":"Caron","year":"2021"},{"key":"10.1016\/j.inffus.2026.104252_sbref0057","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2025.111840","article-title":"Video saliency prediction via single feature enhancement and temporal recurrence","volume":"160","author":"Zhang","year":"2025","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.inffus.2026.104252_bib0058","unstructured":"X. Chen, Y. Chen, Y. Xiu, A. Geiger, A. Chen, Easi3R: Estimating Disentangled Motion from DUSt3R Without Training, 2025. https:\/\/arxiv.org\/abs\/2503.243912503.24391."},{"key":"10.1016\/j.inffus.2026.104252_bib0059","unstructured":"K. Xu, T.H.E. Tse, J. Peng, A. 
Yao, DAS3R: Dynamics-Aware Gaussian Splatting for Static Scene Reconstruction, 2024. arXiv: 2412.19584 [cs]."},{"key":"10.1016\/j.inffus.2026.104252_bib0060","doi-asserted-by":"crossref","unstructured":"T. Zhou, S. Wang, Y. Zhou, Y. Yao, J. Li, L. Shao, Motion-Attentive Transition for Zero-Shot Video Object Segmentation, 2020. arXiv: 2003.04253, 10.48550\/arXiv.2003.04253.","DOI":"10.1109\/TIP.2020.3013162"},{"key":"10.1016\/j.inffus.2026.104252_sbref0061","series-title":"See more, know more: unsupervised video object segmentation with co-attention siamese networks","first-page":"3623","author":"Lu","year":"2019"},{"key":"10.1016\/j.inffus.2026.104252_bib0062","doi-asserted-by":"crossref","unstructured":"G. Pei, F. Shen, Y. Yao, G.-S. Xie, Z. Tang, J. Tang, Hierarchical Feature Alignment Network for Unsupervised Video Object Segmentation, 2022. arXiv: 2207.08485 [cs], 10.48550\/arXiv.2207.08485.","DOI":"10.1007\/978-3-031-19830-4_34"},{"key":"10.1016\/j.inffus.2026.104252_sbref0063","series-title":"Unsupervised moving object detection via contextual information separation","first-page":"879","author":"Yang","year":"2019"},{"key":"10.1016\/j.inffus.2026.104252_sbref0064","series-title":"Self-supervised segmentation by grouping optical-flow","author":"Mahendran","year":"2018"},{"key":"10.1016\/j.inffus.2026.104252_bib0065","series-title":"2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"8709","article-title":"Locality-aware inter- and intra-video reconstruction for self-supervised correspondence learning","author":"Li","year":"2022"},{"key":"10.1016\/j.inffus.2026.104252_bib0066","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"18706","article-title":"Unified mask embedding and correspondence learning for self-supervised video segmentation","author":"Li","year":"2023"},{"key":"10.1016\/j.inffus.2026.104252_bib0067","unstructured":"G. Pei, Y. Yao, J. Jiao, W. Wang, L. Nie, J. Tang, Dynamic in static: hybrid visual correspondence for self-supervised video object segmentation, arXiv preprint arXiv: 2404.13505 (2024)."},{"key":"10.1016\/j.inffus.2026.104252_bib0068","unstructured":"A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, J. Uszkoreit, N. Houlsby, An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale, 2021. arXiv: 2010.11929, 10.48550\/arXiv.2010.11929."},{"key":"10.1016\/j.inffus.2026.104252_bib0069","series-title":"2016 Fourth International Conference on 3D Vision (3DV)","first-page":"239","article-title":"Deeper depth prediction with fully convolutional residual networks","author":"Laina","year":"2016"},{"key":"10.1016\/j.inffus.2026.104252_bib0070","doi-asserted-by":"crossref","unstructured":"S. Cho, M. Lee, S. Lee, C. Park, D. Kim, S. Lee, Treating Motion as Option to Reduce Motion Dependency in Unsupervised Video Object Segmentation, 2022. arXiv: 2209.03138, 10.48550\/arXiv.2209.03138.","DOI":"10.1109\/WACV56688.2023.00511"},{"key":"10.1016\/j.inffus.2026.104252_bib0071","unstructured":"A. Dave, P. Tokmakov, D. Ramanan, Towards Segmenting Anything That Moves, 2020. arXiv: 1902.03715, http:\/\/arxiv.org\/abs\/1902.03715."},{"key":"10.1016\/j.inffus.2026.104252_bib0072","doi-asserted-by":"crossref","unstructured":"W. Jang, P. Weinzaepfel, V. Leroy, L. Agapito, J. Revaud, Pow3R: Empowering Unconstrained 3D Reconstruction with Camera and Scene Priors, 2025. 
arXiv: 2503.17316 [cs], 10.48550\/arXiv.2503.17316.","DOI":"10.1109\/CVPR52734.2025.00108"},{"key":"10.1016\/j.inffus.2026.104252_bib0073","unstructured":"G. Lin, A. Milan, C. Shen, I. Reid, RefineNet: Multi-Path Refinement Networks for High-Resolution Semantic Segmentation, 2016. http:\/\/arxiv.org\/abs\/1611.06612."},{"key":"10.1016\/j.inffus.2026.104252_bib0074","series-title":"2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"311","article-title":"Monocular relative depth perception with web stereo data supervision","author":"Xian","year":"2018"},{"key":"10.1016\/j.inffus.2026.104252_bib0075","doi-asserted-by":"crossref","unstructured":"R. Ranftl, A. Bochkovskiy, V. Koltun, Vision Transformers for Dense Prediction, 2021. 10.48550\/arXiv.2103.13413.","DOI":"10.1109\/ICCV48922.2021.01196"},{"key":"10.1016\/j.inffus.2026.104252_bib0076","series-title":"2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"724","article-title":"A benchmark dataset and evaluation methodology for video object segmentation","author":"Perazzi","year":"2016"},{"key":"10.1016\/j.inffus.2026.104252_sbref0077","series-title":"Video segmentation by tracking many figure-ground segments","first-page":"2192","author":"Li","year":"2013"},{"issue":"6","key":"10.1016\/j.inffus.2026.104252_bib0078","doi-asserted-by":"crossref","first-page":"1187","DOI":"10.1109\/TPAMI.2013.242","article-title":"Segmentation of moving objects by long term video analysis","volume":"36","author":"Ochs","year":"2014","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104252_bib0079","doi-asserted-by":"crossref","DOI":"10.1177\/0278364913491297","article-title":"Vision meets robotics: the KITTI dataset","author":"Geiger","year":"2013","journal-title":"International Journal of Robotics Research (IJRR)"},{"key":"10.1016\/j.inffus.2026.104252_bib0080","unstructured":"N. Carion, L. Gustafson, Y.-T. Hu, S. Debnath, R. Hu, D. Suris, C. Ryali, K.V. Alwala, H. Khedr, A. Huang, J. Lei, T. Ma, B. Guo, A. Kalla, M. Marks, J. Greer, M. Wang, P. Sun, R. R\u00e4dle, T. Afouras, E. Mavroudi, K. Xu, T.-H. Wu, Y. Zhou, L. Momeni, R. Hazra, S. Ding, S. Vaze, F. Porcher, F. Li, S. Li, A. Kamath, H.K. Cheng, P. Doll\u00e1r, N. Ravi, K. Saenko, P. Zhang, C. Feichtenhofer, SAM 3: Segment Anything with Concepts, 2025. https:\/\/arxiv.org\/abs\/2511.16719."},{"key":"10.1016\/j.inffus.2026.104252_bib0081","series-title":"2019 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"7855","article-title":"ReFusion: 3D reconstruction in dynamic environments for RGB-D cameras exploiting residuals","author":"Palazzolo","year":"2019"},{"key":"10.1016\/j.inffus.2026.104252_bib0082","unstructured":"J. Pont-Tuset, F. Perazzi, S. Caelles, P. Arbel\u00e1ez, A. Sorkine-Hornung, L.V. Gool, The 2017 DAVIS Challenge on Video Object Segmentation, 2018. arXiv: 1704.00675 [cs]."},{"key":"10.1016\/j.inffus.2026.104252_bib0083","unstructured":"C. Wang, C. Li, B. Luo, W. Wang, J. Liu, RiWNet: A moving object instance segmentation Network being Robust in adverse Weather conditions, 2021. arXiv: 2109.01820."},{"key":"10.1016\/j.inffus.2026.104252_bib0084","doi-asserted-by":"crossref","unstructured":"N. Xu, L. Yang, Y. Fan, D. Yue, Y. Liang, J. Yang, T. Huang, YouTube-VOS: A Large-Scale Video Object Segmentation Benchmark, 2018. 
arXiv: 1809.03327 [cs], 10.48550\/arXiv.1809.03327.","DOI":"10.1007\/978-3-030-01228-1_36"},{"key":"10.1016\/j.inffus.2026.104252_bib0085","unstructured":"Y. Cabon, N. Murray, M. Humenberger, Virtual KITTI 2, 2020. arXiv: 2001.10773 [cs]."},{"key":"10.1016\/j.inffus.2026.104252_bib0086","doi-asserted-by":"crossref","unstructured":"Z. Teed, J. Deng, RAFT: Recurrent All-Pairs Field Transforms for Optical Flow, 2020. arXiv: 2003.12039 [cs].","DOI":"10.1007\/978-3-030-58536-5_24"},{"key":"10.1016\/j.inffus.2026.104252_bib0087","doi-asserted-by":"crossref","unstructured":"Y. Wang, L. Lipson, J. Deng, SEA-RAFT: Simple, Efficient, Accurate RAFT for Optical Flow, 2024. arXiv: 2405.14793 [cs].","DOI":"10.1007\/978-3-031-72667-5_3"},{"key":"10.1016\/j.inffus.2026.104252_bib0088","series-title":"2012 IEEE\/RSJ International Conference on Intelligent Robots and Systems","first-page":"573","article-title":"A benchmark for the evaluation of RGB-D SLAM systems","author":"Sturm","year":"2012"},{"key":"10.1016\/j.inffus.2026.104252_bib0089","unstructured":"Z. Deng et al., Deeply seeking boundary for lunar regolith segmentation. 10.12074\/202512.00315."}],"container-title":["Information Fusion"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253526001314?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253526001314?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T20:45:40Z","timestamp":1774039540000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1566253526001314"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,9]]},"references-count":89,"alternative-id":["S1566253526001314"],"URL":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104252","relation":{},"ISSN":["1566-2535"],"issn-type":[{"value":"1566-2535","type":"print"}],"subject":[],"published":{"date-parts":[[2026,9]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"MuBe4D: A mutual benefit framework for generalizable motion segmentation and geometry-first 4D reconstruction","name":"articletitle","label":"Article Title"},{"value":"Information Fusion","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104252","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Published by Elsevier B.V.","name":"copyright","label":"Copyright"}],"article-number":"104252"}}
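
Provenance note: the object above is a Crossref REST API "work" message for DOI 10.1016/j.inffus.2026.104252. A minimal sketch follows of how such a record can be re-fetched and its core bibliographic fields read; the only assumptions beyond the record itself are Python 3, network access, and the public Crossref endpoint https://api.crossref.org/works/{DOI}, whose envelope matches the "status"/"message-type"/"message" structure shown above.

# Sketch only (not part of the deposited record): re-fetch this Crossref work
# and read fields that appear verbatim in the JSON above ("title",
# "container-title", "issued", "author", "references-count").
import json
import urllib.request

DOI = "10.1016/j.inffus.2026.104252"
with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    envelope = json.load(resp)

# The envelope mirrors the record above:
# {"status": "ok", "message-type": "work", "message": {...}}.
assert envelope["status"] == "ok" and envelope["message-type"] == "work"
work = envelope["message"]

# Crossref stores titles as arrays and dates as nested "date-parts" lists,
# which is why the indexing below takes [0] before use.
print(work["title"][0])
print(work["container-title"][0], work["issued"]["date-parts"][0][0])
print("DOI:", work["DOI"], "| references deposited:", work["references-count"])

# Author entries carry "given"/"family" and, where deposited, an ORCID URL.
for a in work.get("author", []):
    print(" -", a.get("given", ""), a.get("family", ""), a.get("ORCID", ""))

The array-valued fields and nested "date-parts" are general Crossref JSON conventions, so the same accessors work for any DOI returned by the /works route; only the DOI constant above is specific to this record.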