{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T17:38:58Z","timestamp":1770917938030,"version":"3.50.1"},"reference-count":118,"publisher":"Springer Science and Business Media LLC","issue":"8","license":[{"start":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T00:00:00Z","timestamp":1744156800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T00:00:00Z","timestamp":1744156800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62322604"],"award-info":[{"award-number":["62322604"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176159"],"award-info":[{"award-number":["62176159"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Natural Science Foundation of Shanghai","award":["21ZR1432200"],"award-info":[{"award-number":["21ZR1432200"]}]},{"name":"Shanghai Municipal Science and Technology Major Project","award":["2021SHZDZX0102"],"award-info":[{"award-number":["2021SHZDZX0102"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput 
Vis"],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1007\/s11263-025-02421-7","type":"journal-article","created":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T16:42:51Z","timestamp":1744216971000},"page":"5138-5160","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Segment Anything in 3D with Radiance Fields"],"prefix":"10.1007","volume":"133","author":[{"given":"Jiazhong","family":"Cen","sequence":"first","affiliation":[]},{"given":"Jiemin","family":"Fang","sequence":"additional","affiliation":[]},{"given":"Zanwei","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Chen","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Lingxi","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Xiaopeng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Qi","family":"Tian","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,4,9]]},"reference":[{"key":"2421_CR1","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","volume":"39","author":"V Badrinarayanan","year":"2017","unstructured":"Badrinarayanan, V., Kendall, A., & Cipolla, R. (2017). Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE TPAMI, 39, 2481\u20132495.","journal-title":"IEEE TPAMI"},{"key":"2421_CR2","doi-asserted-by":"crossref","unstructured":"Barron, JT., Mildenhall, B., Verbin, D., Srinivasan, PP., & Hedman, P. (2022). Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: CVPR, pp 5460\u20135469","DOI":"10.1109\/CVPR52688.2022.00539"},{"key":"2421_CR3","unstructured":"Bing, W., Chen, L., & Yang, B. (2023) Dm-nerf: 3d scene geometry decomposition and manipulation from 2d images. 
In: ICLR"},{"key":"2421_CR4","doi-asserted-by":"crossref","unstructured":"Caron, M., Touvron, H., Misra, I., J\u00e9gou, H., Mairal, J., Bojanowski, P., & Joulin, A. (2021) Emerging properties in self-supervised vision transformers. In: ICCV, pp 9630\u20139640","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"2421_CR5","unstructured":"Cen, J., Fang, J., Yang, C., Xie, L., Zhang, X., Shen, W., & Tian, Q. (2023a) Segment any 3d gaussians. arXiv preprint arXiv:2312.00860"},{"key":"2421_CR6","unstructured":"Cen, J., Zhou, Z., Fang, J., yang, c., Shen, W., Xie, L., Jiang, D., ZHANG, X., & Tian, Q. (2023b) Segment anything in 3d with nerfs. In: NeurIPS"},{"key":"2421_CR7","first-page":"333","volume":"13692","author":"A Chen","year":"2022","unstructured":"Chen, A., Xu, Z., Geiger, A., Yu, J., & Su, H. (2022). Tensorf: Tensorial radiance fields. ECCV, 13692, 333\u2013350.","journal-title":"ECCV"},{"key":"2421_CR8","unstructured":"Chen, G., & Wang, W. (2024) A survey on 3d gaussian splatting. arXiv preprint arXiv:2401.03890"},{"key":"2421_CR9","first-page":"1","volume":"62","author":"K Chen","year":"2024","unstructured":"Chen, K., Liu, C., Chen, H., Zhang, H., Li, W., Zou, Z., & Shi, Z. (2024). Rsprompter: Learning to prompt for remote sensing instance segmentation based on visual foundation model. IEEE TGRS, 62, 1\u201317.","journal-title":"IEEE TGRS"},{"key":"2421_CR10","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"L Chen","year":"2018","unstructured":"Chen, L., Papandreou, G., Kokkinos, I., Murphy, K., & Yuille, A. L. (2018). Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE TPAMI, 40, 834\u2013848.","journal-title":"IEEE TPAMI"},{"key":"2421_CR11","doi-asserted-by":"crossref","unstructured":"Chen, X., Zhao, Z., Zhang, Y., Duan, M., Qi, D., & Zhao, H. (2022b) Focalclick: Towards practical interactive image segmentation. 
In: CVPR, pp 1290\u20131299","DOI":"10.1109\/CVPR52688.2022.00136"},{"key":"2421_CR12","unstructured":"Cheng, B., Schwing, AG., & Kirillov, A. (2021) Per-pixel classification is not all you need for semantic segmentation. In: NeurIPS"},{"key":"2421_CR13","doi-asserted-by":"crossref","unstructured":"Cheng, B., Misra, I., Schwing, AG., Kirillov, A., & Girdhar, R. (2022) Masked-attention mask transformer for universal image segmentation. In: CVPR, pp 1280\u20131289","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"2421_CR14","doi-asserted-by":"crossref","unstructured":"Chu, H., Ma, W.C., Kundu, K., Urtasun, R., & Fidler, S. (2018) Surfconv: Bridging 3d and 2d convolution for rgbd images. In: CVPR, pp 3002\u20133011","DOI":"10.1109\/CVPR.2018.00317"},{"key":"2421_CR15","unstructured":"Deng, R., Cui, C., Liu, Q., Yao, T., Remedios, LW., Bao, S., Landman, BA., Wheless, LE., Coburn, LA., & Wilson, KT., et al. (2023) Segment anything model (sam) for digital pathology: Assess zero-shot segmentation on whole slide imaging. arXiv preprint arXiv:2304.04155"},{"key":"2421_CR16","doi-asserted-by":"crossref","unstructured":"Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., & Qi, X. (2023) Pla: Language-driven open-vocabulary 3d scene understanding. In: CVPR, pp 7010\u20137019","DOI":"10.1109\/CVPR52729.2023.00677"},{"key":"2421_CR17","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., & Houlsby, N. (2021) An image is worth 16x16 words: Transformers for image recognition at scale. In: ICLR"},{"key":"2421_CR18","doi-asserted-by":"crossref","unstructured":"Duan, Y., Wei, F., Dai, Q., He, Y., Chen, W., & Chen, B. (2024) 4d gaussian splatting: Towards efficient novel view synthesis for dynamic scenes. 
In: ICLR","DOI":"10.1145\/3641519.3657463"},{"key":"2421_CR19","doi-asserted-by":"publisher","first-page":"3252","DOI":"10.1007\/s11263-023-01862-2","volume":"131","author":"J Fan","year":"2023","unstructured":"Fan, J., & Zhang, Z. (2023). Toward practical weakly supervised semantic segmentation via point-level supervision. International Journal of Computer Vision, 131, 3252\u20133271.","journal-title":"International Journal of Computer Vision"},{"key":"2421_CR20","unstructured":"Fan, Z., Wang, P., Jiang, Y., Gong, X., Xu, D., & Wang, Z. (2023) Nerf-sos: Any-view self-supervised object segmentation on complex scenes. In: ICLR"},{"key":"2421_CR21","doi-asserted-by":"crossref","unstructured":"Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., & Kanazawa, A. (2022) Plenoxels: Radiance fields without neural networks. In: CVPR, pp 5491\u20135500","DOI":"10.1109\/CVPR52688.2022.00542"},{"key":"2421_CR22","doi-asserted-by":"crossref","unstructured":"Fu, X., Zhang, S., Chen, T., Lu, Y., Zhu, L., Zhou, X., Geiger, A., & Liao, Y. (2022) Panoptic nerf: 3d-to-2d label transfer for panoptic urban scene segmentation. In: 3DV, pp 1\u201311","DOI":"10.1109\/3DV57658.2022.00042"},{"key":"2421_CR23","doi-asserted-by":"crossref","unstructured":"Garbin, S.J., Kowalski, M., Johnson, M., Shotton, J., & Valentin, J. (2021) Fastnerf: High-fidelity neural rendering at 200fps. In: ICCV, pp 14326\u201314335","DOI":"10.1109\/ICCV48922.2021.01408"},{"key":"2421_CR24","doi-asserted-by":"crossref","unstructured":"Goel, R., Sirikonda, D., Saini, S., & Narayanan, P. (2023) Interactive segmentation of radiance fields. In: CVPR, pp 4201\u20134211","DOI":"10.1109\/CVPR52729.2023.00409"},{"key":"2421_CR25","first-page":"1968","volume":"7","author":"N Gosala","year":"2022","unstructured":"Gosala, N., & Valada, A. (2022). Bird\u2019s-eye-view panoptic segmentation using monocular frontal view images. 
IEEE RA-L, 7, 1968\u20131975.","journal-title":"IEEE RA-L"},{"key":"2421_CR26","first-page":"643","volume":"205","author":"H Ha","year":"2022","unstructured":"Ha, H., & Song, S. (2022). Semantic abstraction: Open-world 3d scene understanding from 2d vision-language models. CoRL, 205, 643\u2013653.","journal-title":"CoRL"},{"key":"2421_CR27","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., & Girshick, R.B. (2017) Mask R-CNN. In: ICCV, pp 2980\u20132988","DOI":"10.1109\/ICCV.2017.322"},{"key":"2421_CR28","doi-asserted-by":"crossref","unstructured":"Huang, J., & You, S. (2016) Point cloud labeling using 3d convolutional neural network. In: ICPR, pp 2670\u20132675","DOI":"10.1109\/ICPR.2016.7900038"},{"key":"2421_CR29","doi-asserted-by":"publisher","first-page":"1310","DOI":"10.1007\/s11263-023-01936-1","volume":"132","author":"N Jain","year":"2024","unstructured":"Jain, N., Kumar, S., & Van Gool, L. (2024). Learning robust multi-scale representation for neural radiance fields from unposed images. International Journal of Computer Vision, 132, 1310\u20131335.","journal-title":"International Journal of Computer Vision"},{"key":"2421_CR30","doi-asserted-by":"crossref","unstructured":"Jatavallabhula, KM., Kuwajerwala, A., Gu, Q., Omama, M., Chen, T., Li, S., Iyer, G., Saryazdi, S., Keetha, N., & Tewari, A., et al. (2023) Conceptfusion: Open-set multimodal 3d mapping. In: RSS","DOI":"10.15607\/RSS.2023.XIX.066"},{"key":"2421_CR31","doi-asserted-by":"crossref","unstructured":"Ji, GP., Fan, DP., Xu, P., Zhou, B., Cheng, MM., & Van\u00a0Gool, L. (2023) Sam struggles in concealed scenes\u2013empirical study on segment anything. SCIS 66(12)","DOI":"10.1007\/s11432-023-3881-x"},{"key":"2421_CR32","unstructured":"Katsumata, K., Vo, D.M., & Nakayama, H. (2023) An efficient 3d gaussian representation for monocular\/multi-view dynamic scenes. 
arXiv preprint arXiv:2311.12897"},{"key":"2421_CR33","unstructured":"Ke, L., Ye, M., Danelljan, M., Tai, YW., Tang, CK., & Yu, F., et al. (2023) Segment anything in high quality. In: NeurIPS"},{"key":"2421_CR34","doi-asserted-by":"crossref","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., & Drettakis, G. (2023) 3d gaussian splatting for real-time radiance field rendering. ACM TOG 42:139:1\u2013139:14","DOI":"10.1145\/3592433"},{"key":"2421_CR35","doi-asserted-by":"crossref","unstructured":"Kerr, J., Kim, CM., Goldberg, K., Kanazawa, A., & Tancik, M. (2023) Lerf: Language embedded radiance fields. In: ICCV, pp 19672\u201319682","DOI":"10.1109\/ICCV51070.2023.01807"},{"key":"2421_CR36","doi-asserted-by":"crossref","unstructured":"Kim, CM., Wu, M., Kerr, J., Goldberg, K., Tancik, M., & Kanazawa, A. (2024) Garfield: Group anything with radiance fields. In: CVPR, pp 21530\u201321539","DOI":"10.1109\/CVPR52733.2024.02034"},{"key":"2421_CR37","doi-asserted-by":"crossref","unstructured":"Kirillov, A., He, K., Girshick, RB., Rother, C., & Doll\u00e1r, P. (2019) Panoptic segmentation. In: CVPR, pp 9404\u20139413","DOI":"10.1109\/CVPR.2019.00963"},{"key":"2421_CR38","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, AC., & Lo, WY., et al. (2023) Segment anything. In: ICCV, pp 3992\u20134003","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"2421_CR39","doi-asserted-by":"crossref","unstructured":"Knapitsch, A., Park, J., Zhou, QY., & Koltun, V. (2017) Tanks and temples: Benchmarking large-scale scene reconstruction. ACM TOG 36:78:1\u201378:13","DOI":"10.1145\/3072959.3073599"},{"key":"2421_CR40","unstructured":"Kobayashi, S., Matsumoto, E., & Sitzmann, V. (2022) Decomposing nerf for editing via feature field distillation. In: NeurIPS"},{"key":"2421_CR41","doi-asserted-by":"crossref","unstructured":"Lee, B., Lee, H., Sun, X., Ali, U., & Park, E. 
(2024) Deblurring 3d gaussian splatting. arXiv preprint arXiv:2401.00834","DOI":"10.1007\/978-3-031-73636-0_8"},{"key":"2421_CR42","doi-asserted-by":"crossref","unstructured":"Li, L., Wang, W., & Yang, Y. (2023a) Logicseg: Parsing visual semantics with neural logic learning and reasoning. In: ICCV, pp 4122\u20134133","DOI":"10.1109\/ICCV51070.2023.00381"},{"key":"2421_CR43","doi-asserted-by":"crossref","unstructured":"Li, L., Wang, W., Zhou, T., Quan, R., & Yang, Y. (2023). Semantic hierarchy-aware segmentation. IEEE TPAMI, 46, 2123\u20132138.","DOI":"10.1109\/TPAMI.2023.3332435"},{"key":"2421_CR44","unstructured":"Liang, R., Zhang, J., Li, H., Yang, C., Guan, Y., & Vijaykumar, N. (2022) Spidr: Sdf-based neural point fields for illumination and deformation. arXiv preprint arXiv:2210.08398"},{"key":"2421_CR45","unstructured":"Lin, Y., Florence, P., Barron, JT., Lin, T., Rodriguez, A., & Isola, P. (2022) Nerf-supervision: Learning dense object descriptors from neural radiance fields. In: ICRA, pp 6496\u20136503"},{"key":"2421_CR46","doi-asserted-by":"crossref","unstructured":"Liu, M., Zhu, Y., Cai, H., Han, S., Ling, Z., Porikli, F., & Su, H. (2023a) Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. In: CVPR, pp 21736\u201321746","DOI":"10.1109\/CVPR52729.2023.02082"},{"key":"2421_CR47","doi-asserted-by":"crossref","unstructured":"Liu, Q., Xu, Z., Bertasius, G., & Niethammer, M. (2023b) Simpleclick: Interactive image segmentation with simple vision transformers. In: ICCV, pp 22233\u201322243","DOI":"10.1109\/ICCV51070.2023.02037"},{"key":"2421_CR48","first-page":"38","volume":"15105","author":"S Liu","year":"2024","unstructured":"Liu, S., Zeng, Z., Ren, T., Li, F., Zhang, H., Yang, J., Jiang, Q., Li, C., Yang, J., Su, H., et al. (2024). Grounding dino: Marrying dino with grounded pre-training for open-set object detection. 
ECCV, 15105, 38\u201355.","journal-title":"ECCV"},{"key":"2421_CR49","unstructured":"Liu, X., Chen, J., Yu, H., Tai, Y., & Tang, C. (2022) Unsupervised multi-view object segmentation using radiance field propagation. In: NeurIPS"},{"key":"2421_CR50","doi-asserted-by":"crossref","unstructured":"Liu, Y., Hu, B., Huang, J., Tai, YW., & Tang, CK. (2023c) Instance neural radiance field. In: ICCV, pp 787\u2013796","DOI":"10.1109\/ICCV51070.2023.00079"},{"key":"2421_CR51","unstructured":"Liu, Z., Tang, H., Lin, Y., & Han, S. (2019) Point-voxel cnn for efficient 3d deep learning. In: NeurIPS"},{"key":"2421_CR52","doi-asserted-by":"crossref","unstructured":"Luiten, J., Kopanas, G., Leibe, B., & Ramanan, D. (2024) Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV, pp 800\u2013809","DOI":"10.1109\/3DV62453.2024.00044"},{"key":"2421_CR53","doi-asserted-by":"publisher","first-page":"654","DOI":"10.1038\/s41467-024-44824-z","volume":"15","author":"J Ma","year":"2024","unstructured":"Ma, J., He, Y., Li, F., Han, L., You, C., & Wang, B. (2024). Segment anything in medical images. Nature Communications, 15, 654.","journal-title":"Nature Communications"},{"key":"2421_CR54","volume":"89","author":"MA Mazurowski","year":"2023","unstructured":"Mazurowski, M. A., Dong, H., Gu, H., Yang, J., Konz, N., & Zhang, Y. (2023). Segment anything model for medical image analysis: an experimental study. MedIA, 89, 102918.","journal-title":"MedIA"},{"key":"2421_CR55","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, PP., Cayon, RO., Kalantari, N.K., Ramamoorthi, R., Ng, R., & Kar, A. (2019) Local light field fusion: practical view synthesis with prescriptive sampling guidelines. ACM TOG 38:29:1\u201329:14","DOI":"10.1145\/3306346.3322980"},{"key":"2421_CR56","first-page":"405","volume":"12346","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P. P., Tancik, M., Barron, J. T., Ramamoorthi, R., & Ng, R. (2020). 
Nerf: Representing scenes as neural radiance fields for view synthesis. ECCV, 12346, 405\u2013421.","journal-title":"ECCV"},{"key":"2421_CR57","doi-asserted-by":"crossref","unstructured":"Mirzaei, A., Aumentado-Armstrong, T., Derpanis, KG., Kelly, J., Brubaker, MA., Gilitschenski, I., & Levinshtein, A. (2023) SPIn-NeRF: Multiview segmentation and perceptual inpainting with neural radiance fields. In: CVPR, pp 20669\u201320679","DOI":"10.1109\/CVPR52729.2023.01980"},{"key":"2421_CR58","doi-asserted-by":"crossref","unstructured":"M\u00fcller, T., Evans, A., Schied, C., & Keller, A. (2022) Instant neural graphics primitives with a multiresolution hash encoding. ACM TOG 41:102:1\u2013102:15","DOI":"10.1145\/3528223.3530127"},{"key":"2421_CR59","first-page":"45","volume":"40","author":"T Neff","year":"2021","unstructured":"Neff, T., Stadlbauer, P., Parger, M., Kurz, A., Mueller, J. H., Chaitanya, C. R. A., Kaplanyan, A., & Steinberger, M. (2021). Donerf: Towards real-time rendering of compact neural radiance fields using depth oracle networks. EGSR, 40, 45\u201359.","journal-title":"EGSR"},{"key":"2421_CR60","unstructured":"Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., K\u00f6pf, A., Yang, E.Z., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J., & Chintala, S. (2019) Pytorch: An imperative style, high-performance deep learning library. In: NeurIPS"},{"key":"2421_CR61","doi-asserted-by":"crossref","unstructured":"Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., & Funkhouser, T., et al. (2023) Openscene: 3d scene understanding with open vocabularies. In: CVPR, pp 815\u2013824","DOI":"10.1109\/CVPR52729.2023.00085"},{"key":"2421_CR62","doi-asserted-by":"crossref","unstructured":"Peng, Z., Xu, Z., Zeng, Z., Xie, L., Tian, Q., & Shen, W. (2024a) Parameter efficient fine-tuning via cross block orchestration for segment anything model. 
In: CVPR, pp 3743\u20133752","DOI":"10.1109\/CVPR52733.2024.00359"},{"key":"2421_CR63","doi-asserted-by":"crossref","unstructured":"Peng, Z., Xu, Z., Zeng, Z., Yang, X., & Shen, W. (2024b) Sam-parser: Fine-tuning sam efficiently by parameter space reconstruction. In: AAAI, pp 4515\u20134523","DOI":"10.1609\/aaai.v38i5.28250"},{"key":"2421_CR64","doi-asserted-by":"crossref","unstructured":"Qi, CR., Su, H., Mo, K., & Guibas, LJ. (2017a) Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR, pp 77\u201385","DOI":"10.1109\/CVPR.2017.16"},{"key":"2421_CR65","unstructured":"Qi, CR., Yi, L., Su, H., & Guibas, LJ. (2017b) Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: NeurIPS"},{"key":"2421_CR66","doi-asserted-by":"crossref","unstructured":"Qin, M., Li, W., Zhou, J., Wang, H., & Pfister, H. (2024) Langsplat: 3d language gaussian splatting. In: CVPR, pp 20051\u201320060","DOI":"10.1109\/CVPR52733.2024.01895"},{"key":"2421_CR67","first-page":"8748","volume":"139","author":"A Radford","year":"2021","unstructured":"Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., Krueger, G., & Sutskever, I. (2021). Learning transferable visual models from natural language supervision. ICML, 139, 8748\u20138763.","journal-title":"ICML"},{"key":"2421_CR68","doi-asserted-by":"crossref","unstructured":"Reiser, C., Peng, S., Liao, Y., & Geiger, A. (2021) Kilonerf: Speeding up neural radiance fields with thousands of tiny mlps. In: ICCV, pp 14335\u201314345","DOI":"10.1109\/ICCV48922.2021.01407"},{"key":"2421_CR69","doi-asserted-by":"crossref","unstructured":"Ren, Z., Agarwala, A., Russell, BC., Schwing, AG., & Wang, O. (2022) Neural volumetric object selection. In: CVPR, pp 6123\u20136132","DOI":"10.1109\/CVPR52688.2022.00604"},{"key":"2421_CR70","doi-asserted-by":"crossref","unstructured":"Sch\u00f6nberger, JL., & Frahm, JM. 
(2016) Structure-from-motion revisited. In: CVPR, pp 4104\u20134113","DOI":"10.1109\/CVPR.2016.445"},{"key":"2421_CR71","first-page":"501","volume":"9907","author":"JL Sch\u00f6nberger","year":"2016","unstructured":"Sch\u00f6nberger, J. L., Zheng, E., Pollefeys, M., & Frahm, J. M. (2016). Pixelwise view selection for unstructured multi-view stereo. ECCV, 9907, 501\u2013518.","journal-title":"ECCV"},{"key":"2421_CR72","doi-asserted-by":"publisher","first-page":"640","DOI":"10.1109\/TPAMI.2016.2572683","volume":"39","author":"E Shelhamer","year":"2017","unstructured":"Shelhamer, E., Long, J., & Darrell, T. (2017). Fully convolutional networks for semantic segmentation. IEEE TPAMI, 39, 640\u2013651.","journal-title":"IEEE TPAMI"},{"key":"2421_CR73","doi-asserted-by":"publisher","first-page":"9284","DOI":"10.1109\/TPAMI.2023.3246102","volume":"45","author":"W Shen","year":"2023","unstructured":"Shen, W., Peng, Z., Wang, X., Wang, H., Cen, J., Jiang, D., Xie, L., Yang, X., & Tian, Q. (2023). A survey on label-efficient deep image segmentation: Bridging the gap between weak supervision and dense prediction. IEEE TPAMI, 45, 9284\u20139305.","journal-title":"IEEE TPAMI"},{"key":"2421_CR74","doi-asserted-by":"crossref","unstructured":"Sofiiuk, K., Petrov, IA., & Konushin, A. (2022) Reviving iterative training with mask guidance for interactive segmentation. In: ICIP, pp 3141\u20133145","DOI":"10.1109\/ICIP46576.2022.9897365"},{"key":"2421_CR75","unstructured":"Stelzner, K., Kersting, K., & Kosiorek, AR. (2021) Decomposing 3d scenes into objects via unsupervised volume segmentation. arXiv preprint arXiv:2104.01148"},{"key":"2421_CR76","unstructured":"Straub, J., Whelan, T., Ma, L., Chen, Y., Wijmans, E., Green, S., Engel, JJ., Mur-Artal, R., Ren, C., & Verma, S., et al. (2019) The replica dataset: A digital replica of indoor spaces. 
arXiv preprint arXiv:1906.05797"},{"key":"2421_CR77","doi-asserted-by":"crossref","unstructured":"Strudel, R., Pinel, RG., Laptev, I., & Schmid, C. (2021) Segmenter: Transformer for semantic segmentation. In: ICCV, pp 7242\u20137252","DOI":"10.1109\/ICCV48922.2021.00717"},{"key":"2421_CR78","doi-asserted-by":"crossref","unstructured":"Sun, C., Sun, M., & Chen, H. (2022a) Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In: CVPR, pp 5449\u20135459","DOI":"10.1109\/CVPR52688.2022.00538"},{"key":"2421_CR79","doi-asserted-by":"crossref","unstructured":"Sun, C., Sun, M., & Chen, H. (2022b) Improved direct voxel grid optimization for radiance fields reconstruction. arXiv preprint arXiv:2212.13545","DOI":"10.1109\/CVPR52688.2022.00538"},{"key":"2421_CR80","first-page":"685","volume":"12373","author":"H Tang","year":"2020","unstructured":"Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., & Han, S. (2020). Searching efficient 3d architectures with sparse point-voxel convolution. ECCV, 12373, 685\u2013702.","journal-title":"ECCV"},{"key":"2421_CR81","unstructured":"Tang, L., Xiao, H., & Li, B. (2023) Can sam segment anything? when sam meets camouflaged object detection. arXiv preprint arXiv:2304.04709"},{"key":"2421_CR82","doi-asserted-by":"crossref","unstructured":"Tschernezki, V., Laina, I., Larlus, D., & Vedaldi, A. (2022) Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: 3DV, pp 443\u2013453","DOI":"10.1109\/3DV57658.2022.00056"},{"key":"2421_CR83","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, AN., Kaiser, L., & Polosukhin, I. (2017) Attention is all you need. In: NeurIPS"},{"key":"2421_CR84","unstructured":"Vora, S., Radwan, N., Greff, K., Meyer, H., Genova, K., Sajjadi, MS., Pot, E., Tagliasacchi, A., & Duckworth, D. (2022) Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes. 
TMLR 2022"},{"key":"2421_CR85","first-page":"144","volume":"11215","author":"W Wang","year":"2018","unstructured":"Wang, W., & Neumann, U. (2018). Depth-aware cnn for rgb-d segmentation. ECCV, 11215, 144\u2013161.","journal-title":"ECCV"},{"key":"2421_CR86","doi-asserted-by":"publisher","first-page":"10835","DOI":"10.1109\/TPAMI.2023.3263464","volume":"45","author":"Y Wei","year":"2023","unstructured":"Wei, Y., Liu, S., Zhou, J., & Lu, J. (2023). Depth-guided optimization of neural radiance fields for indoor multi-view stereo. IEEE TPAMI, 45, 10835\u201310849.","journal-title":"IEEE TPAMI"},{"key":"2421_CR87","doi-asserted-by":"crossref","unstructured":"Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., & Wang, X. (2024) 4d gaussian splatting for real-time dynamic scene rendering. In: CVPR, pp 20310\u201320320","DOI":"10.1109\/CVPR52733.2024.01920"},{"key":"2421_CR88","unstructured":"Wu, J., Fu, R., Fang, H., Liu, Y., Wang, Z., Xu, Y., Jin, Y., & Arbel, T. (2023) Medical sam adapter: Adapting segment anything model for medical image segmentation. arXiv preprint arXiv:2304.12620"},{"key":"2421_CR89","unstructured":"Wu, Z., Allibert, G., Stolz, C., Ma, C., & Demonceaux, C. (2022) Depth-adapted cnns for rgb-d semantic segmentation. arXiv preprint arXiv:2206.03939"},{"key":"2421_CR90","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, JM., & Luo, P. (2021) Segformer: Simple and efficient design for semantic segmentation with transformers. In: NeurIPS"},{"key":"2421_CR91","first-page":"555","volume":"12364","author":"Y Xing","year":"2020","unstructured":"Xing, Y., Wang, J., & Zeng, G. (2020). Malleable 2.5 d convolution: Learning receptive fields along the depth-axis for rgb-d scene parsing. ECCV, 12364, 555\u2013571.","journal-title":"ECCV"},{"key":"2421_CR92","doi-asserted-by":"crossref","unstructured":"Yang, C., Li, S., Fang, J., Liang, R., Xie, L., Zhang, X., Shen, W., & Tian, Q. 
(2024a) Gaussianobject: High-quality 3d object reconstruction from four views with gaussian splatting. ACM TOG 43:199:1\u2013199:13","DOI":"10.1145\/3687759"},{"key":"2421_CR93","doi-asserted-by":"crossref","unstructured":"Yang, J., Ding, R., Wang, Z., & Qi, X. (2023) Regionplc: Regional point-language contrastive learning for open-world 3d scene understanding. In: CVPR, pp 19823\u201319832","DOI":"10.1109\/CVPR52733.2024.01874"},{"key":"2421_CR94","unstructured":"Yang, Z., Yang, H., Pan, Z., Zhu, X., & Zhang, L. (2024b) Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. In: ICLR"},{"key":"2421_CR95","doi-asserted-by":"crossref","unstructured":"Ye, D., Zhou, Z., Chen, W., Xie, Y., Wang, Y., Wang, P., & Foroosh, H. (2023) Lidarmultinet: Towards a unified multi-task network for lidar perception. In: AAAI, pp 3231\u20133240","DOI":"10.1609\/aaai.v37i3.25429"},{"key":"2421_CR96","first-page":"162","volume":"15087","author":"M Ye","year":"2024","unstructured":"Ye, M., Danelljan, M., Yu, F., & Ke, L. (2024). Gaussian grouping: Segment and edit anything in 3d scenes. ECCV, 15087, 162\u2013179.","journal-title":"ECCV"},{"key":"2421_CR97","doi-asserted-by":"crossref","unstructured":"Ying, H., Yin, Y., Zhang, J., Wang, F., Yu, T., Huang, R., & Fang, L. (2024) Omniseg3d: Omniversal 3d segmentation via hierarchical contrastive learning. In: CVPR, pp 20612\u201320622","DOI":"10.1109\/CVPR52733.2024.01948"},{"key":"2421_CR98","doi-asserted-by":"crossref","unstructured":"Yu, A., Li, R., Tancik, M., Li, H., Ng, R., & Kanazawa, A. (2021) Plenoctrees for real-time rendering of neural radiance fields. In: ICCV, pp 5732\u20135741","DOI":"10.1109\/ICCV48922.2021.00570"},{"key":"2421_CR99","unstructured":"Yu, H., Guibas, LJ., & Wu, J. (2022) Unsupervised discovery of object radiance fields. In: ICLR"},{"key":"2421_CR100","unstructured":"Yu, T., Feng, R., Feng, R., Liu, J., Jin, X., Zeng, W., & Chen, Z. 
(2023) Inpaint anything: Segment anything meets image inpainting. arXiv preprint arXiv:2304.06790"},{"key":"2421_CR101","doi-asserted-by":"crossref","unstructured":"Yu, Z., Chen, A., Huang, B., Sattler, T., & Geiger, A. (2024) Mip-splatting: Alias-free 3d gaussian splatting. In: CVPR, pp 19447\u201319456","DOI":"10.1109\/CVPR52733.2024.01839"},{"key":"2421_CR102","unstructured":"Zhang, C., Han, D., Qiao, Y., Kim, JU., Bae, SH., Lee, S., & Hong, CS. (2023a) Faster segment anything: Towards lightweight sam for mobile applications. arXiv preprint arXiv:2306.14289"},{"key":"2421_CR103","doi-asserted-by":"crossref","unstructured":"Zhang, D., Liang, D., Yang, H., Zou, Z., Ye, X., Liu, Z., & Bai, X. (2023b) Sam3d: Zero-shot 3d object detection via segment anything model. SCIS 67","DOI":"10.1007\/s11432-023-3943-6"},{"key":"2421_CR104","doi-asserted-by":"crossref","unstructured":"Zhang, J., Dong, R., & Ma, K. (2023c) Clip-fo3d: Learning free open-world 3d scene representations from 2d dense clip. In: ICCV (Workshops), 2040\u20132051","DOI":"10.1109\/ICCVW60793.2023.00219"},{"key":"2421_CR105","first-page":"238","volume":"11214","author":"Z Zhang","year":"2018","unstructured":"Zhang, Z., Cui, Z., Xu, C., Jie, Z., Li, X., & Yang, J. (2018). Joint task-recursive learning for semantic segmentation and depth estimation. ECCV, 11214, 238\u2013255.","journal-title":"ECCV"},{"key":"2421_CR106","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., & Jia, J. (2017) Pyramid scene parsing network. In: CVPR, pp 6230\u20136239","DOI":"10.1109\/CVPR.2017.660"},{"key":"2421_CR107","doi-asserted-by":"crossref","unstructured":"Zhao, H., Jiang, L., Jia, J., Torr, PH., & Koltun, V. (2021) Point transformer. In: ICCV, pp 16239\u201316248","DOI":"10.1109\/ICCV48922.2021.01595"},{"key":"2421_CR108","unstructured":"Zhao, X., Ding, W., An, Y., Du, Y., Yu, T., Li, M., Tang, M., & Wang, J. (2023) Fast segment anything. 
arXiv preprint arXiv:2306.12156"},{"key":"2421_CR109","doi-asserted-by":"crossref","unstructured":"Zheng, S., Lu, J., Zhao, H., Zhu, X., Luo, Z., Wang, Y., Fu, Y., Feng, J., Xiang, T., Torr, PHS., & Zhang, L. (2021) Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: CVPR, pp 6881\u20136890","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"2421_CR110","doi-asserted-by":"crossref","unstructured":"Zhi, S., Laidlow, T., Leutenegger, S., & Davison, AJ. (2021) In-place scene labelling and understanding with implicit scene representation. In: ICCV, pp 15818\u201315827","DOI":"10.1109\/ICCV48922.2021.01554"},{"key":"2421_CR111","unstructured":"Zhou, H., Zhu, X., Song, X., Ma, Y., Wang, Z., Li, H., & Lin, D. (2020) Cylinder3d: An effective 3d framework for driving-scene lidar semantic segmentation. arXiv preprint arXiv:2008.01550"},{"key":"2421_CR112","doi-asserted-by":"crossref","unstructured":"Zhou, S., Chang, H., Jiang, S., Fan, Z., Zhu, Z., Xu, D., Chari, P., You, S., Wang, Z., & Kadambi, A. (2024) Feature 3dgs: Supercharging 3d gaussian splatting to enable distilled feature fields. In: CVPR, pp 21676\u201321685","DOI":"10.1109\/CVPR52733.2024.02048"},{"key":"2421_CR113","doi-asserted-by":"publisher","first-page":"5398","DOI":"10.1109\/TPAMI.2024.3367952","volume":"46","author":"T Zhou","year":"2024","unstructured":"Zhou, T., & Wang, W. (2024). Cross-image pixel contrasting for semantic segmentation. IEEE TPAMI, 46, 5398\u20135412.","journal-title":"IEEE TPAMI"},{"key":"2421_CR114","doi-asserted-by":"crossref","unstructured":"Zhou, T., & Wang, W. (2024). Prototype-based semantic segmentation. IEEE TPAMI, 46, 6858\u20136872.","DOI":"10.1109\/TPAMI.2024.3387116"},{"key":"2421_CR115","doi-asserted-by":"publisher","first-page":"2649","DOI":"10.1007\/s11263-023-01829-3","volume":"131","author":"J Zhu","year":"2023","unstructured":"Zhu, J., Zhu, H., Zhang, Q., Zhu, F., Ma, Z., & Cao, X. (2023). 
Pyramid nerf: Frequency guided fast radiance field optimization. International Journal of Computer Vision, 131, 2649\u20132664.","journal-title":"International Journal of Computer Vision"},{"key":"2421_CR116","first-page":"145","volume":"15097","author":"Z Zhu","year":"2024","unstructured":"Zhu, Z., Fan, Z., Jiang, Y., & Wang, Z. (2024). FSGS: real-time few-shot view synthesis using gaussian splatting. ECCV, 15097, 145\u2013163.","journal-title":"ECCV"},{"key":"2421_CR117","unstructured":"Zou, X., Yang, J., Zhang, H., Li, F., Li, L., Wang, J., Wang, L., Gao, J., & Lee, YJ. (2023). Segment everything everywhere all at once. In: NeurIPS"},{"key":"2421_CR118","doi-asserted-by":"publisher","first-page":"611","DOI":"10.1007\/s11263-024-02183-8","volume":"133","author":"X Zuo","year":"2025","unstructured":"Zuo, X., Samangouei, P., Zhou, Y., Di, Y., & Li, M. (2025). FMGS: foundation model embedded 3d gaussian splatting for holistic 3d scene understanding. IJCV, 133, 611\u2013627.","journal-title":"IJCV"}],"container-title":["International Journal of Computer 
Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02421-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02421-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02421-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T10:28:27Z","timestamp":1757154507000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02421-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,9]]},"references-count":118,"journal-issue":{"issue":"8","published-print":{"date-parts":[[2025,8]]}},"alternative-id":["2421"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02421-7","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4,9]]},"assertion":[{"value":"7 June 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 March 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 April 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors disclosed no relevant relationships.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}