{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T15:23:07Z","timestamp":1772119387282,"version":"3.50.1"},"reference-count":44,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2026,1,8]],"date-time":"2026-01-08T00:00:00Z","timestamp":1767830400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,8]],"date-time":"2026-01-08T00:00:00Z","timestamp":1767830400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"This work was supported by the National Key R&D Program of China","award":["2022YFF0902303"],"award-info":[{"award-number":["2022YFF0902303"]}]},{"name":"This work was supported by the National Key R&D Program of China","award":["2022YFF0902303"],"award-info":[{"award-number":["2022YFF0902303"]}]},{"name":"This work was supported by the National Key R&D Program of China","award":["2022YFF0902303"],"award-info":[{"award-number":["2022YFF0902303"]}]},{"name":"This work was supported by the National Key R&D Program of China","award":["2022YFF0902303"],"award-info":[{"award-number":["2022YFF0902303"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s00530-025-02104-w","type":"journal-article","created":{"date-parts":[[2026,1,8]],"date-time":"2026-01-08T11:33:58Z","timestamp":1767872038000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Minigs: efficient 3D Gaussian splatting with full factors weighted pruning for scene 
representation"],"prefix":"10.1007","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8769-6201","authenticated-orcid":false,"given":"Qing","family":"Yang","sequence":"first","affiliation":[]},{"given":"Xiaonuo","family":"Dongye","sequence":"additional","affiliation":[]},{"given":"Hanzhi","family":"Guo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2352-0896","authenticated-orcid":false,"given":"Dongdong","family":"Weng","sequence":"additional","affiliation":[]},{"given":"Le","family":"Luo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,8]]},"reference":[{"key":"2104_CR1","doi-asserted-by":"crossref","unstructured":"Barron, JT., Mildenhall, B., Tancik, M., et\u00a0al.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 5855\u20135864(2021)","DOI":"10.1109\/ICCV48922.2021.00580"},{"key":"2104_CR2","doi-asserted-by":"crossref","unstructured":"Barron, JT., Mildenhall, B., Verbin, D., et\u00a0al.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 5470\u20135479 (2022)","DOI":"10.1109\/CVPR52688.2022.00539"},{"key":"2104_CR3","doi-asserted-by":"crossref","unstructured":"Barron, JT., Mildenhall, B., Verbin, D., et\u00a0al.: Zip-nerf: Anti-aliased grid-based neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 19697\u201319705 (2023)","DOI":"10.1109\/ICCV51070.2023.01804"},{"key":"2104_CR4","doi-asserted-by":"crossref","unstructured":"Chen, A., Xu, Z., Geiger, A., et\u00a0al.: Tensorf: Tensorial radiance fields. In: European Conference on Computer Vision. 
Springer, pp 333\u2013350 (2022)","DOI":"10.1007\/978-3-031-19824-3_20"},{"key":"2104_CR5","doi-asserted-by":"crossref","unstructured":"Dongye, X., et\u00a0al.: Gaussian replacement: Gaussians-mesh joint rendering for real-time vr interaction. In: Chinese Conference on Image and Graphics Technologies. Springer Nature Singapore, Singapore (2024)","DOI":"10.1007\/978-981-97-9919-0_25"},{"key":"2104_CR6","unstructured":"Dongye, X., et\u00a0al.: Lodavatar: Hierarchical embedding and adaptive levels of detail with gaussian splatting for enhanced human avatars. arXiv preprint arXiv:2410.20789 (2024)"},{"key":"2104_CR7","doi-asserted-by":"crossref","unstructured":"Fan, Z., Wang, K., Wen, K., et\u00a0al.: Lightgaussian: Unbounded 3d gaussian compression with 15x reduction and 200+ fps. arXiv preprint arXiv:2311.17245 (2023)","DOI":"10.52202\/079017-4447"},{"key":"2104_CR8","first-page":"165","volume-title":"Eur Conf Comput Vision","author":"G Fang","year":"2024","unstructured":"Fang, G., Wang, B.: Mini-splatting: representing scenes with a constrained number of gaussians. In: Eur Conf Comput Vision, pp. 165\u2013181. Springer Nature, Cham (2024)"},{"key":"2104_CR9","doi-asserted-by":"crossref","unstructured":"Fridovich-Keil, S., Yu, A., Tancik, M., et\u00a0al.: Plenoxels: Radiance fields without neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 5501\u20135510 (2022)","DOI":"10.1109\/CVPR52688.2022.00542"},{"key":"2104_CR10","doi-asserted-by":"crossref","unstructured":"Gafni, G., Thies, J., Zollhofer, M., et\u00a0al.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 8649\u20138658 (2021)","DOI":"10.1109\/CVPR46437.2021.00854"},{"key":"2104_CR11","doi-asserted-by":"crossref","unstructured":"Gao, C., Saraf, A., Kopf, J., et\u00a0al.: Dynamic view synthesis from dynamic monocular video. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 5712\u20135721(2021)","DOI":"10.1109\/ICCV48922.2021.00566"},{"key":"2104_CR12","doi-asserted-by":"crossref","unstructured":"Goesele, M., Snavely, N., Curless, B., et\u00a0al.: Multi-view stereo for community photo collections. In: 2007 IEEE 11th International Conference on Computer Vision. IEEE, pp 1\u20138 (2007)","DOI":"10.1109\/ICCV.2007.4408933"},{"issue":"6","key":"2104_CR13","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3272127.3275084","volume":"37","author":"P Hedman","year":"2018","unstructured":"Hedman, P., Philip, J., Price, T., et al.: Deep blending for free-viewpoint image-based rendering. ACM Trans Graphics (ToG) 37(6), 1\u201315 (2018)","journal-title":"ACM Trans Graphics (ToG)"},{"key":"2104_CR14","doi-asserted-by":"crossref","unstructured":"Hong, Y., Peng, B., Xiao, H., et\u00a0al.: Headnerf: A real-time nerf-based parametric head model. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 20374\u201320384 (2022)","DOI":"10.1109\/CVPR52688.2022.01973"},{"key":"2104_CR15","doi-asserted-by":"crossref","unstructured":"Keetha, N., et\u00a0al.: Splatam: Splat track & map 3d gaussians for dense rgb-d slam. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition(2024)","DOI":"10.1109\/CVPR52733.2024.02018"},{"issue":"4","key":"2104_CR16","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3592433","volume":"42","author":"B Kerbl","year":"2023","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., et al.: 3d gaussian splatting for real-time radiance field rendering. 
ACM Trans Graph 42(4), 139\u20131 (2023)","journal-title":"ACM Trans Graph"},{"issue":"4","key":"2104_CR17","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073599","volume":"36","author":"A Knapitsch","year":"2017","unstructured":"Knapitsch, A., Park, J., Zhou, Q.Y., et al.: Tanks and temples: benchmarking large-scale scene reconstruction. ACM Trans Graphics (ToG) 36(4), 1\u201313 (2017)","journal-title":"ACM Trans Graphics (ToG)"},{"key":"2104_CR18","doi-asserted-by":"crossref","unstructured":"Lee, JC., et\u00a0al.: Compact 3d gaussian representation for radiance field. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2024)","DOI":"10.1109\/CVPR52733.2024.02052"},{"key":"2104_CR19","doi-asserted-by":"crossref","unstructured":"Li, Z., Niklaus, S., Snavely, N., et\u00a0al.: Neural scene flow fields for space-time view synthesis of dynamic scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 6498\u20136508 (2021)","DOI":"10.1109\/CVPR46437.2021.00643"},{"key":"2104_CR20","doi-asserted-by":"crossref","unstructured":"Liu, W., Guan, T., Zhu, B., et\u00a0al.: Efficientgs: Streamlining gaussian splatting for large-scale high-resolution scene representation. arXiv preprint arXiv:2404.12777 (2024)","DOI":"10.1109\/MMUL.2025.3543224"},{"key":"2104_CR21","doi-asserted-by":"crossref","unstructured":"Martin-Brualla, R., Radwan, N., Sajjadi, MS., et\u00a0al.: Nerf in the wild: Neural radiance fields for unconstrained photo collections. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 7210\u20137219 (2021)","DOI":"10.1109\/CVPR46437.2021.00713"},{"issue":"1","key":"2104_CR22","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., et al.: Nerf: representing scenes as neural radiance fields for view synthesis. Commun. ACM 65(1), 99\u2013106 (2021)","journal-title":"Commun. ACM"},{"issue":"4","key":"2104_CR23","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530127","volume":"41","author":"T M\u00fcller","year":"2022","unstructured":"M\u00fcller, T., Evans, A., Schied, C., et al.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans Graphics (TOG) 41(4), 1\u201315 (2022)","journal-title":"ACM Trans Graphics (TOG)"},{"key":"2104_CR24","unstructured":"Navaneet, K., Meibodi, KP., Koohpayegani, SA., et\u00a0al.: Compact3d: Compressing gaussian splat radiance field models with vector quantization. arXiv preprint arXiv:2311.18159 (2023)"},{"key":"2104_CR25","doi-asserted-by":"crossref","unstructured":"Niedermayr, S., Stumpfegger, J., Westermann, R.: Compressed 3d gaussian splatting for accelerated novel view synthesis. arXiv preprint arXiv:2401.02436 [cs.CV] (2023)","DOI":"10.1109\/CVPR52733.2024.00985"},{"key":"2104_CR26","doi-asserted-by":"crossref","unstructured":"Niemeyer, M., Manhardt, F., Rakotosaona, MJ., et\u00a0al.:Radsplat: Radiance field-informed gaussian splatting for robust real-time rendering with 900+ fps. arXiv preprint arXiv:2403.13806 (2024)","DOI":"10.1109\/3DV66043.2025.00018"},{"key":"2104_CR27","doi-asserted-by":"crossref","unstructured":"Rosinol, A., Leonard, JJ., Carlone, L.: Nerf-slam: Real-time dense monocular slam with neural radiance fields. In: 2023 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS). 
IEEE (2023)","DOI":"10.1109\/IROS55552.2023.10341922"},{"key":"2104_CR28","doi-asserted-by":"crossref","unstructured":"Schonberger, JL., Frahm, JM.: Structure-from-motion revisited. In: Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.445"},{"key":"2104_CR29","doi-asserted-by":"crossref","unstructured":"Snavely, N., Seitz, SM., Szeliski, R.: Photo tourism: Exploring photo collections in 3d. In: ACM SIGGRAPH 2006 Papers, pp 835\u2013846 (2006)","DOI":"10.1145\/1141911.1141964"},{"key":"2104_CR30","doi-asserted-by":"crossref","unstructured":"Sun, C., Sun, M., Chen, HT.: Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 5459\u20135469 (2022)","DOI":"10.1109\/CVPR52688.2022.00538"},{"key":"2104_CR31","doi-asserted-by":"crossref","unstructured":"Wang, Z., Simoncelli, EP., Bovik, AC.: Multiscale structural similarity for image quality assessment. In: The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers, 2003, Ieee, pp 1398\u20131402 (2003)","DOI":"10.1109\/ACSSC.2003.1292216"},{"key":"2104_CR32","doi-asserted-by":"crossref","unstructured":"Yang, Q., Zhang, H.: F3fad: Fast 3d facial avatar digitization with xr cloud service. In: 2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC\/DSS\/SmartCity\/DependSys). IEEE, pp 2227\u20132234 (2022)","DOI":"10.1109\/HPCC-DSS-SmartCity-DependSys57074.2022.00329"},{"key":"2104_CR33","doi-asserted-by":"crossref","unstructured":"Yang, Q., Weng, D., Zhang, H.: Fourier feature activated neural-field modelling for human avatar representation. In: 2023 IEEE Smart World Congress (SWC). 
IEEE (2023)","DOI":"10.1109\/SWC57546.2023.10449023"},{"key":"2104_CR34","doi-asserted-by":"crossref","unstructured":"Yang, Q., Weng, D., Liu, Y.: Utilizing periodic feature-enhanced neural-field modeling for the photorealistic representation of human head avatars. The Visual Computer pp 1\u201312 (2024)","DOI":"10.1007\/s00371-024-03299-1"},{"key":"2104_CR35","unstructured":"Yang, R., et\u00a0al.: Spectrally pruned gaussian fields with neural compensation. arXiv preprint arXiv:2405.00676 (2024)"},{"key":"2104_CR36","doi-asserted-by":"crossref","unstructured":"Ye, Y., You, G., Fwu, JK., et\u00a0al.: Channel pruning via optimal thresholding. In: Neural Information Processing: 27th International Conference, ICONIP 2020, Bangkok, Thailand, November 18\u201322, 2020, Proceedings, Part V 27, Springer, pp 508\u2013516 (2020)","DOI":"10.1007\/978-3-030-63823-8_58"},{"key":"2104_CR37","doi-asserted-by":"crossref","unstructured":"Ye, Z., et\u00a0al.: Absgs: Recovering fine details in 3d gaussian splatting. In: Proceedings of the 32nd ACM International Conference on Multimedia (2024)","DOI":"10.1145\/3664647.3681361"},{"key":"2104_CR38","doi-asserted-by":"crossref","unstructured":"Yu, A., Li, R., Tancik, M., et\u00a0al.: Plenoctrees for real-time rendering of neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 5752\u20135761 (2021)","DOI":"10.1109\/ICCV48922.2021.00570"},{"issue":"6","key":"2104_CR39","first-page":"1","volume":"43","author":"Z Yu","year":"2024","unstructured":"Yu, Z., Sattler, T., Geiger, A.: Gaussian opacity fields: efficient adaptive surface reconstruction in unbounded scenes. ACM Trans Graphics (TOG) 43(6), 1\u201313 (2024)","journal-title":"ACM Trans Graphics (TOG)"},{"key":"2104_CR40","unstructured":"Zhang, K., Riegler, G., Snavely, N., et\u00a0al.: Nerf++: Analyzing and improving neural radiance fields. 
arXiv preprint arXiv:2010.07492 (2020)"},{"key":"2104_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, AA., et\u00a0al.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"2104_CR42","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et\u00a0al.: Go-slam: Global optimization for consistent 3d instant reconstruction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (2023)","DOI":"10.1109\/ICCV51070.2023.00345"},{"key":"2104_CR43","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Abrevaya, VF., M.C., et\u00a0al.: Im avatar: Implicit morphable head avatars from videos. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 13545\u201313555 (2022a)","DOI":"10.1109\/CVPR52688.2022.01318"},{"key":"2104_CR44","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Yifan, W., Wetzstein, G., et\u00a0al.: Pointavatar: Deformable point-based head avatars from videos. 
arXiv preprint arXiv:2212.08377(2022b)","DOI":"10.1109\/CVPR52729.2023.02017"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02104-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-025-02104-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02104-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T04:20:24Z","timestamp":1770783624000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-025-02104-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,8]]},"references-count":44,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2104"],"URL":"https:\/\/doi.org\/10.1007\/s00530-025-02104-w","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-6897837\/v1","asserted-by":"object"}]},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,8]]},"assertion":[{"value":"15 June 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 January 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of 
interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"66"}}