{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T13:34:45Z","timestamp":1742996085232,"version":"3.40.3"},"publisher-location":"Cham","reference-count":49,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783031270765"},{"type":"electronic","value":"9783031270772"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-27077-2_24","type":"book-chapter","created":{"date-parts":[[2023,3,28]],"date-time":"2023-03-28T05:03:10Z","timestamp":1679979790000},"page":"306-317","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["SRes-NeRF: Improved Neural Radiance Fields for\u00a0Realism and\u00a0Accuracy of\u00a0Specular Reflections"],"prefix":"10.1007","author":[{"given":"Shufan","family":"Dai","sequence":"first","affiliation":[]},{"given":"Yangjie","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Pengsong","family":"Duan","sequence":"additional","affiliation":[]},{"given":"Xianfu","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,3,29]]},"reference":[{"key":"24_CR1","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-NeRF: a multiscale 
representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5855\u20135864 (2021)","DOI":"10.1109\/ICCV48922.2021.00580"},{"key":"24_CR2","unstructured":"Bi, S., et al.: Neural reflectance fields for appearance acquisition. arXiv preprint arXiv:2008.03824 (2020)"},{"key":"24_CR3","doi-asserted-by":"crossref","unstructured":"Boss, M., Braun, R., Jampani, V., Barron, J.T., Liu, C., Lensch, H.: Nerd: neural reflectance decomposition from image collections. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12684\u201312694 (2021)","DOI":"10.1109\/ICCV48922.2021.01245"},{"key":"24_CR4","doi-asserted-by":"crossref","unstructured":"Chan, E.R., Monteiro, M., Kellnhofer, P., Wu, J., Wetzstein, G.: pi-GAN: periodic implicit generative adversarial networks for 3d-aware image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5799\u20135809 (2021)","DOI":"10.1109\/CVPR46437.2021.00574"},{"key":"24_CR5","unstructured":"Deng, B., Barron, J.T., Srinivasan, P.P.: JaxNeRF: an efficient JAX implementation of nerf (2020). https:\/\/github.com\/google-research\/google-research\/tree\/master\/jaxnerf"},{"key":"24_CR6","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"24_CR7","doi-asserted-by":"crossref","unstructured":"Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance fields without neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5501\u20135510 (2022)","DOI":"10.1109\/CVPR52688.2022.00542"},{"key":"24_CR8","doi-asserted-by":"crossref","unstructured":"Gafni, G., Thies, J., Zollhofer, M., Nie\u00dfner, M.: Dynamic neural radiance fields for monocular 4D facial avatar reconstruction. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8649\u20138658 (2021)","DOI":"10.1109\/CVPR46437.2021.00854"},{"key":"24_CR9","doi-asserted-by":"crossref","unstructured":"Gao, C., Saraf, A., Kopf, J., Huang, J.B.: Dynamic view synthesis from dynamic monocular video. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5712\u20135721 (2021)","DOI":"10.1109\/ICCV48922.2021.00566"},{"key":"24_CR10","doi-asserted-by":"crossref","unstructured":"Garbin, S.J., Kowalski, M., Johnson, M., Shotton, J., Valentin, J.: FastNeRF: High-fidelity neural rendering at 200fps. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 14346\u201314355 (2021)","DOI":"10.1109\/ICCV48922.2021.01408"},{"key":"24_CR11","unstructured":"Glorot, X., Bordes, A., Bengio, Y.: Deep sparse rectifier neural networks. In: Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics, pp. 315\u2013323. JMLR Workshop and Conference Proceedings (2011)"},{"key":"24_CR12","doi-asserted-by":"crossref","unstructured":"Hedman, P., Srinivasan, P.P., Mildenhall, B., Barron, J.T., Debevec, P.: Baking neural radiance fields for real-time view synthesis. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5875\u20135884 (2021)","DOI":"10.1109\/ICCV48922.2021.00582"},{"key":"24_CR13","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. In: International Conference on Machine Learning, pp. 448\u2013456. PMLR (2015)"},{"key":"24_CR14","unstructured":"Jacot, A., Gabriel, F., Hongler, C.: Neural tangent kernel: convergence and generalization in neural networks. In: Advances in Neural Information Processing Systems, vol. 
31 (2018)"},{"key":"24_CR15","doi-asserted-by":"crossref","unstructured":"Jeong, Y., Ahn, S., Choy, C., Anandkumar, A., Cho, M., Park, J.: Self-calibrating neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5846\u20135854 (2021)","DOI":"10.1109\/ICCV48922.2021.00579"},{"issue":"4","key":"24_CR16","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073599","volume":"36","author":"A Knapitsch","year":"2017","unstructured":"Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: benchmarking large-scale scene reconstruction. ACM Trans. Graphics (ToG) 36(4), 1\u201313 (2017)","journal-title":"ACM Trans. Graphics (ToG)"},{"key":"24_CR17","unstructured":"Kosiorek, A.R., et al.: NeRF-VAE: a geometry aware 3D scene generative model. In: International Conference on Machine Learning, pp. 5742\u20135752. PMLR (2021)"},{"key":"24_CR18","doi-asserted-by":"crossref","unstructured":"Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural scene flow fields for space-time view synthesis of dynamic scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6498\u20136508 (2021)","DOI":"10.1109\/CVPR46437.2021.00643"},{"key":"24_CR19","doi-asserted-by":"crossref","unstructured":"Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: BARF: bundle-adjusting neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5741\u20135751 (2021)","DOI":"10.1109\/ICCV48922.2021.00569"},{"key":"24_CR20","doi-asserted-by":"crossref","unstructured":"Lindell, D.B., Martel, J.N., Wetzstein, G.: Autoint: automatic integration for fast neural volume rendering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
14556\u201314565 (2021)","DOI":"10.1109\/CVPR46437.2021.01432"},{"key":"24_CR21","first-page":"15651","volume":"33","author":"L Liu","year":"2020","unstructured":"Liu, L., Gu, J., Zaw Lin, K., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. Adv. Neural. Inf. Process. Syst. 33, 15651\u201315663 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"issue":"4","key":"24_CR22","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3306346.3323020","volume":"38","author":"S Lombardi","year":"2019","unstructured":"Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: learning dynamic renderable volumes from images. ACM Trans. Graphics 38(4), 1\u201314 (2019). https:\/\/doi.org\/10.1145\/3306346.3323020","journal-title":"ACM Trans. Graphics"},{"key":"24_CR23","doi-asserted-by":"crossref","unstructured":"Martin-Brualla, R., Radwan, N., Sajjadi, M.S., Barron, J.T., Dosovitskiy, A., Duckworth, D.: NeRF in the wild: neural radiance fields for unconstrained photo collections. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7210\u20137219 (2021)","DOI":"10.1109\/CVPR46437.2021.00713"},{"key":"24_CR24","doi-asserted-by":"crossref","unstructured":"Meng, Q., et al.: GNeRF: GAN-based neural radiance field without posed camera. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6351\u20136361 (2021)","DOI":"10.1109\/ICCV48922.2021.00629"},{"key":"24_CR25","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-030-58452-8_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 
405\u2013421. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_24"},{"key":"24_CR26","doi-asserted-by":"crossref","unstructured":"Noguchi, A., Sun, X., Lin, S., Harada, T.: Neural articulated radiance field. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5762\u20135772 (2021)","DOI":"10.1109\/ICCV48922.2021.00571"},{"key":"24_CR27","doi-asserted-by":"crossref","unstructured":"Park, K., et al.: Deformable neural radiance fields (2020)","DOI":"10.1109\/ICCV48922.2021.00581"},{"key":"24_CR28","doi-asserted-by":"crossref","unstructured":"Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-NeRF: neural radiance fields for dynamic scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10318\u201310327 (2021)","DOI":"10.1109\/CVPR46437.2021.01018"},{"key":"24_CR29","unstructured":"Rahaman, N., et al.: On the spectral bias of neural networks. In: International Conference on Machine Learning, pp. 5301\u20135310. PMLR (2019)"},{"key":"24_CR30","unstructured":"Ramachandran, P., Zoph, B., Le, Q.V.: Searching for activation functions. arXiv preprint arXiv:1710.05941 (2017)"},{"key":"24_CR31","doi-asserted-by":"crossref","unstructured":"Rebain, D., Jiang, W., Yazdani, S., Li, K., Yi, K.M., Tagliasacchi, A.: DeRF: decomposed radiance fields. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14153\u201314161 (2021)","DOI":"10.1109\/CVPR46437.2021.01393"},{"key":"24_CR32","doi-asserted-by":"crossref","unstructured":"Reiser, C., Peng, S., Liao, Y., Geiger, A.: KiloNeRF: speeding up neural radiance fields with thousands of tiny MLPs. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
14335\u201314345 (2021)","DOI":"10.1109\/ICCV48922.2021.01407"},{"key":"24_CR33","first-page":"20154","volume":"33","author":"K Schwarz","year":"2020","unstructured":"Schwarz, K., Liao, Y., Niemeyer, M., Geiger, A.: GRAF: generative radiance fields for 3d-aware image synthesis. Adv. Neural. Inf. Process. Syst. 33, 20154\u201320166 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"24_CR34","first-page":"7462","volume":"33","author":"V Sitzmann","year":"2020","unstructured":"Sitzmann, V., Martel, J., Bergman, A., Lindell, D., Wetzstein, G.: Implicit neural representations with periodic activation functions. Adv. Neural. Inf. Process. Syst. 33, 7462\u20137473 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"24_CR35","unstructured":"Sitzmann, V., Zollh\u00f6fer, M., Wetzstein, G.: Scene representation networks: Continuous 3d-structure-aware neural scene representations. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"24_CR36","doi-asserted-by":"crossref","unstructured":"Srinivasan, P.P., Deng, B., Zhang, X., Tancik, M., Mildenhall, B., Barron, J.T.: NeRV: neural reflectance and visibility fields for relighting and view synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7495\u20137504 (2021)","DOI":"10.1109\/CVPR46437.2021.00741"},{"key":"24_CR37","doi-asserted-by":"crossref","unstructured":"Sun, C., Sun, M., Chen, H.T.: Direct voxel grid optimization: super-fast convergence for radiance fields reconstruction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5459\u20135469 (2022)","DOI":"10.1109\/CVPR52688.2022.00538"},{"key":"24_CR38","doi-asserted-by":"crossref","unstructured":"Tancik, M., et al.: Learned initializations for optimizing coordinate-based neural representations. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
2846\u20132855 (2021)","DOI":"10.1109\/CVPR46437.2021.00287"},{"key":"24_CR39","first-page":"7537","volume":"33","author":"M Tancik","year":"2020","unstructured":"Tancik, M., et al.: Fourier features let networks learn high frequency functions in low dimensional domains. Adv. Neural. Inf. Process. Syst. 33, 7537\u20137547 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"24_CR40","first-page":"24261","volume":"34","author":"IO Tolstikhin","year":"2021","unstructured":"Tolstikhin, I.O., et al.: MLP-mixer: an all-MLP architecture for vision. Adv. Neural. Inf. Process. Syst. 34, 24261\u201324272 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"24_CR41","doi-asserted-by":"crossref","unstructured":"Touvron, H., et al.: ResMLP: feedforward networks for image classification with data-efficient training. arXiv preprint arXiv:2105.03404 (2021)","DOI":"10.1109\/TPAMI.2022.3206148"},{"key":"24_CR42","doi-asserted-by":"crossref","unstructured":"Tretschk, E., Tewari, A., Golyanik, V., Zollh\u00f6fer, M., Lassner, C., Theobalt, C.: Non-rigid neural radiance fields: reconstruction and novel view synthesis of a dynamic scene from monocular video. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12959\u201312970 (2021)","DOI":"10.1109\/ICCV48922.2021.01272"},{"key":"24_CR43","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"24_CR44","unstructured":"Wang, Z., Wu, S., Xie, W., Chen, M., Prisacariu, V.A.: NeRF-: neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064 (2021)"},{"key":"24_CR45","doi-asserted-by":"crossref","unstructured":"Xian, W., Huang, J.B., Kopf, J., Kim, C.: Space-time neural irradiance fields for free-viewpoint video. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
9421\u20139431 (2021)","DOI":"10.1109\/CVPR46437.2021.00930"},{"key":"24_CR46","doi-asserted-by":"crossref","unstructured":"Yao, Y., et al.: BlendedMVS: a large-scale dataset for generalized multi-view stereo networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1790\u20131799 (2020)","DOI":"10.1109\/CVPR42600.2020.00186"},{"key":"24_CR47","doi-asserted-by":"crossref","unstructured":"Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: INeRF: inverting neural radiance fields for pose estimation. In: 2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 1323\u20131330. IEEE (2021)","DOI":"10.1109\/IROS51168.2021.9636708"},{"key":"24_CR48","doi-asserted-by":"crossref","unstructured":"Yu, A., Li, R., Tancik, M., Li, H., Ng, R., Kanazawa, A.: PlenOctrees for real-time rendering of neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5752\u20135761 (2021)","DOI":"10.1109\/ICCV48922.2021.00570"},{"issue":"6","key":"24_CR49","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3478513.3480500","volume":"40","author":"X Zhang","year":"2021","unstructured":"Zhang, X., Srinivasan, P.P., Deng, B., Debevec, P., Freeman, W.T., Barron, J.T.: NeRFactor: neural factorization of shape and reflectance under an unknown illumination. ACM Trans. Graphics (TOG) 40(6), 1\u201318 (2021)","journal-title":"ACM Trans. 
Graphics (TOG)"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-27077-2_24","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,28]],"date-time":"2023-03-28T05:13:19Z","timestamp":1679980399000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-27077-2_24"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031270765","9783031270772"],"references-count":49,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-27077-2_24","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"29 March 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Bergen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Norway","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 January 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"12 January 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Conftool Pro","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"267","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"86","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}