{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T23:57:18Z","timestamp":1771459038690,"version":"3.50.1"},"reference-count":41,"publisher":"MDPI AG","issue":"19","license":[{"start":{"date-parts":[[2022,10,9]],"date-time":"2022-10-09T00:00:00Z","timestamp":1665273600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"National Natural Science Fund","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"National Natural Science Fund","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"National Natural Science Fund","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"National Natural Science Fund","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"National Natural Science Fund","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"National Natural Science Fund","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"National Natural Science Fund","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]},{"name":"National Key Research and Development Program Project","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"National Key Research and Development Program Project","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"National Key Research and Development Program Project","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"National Key Research and Development Program Project","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"National Key Research and Development Program Project","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"National 
Key Research and Development Program Project","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"National Key Research and Development Program Project","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"Beijing Science and Technology Innovation Service","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng Huizhi\u201d Innovation and Education Promotion Fund","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng Huizhi\u201d Innovation and Education Promotion Fund","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng Huizhi\u201d Innovation and Education Promotion Fund","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng 
Huizhi\u201d Innovation and Education Promotion Fund","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng Huizhi\u201d Innovation and Education Promotion Fund","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng Huizhi\u201d Innovation and Education Promotion Fund","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"Science and Technology Development Center for the Ministry of Education \u201cTiancheng Huizhi\u201d Innovation and Education Promotion Fund","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of Education","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of Education","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of Education","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of Education","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of Education","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of Education","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"Cooperative Education Project of Higher Education Department of the Ministry of 
Education","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"Science and Technology Project of Beijing Education Commission","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]},{"name":"Hangzhou Innovation Institute of Beihang University","award":["61371143"],"award-info":[{"award-number":["61371143"]}]},{"name":"Hangzhou Innovation Institute of Beihang University","award":["2020YFC0811004"],"award-info":[{"award-number":["2020YFC0811004"]}]},{"name":"Hangzhou Innovation Institute of Beihang University","award":["110052971921\/002"],"award-info":[{"award-number":["110052971921\/002"]}]},{"name":"Hangzhou Innovation Institute of Beihang University","award":["2018A03029"],"award-info":[{"award-number":["2018A03029"]}]},{"name":"Hangzhou Innovation Institute of Beihang University","award":["201902083001"],"award-info":[{"award-number":["201902083001"]}]},{"name":"Hangzhou Innovation Institute of Beihang University","award":["KM202110009002"],"award-info":[{"award-number":["KM202110009002"]}]},{"name":"Hangzhou Innovation Institute 
of Beihang University","award":["2020-Y3-A-014"],"award-info":[{"award-number":["2020-Y3-A-014"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Sensors"],"abstract":"<jats:p>A coarse-to-fine multi-view stereo network with Transformer (MVS-T) is proposed to solve the problems of sparse point clouds and low accuracy in reconstructing 3D scenes from low-resolution multi-view images. The network uses a coarse-to-fine strategy to estimate the depth of the image progressively and reconstruct the 3D point cloud. First, pyramids of image features are constructed to transfer the semantic and spatial information among features at different scales. Then, the Transformer module is employed to aggregate the image\u2019s global context information and capture the internal correlation of the feature map. Finally, the image depth is inferred by constructing a cost volume and iterating through the various stages. For 3D reconstruction of low-resolution images, experiment results show that the 3D point cloud obtained by the network is more accurate and complete, which outperforms other advanced algorithms in terms of objective metrics and subjective visualization.<\/jats:p>","DOI":"10.3390\/s22197659","type":"journal-article","created":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T05:12:21Z","timestamp":1665378741000},"page":"7659","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["MVS-T: A Coarse-to-Fine Multi-View Stereo Network with Transformer for Low-Resolution Images 3D Reconstruction"],"prefix":"10.3390","volume":"22","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2430-4183","authenticated-orcid":false,"given":"Ruiming","family":"Jia","sequence":"first","affiliation":[{"name":"School of Information Science and Technology, North China University of Technology, Beijing 100144, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8341-7041","authenticated-orcid":false,"given":"Xin","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, North China University of Technology, Beijing 100144, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1611-6043","authenticated-orcid":false,"given":"Jiali","family":"Cui","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, North China University of Technology, Beijing 100144, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6106-0416","authenticated-orcid":false,"given":"Zhenghui","family":"Hu","sequence":"additional","affiliation":[{"name":"Hangzhou Innovation Institute, Beihang University, Hangzhou 310051, China"}]}],"member":"1968","published-online":{"date-parts":[[2022,10,9]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","unstructured":"Zhao, J., Liu, S., and Li, J. (2022). Research and Implementation of Autonomous Navigation for Mobile Robots Based on SLAM Algorithm under ROS. Sensors, 22.","DOI":"10.3390\/s22114172"},{"key":"ref_2","doi-asserted-by":"crossref","first-page":"102136","DOI":"10.1016\/j.rcim.2021.102136","article-title":"A robot hand-eye calibration method of line laser sensor based on 3D reconstruction","volume":"71","author":"Li","year":"2021","journal-title":"Robot. Comput. Integr. Manuf."},{"key":"ref_3","doi-asserted-by":"crossref","first-page":"2012","DOI":"10.1109\/TVCG.2020.2973477","article-title":"Live Semantic 3D Perception for Immersive Augmented Reality","volume":"26","author":"Han","year":"2020","journal-title":"IEEE Trans. Vis. Comput. Graph."},{"key":"ref_4","doi-asserted-by":"crossref","unstructured":"Barreto, M.A., Perez-Gonzalez, J., Herr, H.M., and Huegel, J.C. (2022). ARACAM: A RGB-D Multi-View Photogrammetry System for Lower Limb 3D Reconstruction Applications. 
Sensors, 22.","DOI":"10.3390\/s22072443"},{"key":"ref_5","first-page":"473","article-title":"3D MODELING of GIRIFALCO FORTRESS","volume":"42","author":"Masiero","year":"2019","journal-title":"ISPRS Ann. Photogramm. Remote Sens. Spat. Inf. Sci."},{"key":"ref_6","doi-asserted-by":"crossref","first-page":"328","DOI":"10.1109\/TPAMI.2007.1166","article-title":"Stereo processing by semiglobal matching and mutual information","volume":"30","year":"2008","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_7","doi-asserted-by":"crossref","unstructured":"Schonberger, J.L., and Frahm, J.M. (2016, January 27\u201330). Structure-from-Motion Revisited. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.445"},{"key":"ref_8","doi-asserted-by":"crossref","first-page":"153","DOI":"10.1007\/s11263-016-0902-9","article-title":"Large-Scale Data for Multiple-View Stereopsis","volume":"120","author":"Jensen","year":"2016","journal-title":"Int. J. Comput. Vis."},{"key":"ref_9","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3072959.3073599","article-title":"Tanks and temples: Benchmarking large-scale scene reconstruction","volume":"36","author":"Knapitsch","year":"2017","journal-title":"ACM Trans. Graph."},{"key":"ref_10","doi-asserted-by":"crossref","unstructured":"Yao, Y., Luo, Z., Li, S., Fang, T., and Quan, L. (2018, January 8\u201314). MVSNet: Depth inference for unstructured multi-view stereo. Proceedings of the European Conference on Computer Vision, Munich, Germany.","DOI":"10.1007\/978-3-030-01237-3_47"},{"key":"ref_11","doi-asserted-by":"crossref","unstructured":"Yang, J., Mao, W., Alvarez, J.M., and Liu, M. (2020, January 16\u201318). Cost volume pyramid based depth inference for multi-view stereo. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Seattle, WA, USA.","DOI":"10.1109\/CVPR42600.2020.00493"},{"key":"ref_12","doi-asserted-by":"crossref","first-page":"448","DOI":"10.1016\/j.isprsjprs.2021.03.010","article-title":"Attention aware cost volume pyramid based multi-view stereo network for 3D reconstruction","volume":"175","author":"Yu","year":"2021","journal-title":"ISPRS J. Photogramm. Remote Sens."},{"key":"ref_13","doi-asserted-by":"crossref","unstructured":"Wei, Z., Zhu, Q., Min, C., Chen, Y., and Wang, G. (2021, January 10\u201317). AA-RMVSNet: Adaptive Aggregation Recurrent Multi-view Stereo Network. Proceedings of the IEEE International Conference on Computer Vision, Montreal, QC, Canada.","DOI":"10.1109\/ICCV48922.2021.00613"},{"key":"ref_14","doi-asserted-by":"crossref","unstructured":"Gu, X., Fan, Z., Zhu, S., Dai, Z., Tan, F., and Tan, P. (2020, January 13\u201319). Cascade Cost Volume for High-Resolution Multi-View Stereo and Stereo Matching. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Seattle, WA, USA.","DOI":"10.1109\/CVPR42600.2020.00257"},{"key":"ref_15","doi-asserted-by":"crossref","unstructured":"Ma, X., Gong, Y., Wang, Q., Huang, J., Chen, L., and Yu, F. (2021, January 10\u201317). EPP-MVSNet: Epipolar-assembling based Depth Prediction for Multi-view Stereo. Proceedings of the 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), Montreal, QC, Canada.","DOI":"10.1109\/ICCV48922.2021.00568"},{"key":"ref_16","doi-asserted-by":"crossref","unstructured":"Wang, F., Galliani, S., Vogel, C., Speciale, P., and Pollefeys, M. (2021, January 19\u201325). PatchMatchNet: Learned multi-view patchmatch stereo. 
Proceedings of the 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Virtual.","DOI":"10.1109\/CVPR46437.2021.01397"},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"Yan, J., Wei, Z., Yi, H., Ding, M., Zhang, R., Chen, Y., Wang, G., and Tai, Y.W. (2020, January 23\u201328). Dense Hybrid Recurrent Multi-view Stereo Net with Dynamic Consistency Checking. Proceedings of the European Conference on Computer Vision, Glasgow, UK.","DOI":"10.1007\/978-3-030-58548-8_39"},{"key":"ref_18","first-page":"5999","article-title":"Attention is all you need","volume":"2017-Decem","author":"Vaswani","year":"2017","journal-title":"Adv. Neural Inf. Process Syst."},{"key":"ref_19","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., and Gelly, S. (2021, January 4). An image is worth 16 \u00d7 16 words: Transformers for image recognition at scale. Proceedings of the International Conference on Learning Representations, Vienna, Austria."},{"key":"ref_20","doi-asserted-by":"crossref","unstructured":"He, C., Li, R., Li, S., and Zhang, L. (2022, January 18\u201324). Voxel Set Transformer: A Set-to-Set Approach to 3D Object Detection from Point Clouds. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.00823"},{"key":"ref_21","doi-asserted-by":"crossref","unstructured":"Xu, L., Ouyang, W., Bennamoun, M., Boussaid, F., and Xu, D. (2022, January 18\u201324). Multi-class Token Transformer for Weakly Supervised Semantic Segmentation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.00427"},{"key":"ref_22","doi-asserted-by":"crossref","unstructured":"Ding, Y., Yuan, W., Zhu, Q., Zhang, H., Liu, X., Wang, Y., and Liu, X. (2022, January 18\u201324). 
TransMVSNet: Global Context-aware Multi-view Stereo Network with Transformers. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.00839"},{"key":"ref_23","unstructured":"Zhu, J., Peng, B., Li, W., Shen, H., Zhang, Z., and Lei, J. (2021). Multi-View Stereo with Transformer. arXiv."},{"key":"ref_24","doi-asserted-by":"crossref","unstructured":"Wang, X., Zhu, Z., Qin, F., Ye, Y., Huang, G., Chi, X., He, Y., and Wang, X. (2022). MVSTER: Epipolar Transformer for Efficient Multi-View Stereo. arXiv.","DOI":"10.1007\/978-3-031-19821-2_33"},{"key":"ref_25","unstructured":"Liao, J., Ding, Y., Shavit, Y., Huang, D., Ren, S., Guo, J., Feng, W., and Zhang, K. (2022). WT-MVSNet: Window-based Transformers for Multi-view Stereo. arXiv."},{"key":"ref_26","doi-asserted-by":"crossref","unstructured":"Furukawa, Y., Curless, B., Seitz, S.M., and Szeliski, R. (2010, January 13\u201318). Towards Internet-scale Multi-view Stereo. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, San Francisco, CA, USA.","DOI":"10.1109\/CVPR.2010.5539802"},{"key":"ref_27","doi-asserted-by":"crossref","first-page":"48","DOI":"10.1016\/j.autcon.2012.09.017","article-title":"Image-based 3D scene reconstruction and exploration in augmented reality","volume":"33","author":"Yang","year":"2013","journal-title":"Autom. Constr."},{"key":"ref_28","doi-asserted-by":"crossref","first-page":"55","DOI":"10.1007\/s10462-012-9365-8","article-title":"Visual simultaneous localization and mapping: A survey","volume":"43","year":"2015","journal-title":"Artif. Intell. Rev."},{"key":"ref_29","unstructured":"Redmon, J., and Farhadi, A. (2018). YOLO v.3. arXiv."},{"key":"ref_30","first-page":"3827","article-title":"Digging into self-supervised monocular depth estimation","volume":"2019-Octob","author":"Godard","year":"2019","journal-title":"Proc. IEEE Int. Conf. Comput. 
Vis."},{"key":"ref_31","doi-asserted-by":"crossref","unstructured":"Rozumnyi, D., Oswald, M., Ferrari, V., Matas, J., and Pollefeys, M. (2020). DeFMO: Deblurring and shape recovery of fast moving objects. arXiv.","DOI":"10.1109\/CVPR46437.2021.00346"},{"key":"ref_32","first-page":"2326","article-title":"SurfaceNet: An End-to-End 3D Neural Network for Multiview Stereopsis","volume":"2017-Octob","author":"Ji","year":"2017","journal-title":"Proc. IEEE Int. Conf. Comput. Vis."},{"key":"ref_33","first-page":"365","article-title":"Learning a multi-view stereo machine","volume":"2017-Decem","author":"Kar","year":"2017","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref_34","unstructured":"Touvron, H., Massa, F., Cord, M., and Sablayrolles, A. (2012). Training data-efficient image transformers & distillation through attention. arXiv."},{"key":"ref_35","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. (2021, January 10\u201317). Swin Transformer: Hierarchical Vision Transformer using Shifted Windows. Proceedings of the 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), Montreal, QC, Canada.","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref_36","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., and Brox, T. (2015, January 5\u20139). UNet: Convolutional Networks for Biomedical Image Segmentation. Proceedings of the International Conference on Medical Image Computing and Computer Assisted Intervention, Munich, Germany.","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref_37","unstructured":"Devlin, J., Chang, M.W., Lee, K., and Toutanova, K. (2019, January 2\u20137). BERT: Pre-training of deep bidirectional transformers for language understanding. 
Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies\u2014Proceedings of the Conference, Minneapolis, MN, USA."},{"key":"ref_38","doi-asserted-by":"crossref","unstructured":"Yao, Y., Luo, Z., Li, S., Zhang, J., Ren, Y., Zhou, L., Fang, T., and Quan, L. (2020, January 13\u201319). BlendedMVS: A large-scale dataset for generalized multi-view stereo networks. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Seattle, WA, USA.","DOI":"10.1109\/CVPR42600.2020.00186"},{"key":"ref_39","unstructured":"Seitz, S.M., Diebel, J., Scharstein, D., and Szeliski, R. (2006, January 17\u201322). A Comparison and Evaluation of Multi-View Stereo Reconstruction Algorithms. Proceedings of the 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR\u201906), New York, NY, USA."},{"key":"ref_40","unstructured":"Kingma, D.P., and Ba, J.L. (2015, January 7\u20139). Adam: A method for stochastic optimization. Proceedings of the 3rd International Conference on Learning Representations, ICLR 2015\u2014Conference Track Proceedings, San Diego, CA, USA."},{"key":"ref_41","doi-asserted-by":"crossref","unstructured":"Merrell, P., Akbarzadeh, A., Wang, L., Mordohai, P., Frahm, J.M., Yang, R., Nist\u00e9r, D., and Pollefeys, M. (2007, January 14\u201321). Real-time visibility-based fusion of depth maps. 
Proceedings of the 2007 IEEE 11th International Conference on Computer Vision, Rio de Janeiro, Brazil.","DOI":"10.1109\/ICCV.2007.4408984"}],"container-title":["Sensors"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/1424-8220\/22\/19\/7659\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,11]],"date-time":"2025-10-11T00:48:51Z","timestamp":1760143731000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/1424-8220\/22\/19\/7659"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,9]]},"references-count":41,"journal-issue":{"issue":"19","published-online":{"date-parts":[[2022,10]]}},"alternative-id":["s22197659"],"URL":"https:\/\/doi.org\/10.3390\/s22197659","relation":{},"ISSN":["1424-8220"],"issn-type":[{"value":"1424-8220","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10,9]]}}}