{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T17:05:39Z","timestamp":1772643939672,"version":"3.50.1"},"reference-count":49,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100007129","name":"Natural Science Foundation of Shandong Province","doi-asserted-by":"publisher","award":["ZR2024MF046"],"award-info":[{"award-number":["ZR2024MF046"]}],"id":[{"id":"10.13039\/501100007129","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2026,1]]},"DOI":"10.1007\/s00371-025-04210-2","type":"journal-article","created":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T09:35:54Z","timestamp":1764581754000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["NLFER: multi-branch attention cross-fusion for robust facial expression recognition amidst noisy labels"],"prefix":"10.1007","volume":"42","author":[{"given":"Cheng-Yue","family":"Che","sequence":"first","affiliation":[]},{"given":"Hong-Mei","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Yu-Xiang","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Shuang","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Rui-Sheng","family":"Jia","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,1]]},"reference":[{"key":"4210_CR1","doi-asserted-by":"publisher","unstructured":"Li, Hangyu, et al., \"Towards semi-supervised deep facial expression recognition with an adaptive confidence margin,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2022, pp. 4166\u20134175. https:\/\/doi.org\/10.1109\/CVPR52688.2022.00413","DOI":"10.1109\/CVPR52688.2022.00413"},{"issue":"5","key":"4210_CR2","doi-asserted-by":"publisher","first-page":"3192","DOI":"10.1109\/TCSVT.2023.3312858","volume":"34","author":"X Zhang","year":"2024","unstructured":"Zhang, X., Li, M., Lin, S., Xu, H., Xiao, G.: Transformer-based multimodal emotional perception for dynamic facial expression recognition in the wild. IEEE Trans. Circuits Syst. Video Technol. 34(5), 3192\u20133203 (2024). https:\/\/doi.org\/10.1109\/TCSVT.2023.3312858","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"4210_CR3","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-023-02784-3","author":"SS Aldin","year":"2023","unstructured":"Aldin, S.S., Aldin, N.B., Ayka, M.: Enhanced image classification using edge CNN (E-CNN). Vis. Comput. (2023). https:\/\/doi.org\/10.1007\/s00371-023-02784-3","journal-title":"Vis. 
Comput."},{"key":"4210_CR4","doi-asserted-by":"publisher","unstructured":"Zheng, Jiawen, et al., \"Attack can benefit: An adversarial approach to recognizing facial expressions under noisy annotations,\" Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. No. 3. pp. 3660\u20133668, 2023. https:\/\/doi.org\/10.1609\/aaai.v37i3.25477","DOI":"10.1609\/aaai.v37i3.25477"},{"key":"4210_CR5","doi-asserted-by":"publisher","unstructured":"Zeng, Dan, et al., \"Face2Exp: Combating Data Biases for Facial Expression Recognition,\" Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 20259\u201320268. https:\/\/doi.org\/10.1109\/CVPR52688.2022.01965","DOI":"10.1109\/CVPR52688.2022.01965"},{"issue":"10","key":"4210_CR6","doi-asserted-by":"publisher","first-page":"5346","DOI":"10.1109\/TNNLS.2021.3070463","volume":"33","author":"P Li","year":"2022","unstructured":"Li, P., Sheng, B., Chen, C.L.P.: Face sketch synthesis using regularized broad learning system. IEEE Trans. Neural Netw. Learn. Syst. 33(10), 5346\u20135360 (2022). https:\/\/doi.org\/10.1109\/TNNLS.2021.3070463","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"4210_CR7","doi-asserted-by":"publisher","unstructured":"Zeng, Jiabei, Shiguang Shan, and **lin Chen, \"Facial expression recognition with inconsistently annotated datasets,\" Proceedings of the European conference on computer vision, 2018, pp. 222\u2013237. https:\/\/doi.org\/10.1007\/978-3-030-01261-8_14","DOI":"10.1007\/978-3-030-01261-8_14"},{"key":"4210_CR8","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1109\/TMM.2021.3120873","volume":"25","author":"X Lin","year":"2023","unstructured":"Lin, X., Sun, S., Huang, W., Sheng, B., Li, P., Feng, D.D.: EAPT: efficient attention pyramid transformer for image processing. IEEE Trans. MultiMedia 25, 50\u201361 (2023). https:\/\/doi.org\/10.1109\/TMM.2021.3120873","journal-title":"IEEE Trans. MultiMedia"},{"key":"4210_CR9","first-page":"17616","volume":"34","author":"Y Zhang","year":"2021","unstructured":"Zhang, Y., Wang, C., Deng, W.: Relative uncertainty learning for facial expression recognition. Adv. Neural. Inf. Process. Syst. 34, 17616\u201317627 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"4210_CR10","doi-asserted-by":"publisher","unstructured":"Zhang, Yuhang, et al., \"Learn from all: Erasing attention consistency for noisy label facial expression recognition,\" European Conference on Computer Vision. Cham: Springer Nature Switzerland, 2022, pp. 418\u2013434. https:\/\/doi.org\/10.1007\/978-3-031-19809-0_24","DOI":"10.1007\/978-3-031-19809-0_24"},{"issue":"1","key":"4210_CR11","doi-asserted-by":"publisher","DOI":"10.1002\/cav.2203","volume":"35","author":"Yu Liu","year":"2024","unstructured":"Liu, Yu., et al.: 3d facial attractiveness prediction based on deep feature fusion. Comput. Anim. Virtual Worlds 35(1), e2203 (2024). https:\/\/doi.org\/10.1002\/cav.2203","journal-title":"Comput. Anim. Virtual Worlds"},{"key":"4210_CR12","doi-asserted-by":"publisher","unstructured":"Z. Huang, J. Zhang and H. Shan, \"Twin Contrastive Learning with Noisy Labels,\" Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 11661\u201311670. 
https:\/\/doi.org\/10.1109\/CVPR52729.2023.01122","DOI":"10.1109\/CVPR52729.2023.01122"},{"key":"4210_CR13","doi-asserted-by":"publisher","unstructured":"Zhang, Chiyuan, et al., \"Understanding deep learning (still) requires rethinking generalization,\" Communications of the ACM, 2021, 64(3), pp. 107\u2013115. https:\/\/doi.org\/10.48550\/arXiv.1611.03530","DOI":"10.48550\/arXiv.1611.03530"},{"key":"4210_CR14","doi-asserted-by":"publisher","unstructured":"C. Zheng, M. Mendieta and C. Chen, \"POSTER: A Pyramid Cross-Fusion Transformer Network for Facial Expression Recognition,\" Proceedings of the IEEE\/CVF International Conference on Computer Vision. 2023, pp. 3146\u20133155. https:\/\/doi.org\/10.1109\/ICCVW60793.2023.00339","DOI":"10.1109\/ICCVW60793.2023.00339"},{"issue":"2","key":"4210_CR15","doi-asserted-by":"publisher","DOI":"10.1002\/cav.2230","volume":"35","author":"D Bellenger","year":"2024","unstructured":"Bellenger, D., Chen, M., Xu, Z.: Facial emotion recognition with a reduced feature set for video game and metaverse avatars. Comput. Anim. Virtual Worlds 35(2), e2230 (2024). https:\/\/doi.org\/10.1002\/cav.2230","journal-title":"Comput. Anim. Virtual Worlds"},{"issue":"2","key":"4210_CR16","doi-asserted-by":"publisher","first-page":"585","DOI":"10.1007\/s00371-023-02803-3","volume":"40","author":"YS Gan","year":"2024","unstructured":"Gan, Y.S., Lien, S.E., Liong, C.S.T.: LAENet for micro-expression recognition. Vis. Comput. 40(2), 585\u2013599 (2024). https:\/\/doi.org\/10.1007\/s00371-023-02803-3","journal-title":"Vis. Comput."},{"issue":"11","key":"4210_CR17","doi-asserted-by":"publisher","first-page":"8135","DOI":"10.1109\/TNNLS.2022.3152527","volume":"34","author":"H Song","year":"2022","unstructured":"Song, H., Kim, M., Park, D., Shin, Y., Lee, J.G.: Learning from noisy labels with deep neural networks: a survey. IEEE Trans. Neural. Netw. Learn. Sys. 34(11), 8135\u20138153 (2022)","journal-title":"IEEE Trans. Neural. Netw. Learn. Sys."},{"key":"4210_CR18","doi-asserted-by":"publisher","unstructured":"Wang, Kai, et al., \"Suppressing uncertainties for large-scale facial expression recognition,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2020, pp. 6897\u20136906. https:\/\/doi.org\/10.1109\/CVPR42600.2020.00693","DOI":"10.1109\/CVPR42600.2020.00693"},{"issue":"5","key":"4210_CR19","doi-asserted-by":"publisher","first-page":"2033","DOI":"10.1109\/TCSVT.2022.3220669","volume":"33","author":"Y Gu","year":"2023","unstructured":"Gu, Y., Yan, H., Zhang, X., Wang, Y., Ji, Y., Ren, F.: Toward facial expression recognition in the wild via noise-tolerant network. IEEE Trans. Circuits Syst. Video Technol. 33(5), 2033\u20132047 (2023). https:\/\/doi.org\/10.1109\/TCSVT.2022.3220669","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"1","key":"4210_CR20","doi-asserted-by":"publisher","first-page":"590","DOI":"10.1109\/TCSVT.2023.3286546","volume":"34","author":"D Okamura","year":"2024","unstructured":"Okamura, D., Harakawa, R., Iwahashi, M.: LCNME: label correction using network prediction based on memorization effects for cross-modal retrieval with noisy labels. IEEE Trans. Circuits Syst. Video Technol. 34(1), 590\u2013602 (2024). https:\/\/doi.org\/10.1109\/TCSVT.2023.3286546","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"issue":"1","key":"4210_CR21","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.vrih.2022.07.006","volume":"5","author":"M Zhang","year":"2023","unstructured":"Zhang, M., Tian, X.: Transformer architecture based on mutual attention for image-anomaly detection. Virt. Real. Intell. Hardw. 5(1), 57\u201367 (2023). https:\/\/doi.org\/10.1016\/j.vrih.2022.07.006","journal-title":"Virt. Real. Intell. Hardw."},{"key":"4210_CR22","doi-asserted-by":"publisher","unstructured":"Z. Wu and J. Cui, \"LA-Net: Landmark-Aware Learning for Reliable Facial Expression Recognition under Label Noise,\" 2023 IEEE\/CVF International Conference on Computer Vision (ICCV), Paris, France, 2023, pp. 20641\u201320650. https:\/\/doi.org\/10.1109\/ICCV51070.2023.01892","DOI":"10.1109\/ICCV51070.2023.01892"},{"key":"4210_CR23","doi-asserted-by":"publisher","unstructured":"Iscen, Ahmet, et al., \"Learning with neighbor consistency for noisy labels,\" Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 4672\u20134681. https:\/\/doi.org\/10.1109\/CVPR52688.2022.00463","DOI":"10.1109\/CVPR52688.2022.00463"},{"key":"4210_CR24","doi-asserted-by":"publisher","unstructured":"J. Li, C. Xiong and S. C. H. Hoi, \"Learning from Noisy Data with Robust Representation Learning,\" Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2021, pp. 9485\u20139494. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00935","DOI":"10.1109\/ICCV48922.2021.00935"},{"key":"4210_CR25","doi-asserted-by":"publisher","unstructured":"S. Chen, J. Wang, Y. Chen, Z. Shi, X. Geng and Y. Rui, \"Label Distribution Learning on Auxiliary Label Space Graphs for Facial Expression Recognition,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2020, pp. 13984\u201313993. https:\/\/doi.org\/10.1109\/CVPR42600.2020.01400","DOI":"10.1109\/CVPR42600.2020.01400"},{"key":"4210_CR26","doi-asserted-by":"publisher","unstructured":"She, Jiahui, et al., \"Dive into ambiguity: Latent distribution mining and pairwise uncertainty estimation for facial expression recognition,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2021, pp. 6248\u20136257. https:\/\/doi.org\/10.1109\/CVPR46437.2021.00618","DOI":"10.1109\/CVPR46437.2021.00618"},{"key":"4210_CR27","doi-asserted-by":"publisher","unstructured":"Jiang, Lu, et al., \"Mentornet: Learning data-driven curriculum for very deep neural networks on corrupted labels,\" International conference on machine learning, PMLR, 2018, pp. 2304\u20132313. https:\/\/doi.org\/10.48550\/arXiv.1712.05055","DOI":"10.48550\/arXiv.1712.05055"},{"key":"4210_CR28","doi-asserted-by":"publisher","unstructured":"Veit, Andreas, et al., \"Learning from noisy large-scale datasets with minimal supervision,\" Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 839\u2013847. https:\/\/doi.org\/10.1109\/CVPR.2017.696","DOI":"10.1109\/CVPR.2017.696"},{"key":"4210_CR29","doi-asserted-by":"publisher","unstructured":"Wang, Lijuan, et al., \"Ease: Robust facial expression recognition via emotion ambiguity-sensitive cooperative networks,\" Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 218\u2013227. 
https:\/\/doi.org\/10.1145\/3503161.3548005","DOI":"10.1145\/3503161.3548005"},{"key":"4210_CR30","doi-asserted-by":"publisher","unstructured":"Wei, Hongxin, et al., \"Combating noisy labels by agreement: A joint training method with co-regularization,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2020, pp. 13726\u201313735. https:\/\/doi.org\/10.1109\/CVPR42600.2020.01374","DOI":"10.1109\/CVPR42600.2020.01374"},{"key":"4210_CR31","doi-asserted-by":"publisher","first-page":"9280","DOI":"10.1109\/TMM.2024.3387831","volume":"26","author":"H Wu","year":"2024","unstructured":"Wu, H., Sun, J.: Robust image classification with noisy labels by negative learning and feature space renormalization. IEEE Trans. MultiMedia 26, 9280\u20139291 (2024). https:\/\/doi.org\/10.1109\/TMM.2024.3387831","journal-title":"IEEE Trans. MultiMedia"},{"key":"4210_CR32","unstructured":"Chen, Cunjian, \"Pytorch face landmark: A fast and accurate facial landmark detector,\" (2021). https:\/\/pytorch_face_landmark."},{"key":"4210_CR33","doi-asserted-by":"publisher","unstructured":"Vaswani, Ashish, et al., \"Attention is all you need,\" Advances in neural information processing systems, 2017, pp.\u00a06000\u00a0-\u00a06010. https:\/\/doi.org\/10.48550\/arXiv.1706.03762","DOI":"10.48550\/arXiv.1706.03762"},{"key":"4210_CR34","doi-asserted-by":"publisher","unstructured":"Dosovitskiy, Alexey, \"An image is worth 16x16 words: Transformers for image recognition at scale,\" arxiv preprint arxiv:2010.11929 (2020). https:\/\/doi.org\/10.48550\/arXiv.2010.11929","DOI":"10.48550\/arXiv.2010.11929"},{"key":"4210_CR35","doi-asserted-by":"publisher","unstructured":"Hu, Wei, et al., \"Noise-tolerant paradigm for training face recognition CNNs,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2019, pp. 11887\u201311896. https:\/\/doi.org\/10.1109\/CVPR.2019.01216","DOI":"10.1109\/CVPR.2019.01216"},{"key":"4210_CR36","doi-asserted-by":"publisher","unstructured":"Liu, Weiyang, et al., \"Sphereface: Deep hypersphere embedding for face recognition,\" Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 212\u2013220. https:\/\/doi.org\/10.1109\/CVPR.2017.713","DOI":"10.1109\/CVPR.2017.713"},{"key":"4210_CR37","doi-asserted-by":"publisher","unstructured":"S. Li, W. Deng and J. Du, \"Reliable Crowdsourcing and Deep Locality-Preserving Learning for Expression Recognition in the Wild,\" 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA, 2017, pp. 2584\u20132593. https:\/\/doi.org\/10.1109\/CVPR.2017.277","DOI":"10.1109\/CVPR.2017.277"},{"key":"4210_CR38","doi-asserted-by":"publisher","unstructured":"Barsoum, Emad, et al., \"Training deep networks for facial expression recognition with crowd-sourced label distribution,\" Proceedings of the 18th ACM international conference on multimodal interaction, 2016, pp. 279\u00a0-\u00a0283. https:\/\/doi.org\/10.1145\/2993148.2993165","DOI":"10.1145\/2993148.2993165"},{"key":"4210_CR39","doi-asserted-by":"publisher","unstructured":"Goodfellow, Ian J., et al., \"Challenges in representation learning: A report on three machine learning contests,\" Neural information processing: 20th international conference, ICONIP 2013, daegu, korea, november 3\u20137, 2013. Proceedings, Part III 20. Springer berlin heidelberg, 2013, pp. 117\u2013124. 
https:\/\/doi.org\/10.1016\/j.neunet.2014.09.005","DOI":"10.1016\/j.neunet.2014.09.005"},{"issue":"1","key":"4210_CR40","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2017","unstructured":"Mollahosseini, A., Hasani, B., Mahoor, M.H.: Affectnet: a database for facial expression, valence, and arousal computing in the wild. IEEE Trans. Affect. Comput. 10(1), 18\u201331 (2017). https:\/\/doi.org\/10.1109\/TAFFC.2017.2740923","journal-title":"IEEE Trans. Affect. Comput."},{"key":"4210_CR41","doi-asserted-by":"publisher","unstructured":"He, Kaiming, et al., 2016 \"Deep residual learning for image recognition,\" Proceedings of the IEEE conference on computer vision and pattern recognition, https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"4210_CR42","doi-asserted-by":"publisher","unstructured":"Chang, Jie, et al., \"Data uncertainty learning in face recognition,\" Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2020, pp. 5710\u20135719. https:\/\/doi.org\/10.1109\/CVPR42600.2020.00575","DOI":"10.1109\/CVPR42600.2020.00575"},{"issue":"4","key":"4210_CR43","doi-asserted-by":"publisher","first-page":"3510","DOI":"10.1609\/aaai.v35i4.16465","volume":"35","author":"Z Zhao","year":"2021","unstructured":"Zhao, Z., Liu, Q., Zhou, F.: Robust lightweight facial expression recognition network with label distribution training. Proc. AAAI Conf. Artif. Intell. 35(4), 3510\u20133519 (2021). https:\/\/doi.org\/10.1609\/aaai.v35i4.16465","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"4210_CR44","doi-asserted-by":"publisher","unstructured":"Wang, Kai, et al., \"Region attention networks for pose and occlusion robust facial expression recognition,\" IEEE Transactions on Image Processing, 2020, 29, pp. 4057\u20134069. https:\/\/doi.org\/10.1109\/TIP.2019.2956143","DOI":"10.1109\/TIP.2019.2956143"},{"issue":"2","key":"4210_CR45","doi-asserted-by":"publisher","first-page":"1236","DOI":"10.1109\/TAFFC.2021.3122146","volume":"14","author":"F Ma","year":"2023","unstructured":"Ma, F., Sun, B., Li, S.: Facial expression recognition with visual transformers and attentional selective fusion. IEEE Trans. Affect. Comput. 14(2), 1236\u20131248 (2023). https:\/\/doi.org\/10.1109\/TAFFC.2021.3122146","journal-title":"IEEE Trans. Affect. Comput."},{"key":"4210_CR46","doi-asserted-by":"publisher","unstructured":"Moon, Jiyong, Hyeryung Jang, and Seongsik Park, \"SimFLE: Simple Facial Landmark Encoding for Self-Supervised Facial Expression Recognition in the Wild,\" IEEE Transactions on Affective Computing 2024. https:\/\/doi.org\/10.1109\/TAFFC.2024.3470980","DOI":"10.1109\/TAFFC.2024.3470980"},{"key":"4210_CR47","unstructured":"Van der Maaten, Laurens, and Geoffrey Hinton, \"Visualizing data using t-SNE,\" Journal of machine learning research, 2008, 9(11). https:\/\/jmlr.org\/papers"},{"issue":"2","key":"4210_CR48","doi-asserted-by":"publisher","first-page":"593","DOI":"10.1109\/TAFFC.2023.3285231","volume":"15","author":"F Ma","year":"2024","unstructured":"Ma, F., Sun, B., Li, S.: Transformer-augmented network with online label correction for facial expression recognition. IEEE Trans. Affect. Comput. 15(2), 593\u2013605 (2024). https:\/\/doi.org\/10.1109\/TAFFC.2023.3285231","journal-title":"IEEE Trans. Affect. 
Comput."},{"key":"4210_CR49","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2025.112015","volume":"170","author":"X Wang","year":"2026","unstructured":"Wang, X., et al.: MHAN: multi-head hybrid attention network for facial expression recognition. Pattern Recogn. 170, 112015 (2026). https:\/\/doi.org\/10.1016\/j.patcog.2025.112015","journal-title":"Pattern Recogn."}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04210-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-04210-2","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04210-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T13:00:19Z","timestamp":1772629219000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-04210-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,1]]},"references-count":49,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,1]]}},"alternative-id":["4210"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-04210-2","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,1]]},"assertion":[{"value":"13 May 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interests"}}],"article-number":"1"}}