{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,30]],"date-time":"2026-03-30T10:09:22Z","timestamp":1774865362915,"version":"3.50.1"},"reference-count":48,"publisher":"Springer Science and Business Media LLC","issue":"22","license":[{"start":{"date-parts":[[2024,8,17]],"date-time":"2024-08-17T00:00:00Z","timestamp":1723852800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,8,17]],"date-time":"2024-08-17T00:00:00Z","timestamp":1723852800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"supported by the R&D project of Pazhou Lab","award":["2023K0605"],"award-info":[{"award-number":["2023K0605"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1007\/s10489-024-05729-y","type":"journal-article","created":{"date-parts":[[2024,8,17]],"date-time":"2024-08-17T07:02:16Z","timestamp":1723878136000},"page":"11325-11341","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["FGLNet: frequency global and local context channel attention networks"],"prefix":"10.1007","volume":"54","author":[{"given":"Yunfei","family":"Liu","sequence":"first","affiliation":[]},{"given":"Yan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Huaqiang","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8035-8824","authenticated-orcid":false,"given":"Junran","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,17]]},"reference":[{"key":"5729_CR1","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, 
Sun J (2016) Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition (CVPR), pp 770\u2013778. https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"5729_CR2","doi-asserted-by":"publisher","unstructured":"Cao Y, Xu J, Lin S, Wei F, Hu H (2019) Gcnet: non-local networks meet squeeze-excitation networks and beyond. In: 2019 IEEE\/CVF international conference on computer vision workshop (ICCVW), pp 1971\u20131980. https:\/\/doi.org\/10.1109\/ICCVW.2019.00246","DOI":"10.1109\/ICCVW.2019.00246"},{"key":"5729_CR3","doi-asserted-by":"publisher","unstructured":"Duan K, Bai S, Xie L, Qi H, Huang Q, Tian Q (2023) Centernet++ for object detection. IEEE Trans Pattern Anal Mach Intell, pp 1\u201314. https:\/\/doi.org\/10.1109\/TPAMI.2023.3342120","DOI":"10.1109\/TPAMI.2023.3342120"},{"key":"5729_CR4","doi-asserted-by":"publisher","unstructured":"Wang X, Girshick R, Gupta A, He K (2018) Non-local neural networks. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, pp 7794\u20137803. https:\/\/doi.org\/10.1109\/CVPR.2018.00813","DOI":"10.1109\/CVPR.2018.00813"},{"issue":"8","key":"5729_CR5","doi-asserted-by":"publisher","first-page":"2011","DOI":"10.1109\/TPAMI.2019.2913372","volume":"42","author":"J Hu","year":"2020","unstructured":"Hu J, Shen L, Albanie S, Sun G, Wu E (2020) Squeeze-and-excitation networks. IEEE Trans Pattern Anal Mach Intell 42(8):2011\u20132023. https:\/\/doi.org\/10.1109\/TPAMI.2019.2913372","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"5729_CR6","doi-asserted-by":"publisher","unstructured":"Hong F, Kong L, Zhou H, Zhu X, Li H, Liu Z (2024) Unified 3d and 4d panoptic segmentation via dynamic shifting networks. IEEE Trans Pattern Anal Mach Intell, pp 1\u201316. 
https:\/\/doi.org\/10.1109\/TPAMI.2023.3349304","DOI":"10.1109\/TPAMI.2023.3349304"},{"key":"5729_CR7","doi-asserted-by":"publisher","unstructured":"Xie J, Cai Y, Chen J, Xu R, Wang J, Li Q (2024) Knowledge-augmented visual question answering with natural language explanation. IEEE Trans Image Process, pp 1\u20131. https:\/\/doi.org\/10.1109\/TIP.2024.3379900","DOI":"10.1109\/TIP.2024.3379900"},{"issue":"6","key":"5729_CR8","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky A, Sutskever I, Hinton GE (2017) Imagenet classification with deep convolutional neural networks. Commun ACM 60(6):84\u201390. https:\/\/doi.org\/10.1145\/3065386","journal-title":"Commun ACM"},{"key":"5729_CR9","doi-asserted-by":"publisher","first-page":"2826","DOI":"10.1109\/TIP.2021.3055617","volume":"30","author":"Y Ding","year":"2021","unstructured":"Ding Y, Ma Z, Wen S, Xie J, Chang D, Si Z, Wu M, Ling H (2021) Ap-cnn: weakly supervised attention pyramid convolutional neural network for fine-grained visual classification. IEEE Trans Image Process 30:2826\u20132836. https:\/\/doi.org\/10.1109\/TIP.2021.3055617","journal-title":"IEEE Trans Image Process"},{"key":"5729_CR10","doi-asserted-by":"publisher","unstructured":"Yang G, Rota P, Alameda-Pineda X, Xu D, Ding M, Ricci E (2022) Variational structured attention networks for deep visual representation learning. IEEE Transactions on Image Processing, pp 1\u20131. https:\/\/doi.org\/10.1109\/TIP.2021.3137647","DOI":"10.1109\/TIP.2021.3137647"},{"key":"5729_CR11","doi-asserted-by":"publisher","first-page":"106090","DOI":"10.1016\/j.resconrec.2021.106090","volume":"178","author":"Z Chen","year":"2022","unstructured":"Chen Z, Yang J, Chen L, Jiao H (2022) Garbage classification system based on improved shufflenet v2. Resour Conserv Recycl 178:106090. 
https:\/\/doi.org\/10.1016\/j.resconrec.2021.106090","journal-title":"Resour Conserv Recycl"},{"key":"5729_CR12","doi-asserted-by":"publisher","unstructured":"Wang Q, Wu B, Zhu P, Li P, Zuo W, Hu Q (2020) Eca-net: efficient channel attention for deep convolutional neural networks. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 11531\u201311539. https:\/\/doi.org\/10.1109\/CVPR42600.2020.01155","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"5729_CR13","doi-asserted-by":"publisher","unstructured":"Woo S, Park J, Lee J-Y, Kweon IS (2018) Cbam: convolutional block attention module. In: Computer vision \u2013 ECCV 2018: 15th european conference, Munich, Germany, September 8\u201314, 2018, Proceedings, Part VII, pp 3\u201319. Springer, Berlin, Heidelberg. https:\/\/doi.org\/10.1007\/978-3-030-01234-2_1","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"5729_CR14","doi-asserted-by":"publisher","unstructured":"Fu J, Liu J, Tian H, Li Y, Bao Y, Fang Z, Lu H (2019) Dual attention network for scene segmentation. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 3141\u20133149.https:\/\/doi.org\/10.1109\/CVPR.2019.00326","DOI":"10.1109\/CVPR.2019.00326"},{"key":"5729_CR15","doi-asserted-by":"publisher","unstructured":"Sagar A (2022) Dmsanet: dual multi scale attention network. In: Image analysis and processing \u2013 iciap 2022: 21st international conference, Lecce, Italy, May 23\u201327, 2022, Proceedings, Part I, pp 633\u2013645. Springer, Berlin, Heidelberg. https:\/\/doi.org\/10.1007\/978-3-031-06427-2_53","DOI":"10.1007\/978-3-031-06427-2_53"},{"key":"5729_CR16","doi-asserted-by":"publisher","unstructured":"Hou Q, Zhou D, Feng J (2021) Coordinate attention for efficient mobile network design. 
In: 2021 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 13708\u201313717.https:\/\/doi.org\/10.1109\/CVPR46437.2021.01350","DOI":"10.1109\/CVPR46437.2021.01350"},{"key":"5729_CR17","unstructured":"Hu J, Shen L, Albanie S, Sun G, Vedaldi A (2018) Gather-excite: exploiting feature context in convolutional neural networks. In: Proceedings of the 32nd international conference on neural information processing systems. NIPS\u201918, pp 9423\u20139433. Curran Associates Inc., Red Hook, NY, USA"},{"key":"5729_CR18","doi-asserted-by":"publisher","first-page":"158","DOI":"10.1016\/j.neucom.2022.07.054","volume":"506","author":"H Liu","year":"2022","unstructured":"Liu H, Liu F, Fan X, Huang D (2022) Polarized self-attention: towards high-quality pixel-wise mapping. Neurocomputing 506:158\u2013167. https:\/\/doi.org\/10.1016\/j.neucom.2022.07.054","journal-title":"Neurocomputing"},{"key":"5729_CR19","doi-asserted-by":"publisher","unstructured":"Qin Z, Zhang P, Wu F, Li X (2021) Fcanet: frequency channel attention networks. In: 2021 IEEE\/CVF international conference on computer vision (ICCV), pp 763\u2013772. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00082","DOI":"10.1109\/ICCV48922.2021.00082"},{"issue":"8","key":"5729_CR20","doi-asserted-by":"publisher","first-page":"3888","DOI":"10.1109\/TCSVT.2023.3234983","volume":"33","author":"L Hu","year":"2023","unstructured":"Hu L, Kong Y, Li J, Li X (2023) Effective local-global transformer for natural image matting. IEEE Trans Circ Syst Video Technol 33(8):3888\u20133898. https:\/\/doi.org\/10.1109\/TCSVT.2023.3234983","journal-title":"IEEE Trans Circ Syst Video Technol"},{"issue":"10","key":"5729_CR21","doi-asserted-by":"publisher","first-page":"12581","DOI":"10.1109\/TPAMI.2023.3282631","volume":"45","author":"K Li","year":"2023","unstructured":"Li K, Wang Y, Zhang J, Gao P, Song G, Liu Y, Li H, Qiao Y (2023) Uniformer: unifying convolution and self-attention for visual recognition. 
IEEE Trans Pattern Anal Mach Intell 45(10):12581\u201312600. https:\/\/doi.org\/10.1109\/TPAMI.2023.3282631","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"5729_CR22","doi-asserted-by":"publisher","unstructured":"Gao Z, Xie J, Wang Q, Li P (2019) Global second-order pooling convolutional networks. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 3019\u20133028. https:\/\/doi.org\/10.1109\/CVPR.2019.00314","DOI":"10.1109\/CVPR.2019.00314"},{"issue":"6","key":"5729_CR23","doi-asserted-by":"publisher","first-page":"6896","DOI":"10.1109\/TPAMI.2020.3007032","volume":"45","author":"Z Huang","year":"2023","unstructured":"Huang Z, Wang X, Wei Y, Huang L, Shi H, Liu W, Huang TS (2023) Ccnet: criss-cross attention for semantic segmentation. IEEE Trans Pattern Anal Mach Intell 45(6):6896\u20136908. https:\/\/doi.org\/10.1109\/TPAMI.2020.3007032","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"5729_CR24","unstructured":"Chen Y, Kalantidis Y, Li J, Yan S, Feng J (2018) A $$^{\\wedge }$$2-nets: Double attention networks. In: Bengio S, Wallach H, Larochelle H, Grauman K, Cesa-Bianchi N, Garnett R (eds.) Advances in neural information processing systems, vol 31. https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2018\/file\/e165421110ba03099a1c0393373c5b43-Paper.pdf"},{"key":"5729_CR25","doi-asserted-by":"publisher","unstructured":"Bello I, Zoph B, Le Q, Vaswani A, Shlens J (2019) Attention augmented convolutional networks. In: 2019 IEEE\/CVF international conference on computer vision (ICCV), pp 3285\u20133294. https:\/\/doi.org\/10.1109\/ICCV.2019.00338","DOI":"10.1109\/ICCV.2019.00338"},{"key":"5729_CR26","doi-asserted-by":"crossref","unstructured":"Zhao H, Zhang Y, Liu S, Shi J, Loy CC, Lin D, Jia J (2018) Psanet: point-wise spatial attention network for scene parsing. In: Ferrari V, Hebert M, Sminchisescu C, Weiss Y (eds) Computer vision - ECCV 2018. 
Springer, Cham, pp 270\u2013286","DOI":"10.1007\/978-3-030-01240-3_17"},{"key":"5729_CR27","doi-asserted-by":"publisher","unstructured":"Misra D, Nalamada T, Arasanipalai AU, Hou Q (2021) Rotate to attend: convolutional triplet attention module. In: 2021 IEEE winter conference on applications of computer vision (WACV), pp 3138\u20133147. https:\/\/doi.org\/10.1109\/WACV48630.2021.00318","DOI":"10.1109\/WACV48630.2021.00318"},{"issue":"2","key":"5729_CR28","doi-asserted-by":"publisher","first-page":"1489","DOI":"10.1109\/TPAMI.2022.3164083","volume":"45","author":"Y Li","year":"2023","unstructured":"Li Y, Yao T, Pan Y, Mei T (2023) Contextual transformer networks for visual recognition. IEEE Trans Pattern Anal Mach Intell 45(2):1489\u20131500. https:\/\/doi.org\/10.1109\/TPAMI.2022.3164083","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"5729_CR29","doi-asserted-by":"publisher","first-page":"3560","DOI":"10.1109\/TMM.2022.3162469","volume":"25","author":"S Zou","year":"2023","unstructured":"Zou S, Zuo X, Wang S, Qian Y, Guo C, Cheng L (2023) Human pose and shape estimation from single polarization images. IEEE Trans Multimed 25:3560\u20133572. https:\/\/doi.org\/10.1109\/TMM.2022.3162469","journal-title":"IEEE Trans Multimed"},{"key":"5729_CR30","doi-asserted-by":"publisher","unstructured":"Liu M, Wu S, Chen R, Lin Z, Wang Y, Meijering E (2024) Brain image segmentation for ultrascale neuron reconstruction via an adaptive dual-task learning network. IEEE Transactions on Medical Imaging, pp 1\u20131. https:\/\/doi.org\/10.1109\/TMI.2024.3367384","DOI":"10.1109\/TMI.2024.3367384"},{"key":"5729_CR31","doi-asserted-by":"publisher","unstructured":"Liu N, Nan K, Zhao W, Yao X, Han J (2023) Learning complementary spatial\u2013temporal transformer for video salient object detection. IEEE Transactions on Neural Networks and Learning Systems, pp 1\u201311. 
https:\/\/doi.org\/10.1109\/TNNLS.2023.3243246","DOI":"10.1109\/TNNLS.2023.3243246"},{"key":"5729_CR32","doi-asserted-by":"publisher","unstructured":"Zhang S, Yu W, Jiang F, Nie L, Yao H, Huang Q, Tao D (2024) Stereo image restoration via attention-guided correspondence learning. IEEE Trans Pattern Anal Mach Intell, pp 1\u201317. https:\/\/doi.org\/10.1109\/TPAMI.2024.3357709","DOI":"10.1109\/TPAMI.2024.3357709"},{"key":"5729_CR33","doi-asserted-by":"publisher","unstructured":"Fu J, Xie Q, Meng D, Xu Z (2024) Rotation equivariant proximal operator for deep unfolding methods in image restoration. IEEE Trans Pattern Anal Mach Intell, pp 1\u201317. https:\/\/doi.org\/10.1109\/TPAMI.2024.3383532","DOI":"10.1109\/TPAMI.2024.3383532"},{"key":"5729_CR34","doi-asserted-by":"publisher","first-page":"301","DOI":"10.1109\/TIP.2021.3130526","volume":"31","author":"J Kim","year":"2022","unstructured":"Kim J, Kim S, Kim ST, Ro YM (2022) Robust perturbation for visual explanation: Cross-checking mask optimization to avoid class distortion. IEEE Trans Image Process 31:301\u2013313. https:\/\/doi.org\/10.1109\/TIP.2021.3130526","journal-title":"IEEE Trans Image Process"},{"issue":"3","key":"5729_CR35","doi-asserted-by":"publisher","first-page":"1362","DOI":"10.1109\/TAI.2023.3289167","volume":"5","author":"C Ralekar","year":"2024","unstructured":"Ralekar C, Choudhary S, Gandhi TK, Chaudhury S (2024) Development of character recognition model inspired by visual explanations. IEEE Trans Artif Intell 5(3):1362\u20131372. https:\/\/doi.org\/10.1109\/TAI.2023.3289167","journal-title":"IEEE Trans Artif Intell"},{"issue":"1","key":"5729_CR36","doi-asserted-by":"publisher","first-page":"118","DOI":"10.1109\/TVCG.2021.3114861","volume":"28","author":"X Chu","year":"2022","unstructured":"Chu X, Xie X, Ye S, Lu H, Xiao H, Yuan Z, Zhu-Tian C, Zhang H, Wu Y (2022) Tivee: visual exploration and explanation of badminton tactics in immersive visualizations. 
IEEE Trans Vis Comput Graph 28(1):118\u2013128. https:\/\/doi.org\/10.1109\/TVCG.2021.3114861","journal-title":"IEEE Trans Vis Comput Graph"},{"issue":"2","key":"5729_CR37","doi-asserted-by":"publisher","first-page":"336","DOI":"10.1007\/s11263-019-01228-7","volume":"128","author":"RR Selvaraju","year":"2020","unstructured":"Selvaraju RR, Cogswell M, Das A, Vedantam R, Parikh D, Batra D (2020) Grad-cam: visual explanations from deep networks via gradient-based localization. Int J Comput Vision 128(2):336\u2013359. https:\/\/doi.org\/10.1007\/s11263-019-01228-7","journal-title":"Int J Comput Vision"},{"key":"5729_CR38","doi-asserted-by":"publisher","unstructured":"Sandler M, Howard A, Zhu M, Zhmoginov A, Chen L-C (2018) Mobilenetv2: inverted residuals and linear bottlenecks. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, pp 4510\u20134520. https:\/\/doi.org\/10.1109\/CVPR.2018.00474","DOI":"10.1109\/CVPR.2018.00474"},{"key":"5729_CR39","doi-asserted-by":"publisher","unstructured":"Zhou D, Hou Q, Chen Y, Feng J, Yan S (2020) Rethinking bottleneck structure for efficient mobile network design. In: Computer vision \u2013 ECCV 2020: 16th European conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part III, pp 680\u2013697. Springer, Berlin, Heidelberg. https:\/\/doi.org\/10.1007\/978-3-030-58580-8_40","DOI":"10.1007\/978-3-030-58580-8_40"},{"key":"5729_CR40","unstructured":"Paszke A, Gross S, Massa F, Lerer A, Bradbury J, Chanan G, Killeen T, Lin Z, Gimelshein N, Antiga L, Desmaison A, K\u00f6pf A, Yang E, DeVito Z, Raison M, Tejani A, Chilamkurthy S, Steiner B, Fang L, Bai J, Chintala S (2019) PyTorch: An Imperative Style, High-performance Deep Learning Library. Curran Associates Inc., Red Hook, NY, USA"},{"key":"5729_CR41","doi-asserted-by":"publisher","unstructured":"Wightman R (2019) PyTorch Image Models. GitHub. 
https:\/\/doi.org\/10.5281\/zenodo.4414861","DOI":"10.5281\/zenodo.4414861"},{"key":"5729_CR42","doi-asserted-by":"publisher","unstructured":"Szegedy C, Vanhoucke V, Ioffe S, Shlens J, Wojna Z (2016) Rethinking the inception architecture for computer vision. In: 2016 IEEE conference on computer vision and pattern recognition (CVPR), pp 2818\u20132826. https:\/\/doi.org\/10.1109\/CVPR.2016.308","DOI":"10.1109\/CVPR.2016.308"},{"key":"5729_CR43","doi-asserted-by":"publisher","unstructured":"Russakovsky O, Deng J, Su H, Krause J, Satheesh S, Ma S, Huang Z, Karpathy A, Khosla A, Bernstein M, Berg AC, Fei-Fei L (2015) Imagenet large scale visual recognition challenge. Int J Comput Vision 115(3):211\u2013252. https:\/\/doi.org\/10.1007\/s11263-015-0816-y","DOI":"10.1007\/s11263-015-0816-y"},{"key":"5729_CR44","doi-asserted-by":"publisher","unstructured":"Zhang Q-L, Yang Y-B (2021) Sa-net: shuffle attention for deep convolutional neural networks. In: ICASSP 2021 - 2021 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp 2235\u20132239. https:\/\/doi.org\/10.1109\/ICASSP39728.2021.9414568","DOI":"10.1109\/ICASSP39728.2021.9414568"},{"key":"5729_CR45","doi-asserted-by":"publisher","unstructured":"Lee H, Kim H-E, Nam H (2019) Srm: a style-based recalibration module for convolutional neural networks. In: 2019 IEEE\/CVF international conference on computer vision (ICCV), pp 1854\u20131862. https:\/\/doi.org\/10.1109\/ICCV.2019.00194","DOI":"10.1109\/ICCV.2019.00194"},{"key":"5729_CR46","doi-asserted-by":"crossref","unstructured":"Zhang Q-L, Rao L, Yang Y (2021) Group-cam: group score-weighted visual explanations for deep convolutional networks. arXiv:2103.13859","DOI":"10.1109\/CVPRW50498.2020.00020"},{"key":"5729_CR47","doi-asserted-by":"crossref","unstructured":"Liu W, Anguelov D, Erhan D, Szegedy C, Reed S, Fu C-Y, Berg AC (2016) Ssd: single shot multibox detector. In: Leibe B, Matas J, Sebe N, Welling M(eds.) 
Computer Vision \u2013 ECCV 2016, pp 21\u201337. Springer, Cham","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"5729_CR48","unstructured":"Chen L, Papandreou G, Schroff F, Adam H (2017) Rethinking atrous convolution for semantic image segmentation. CoRR arXiv:1706.05587"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05729-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-024-05729-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05729-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,18]],"date-time":"2024-09-18T15:24:10Z","timestamp":1726673050000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-024-05729-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,17]]},"references-count":48,"journal-issue":{"issue":"22","published-print":{"date-parts":[[2024,11]]}},"alternative-id":["5729"],"URL":"https:\/\/doi.org\/10.1007\/s10489-024-05729-y","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,8,17]]},"assertion":[{"value":"4 August 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 August 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial 
interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}},{"value":"The authors declare that they have no conflict of interest.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical Approval"}}]}}