{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T11:04:56Z","timestamp":1773486296276,"version":"3.50.1"},"reference-count":50,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T00:00:00Z","timestamp":1769644800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T00:00:00Z","timestamp":1769644800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Fundamental Research Funds for the Universities of Henan Province","award":["NSFRF2204444"],"award-info":[{"award-number":["NSFRF2204444"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Pattern Anal Applic"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s10044-026-01616-z","type":"journal-article","created":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T20:22:20Z","timestamp":1769718140000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Dual-path collaborative distillation"],"prefix":"10.1007","volume":"29","author":[{"given":"Shihui","family":"Wang","sequence":"first","affiliation":[]},{"given":"Xinwei","family":"Li","sequence":"additional","affiliation":[]},{"given":"Bingfeng","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Yang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,29]]},"reference":[{"key":"1616_CR1","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. 
In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 770\u2013778. IEEE, Las Vegas, NV, USA. https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"1616_CR2","doi-asserted-by":"publisher","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE\/CVF conference on computer vision pattern recognition (CVPR), pp 7132\u20137141. IEEE\/CVF, Salt Lake City, UT, USA. https:\/\/doi.org\/10.1109\/CVPR.2018.00745","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1616_CR3","doi-asserted-by":"publisher","unstructured":"Ma N, Zhang X, Zheng H-T, Sun J (2018) ShuffleNet V2: practical guidelines for efficient CNN architecture design. In: Proceedings of the 15th European conference on computer vision (ECCV), pp 122\u2013138. Springer, Munich, Germany. https:\/\/doi.org\/10.1007\/978-3-030-01264-9_8","DOI":"10.1007\/978-3-030-01264-9_8"},{"key":"1616_CR4","doi-asserted-by":"crossref","unstructured":"He K, Gkioxari G, Doll\u00e1r P, Girshick R (2018) Mask R-CNN. Preprint at arXiv:1703.06870. Computer vision; Instance segmentation.","DOI":"10.1109\/ICCV.2017.322"},{"issue":"6","key":"1616_CR5","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2017","unstructured":"Ren S, He K, Girshick R, Sun J (2017) Faster R-CNN: towards real-time object detection with region proposal networks. IEEE Trans Pattern Anal Mach Intell 39(6):1137\u20131149. https:\/\/doi.org\/10.1109\/TPAMI.2016.2577031","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1616_CR6","doi-asserted-by":"publisher","unstructured":"Long J, Shelhamer E, Darrell T (2015) Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 3431\u20133440. IEEE, Boston, MA, USA. 
https:\/\/doi.org\/10.1109\/CVPR.2015.7298965","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"1616_CR7","doi-asserted-by":"publisher","unstructured":"Zhao H, Shi J, Qi X, Wang X, Jia J (2017) Pyramid scene parsing network. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 6230\u20136239. IEEE, Honolulu, HI, USA. https:\/\/doi.org\/10.1109\/CVPR.2017.660","DOI":"10.1109\/CVPR.2017.660"},{"key":"1616_CR8","unstructured":"Hinton GE, Vinyals O, Dean J (2015) Distilling the knowledge in a neural network. Extended version of NIPS 2014 Deep Learning Workshop paper. Preprint at arXiv:1503.02531"},{"issue":"2","key":"1616_CR9","doi-asserted-by":"publisher","first-page":"819","DOI":"10.1007\/s10044-020-00940-2","volume":"24","author":"SW Prakosa","year":"2021","unstructured":"Prakosa SW, Leu J-S, Chen Z-H (2021) Improving the accuracy of pruned network using knowledge distillation. Pattern Anal Appl 24(2):819\u2013830. https:\/\/doi.org\/10.1007\/s10044-020-00940-2","journal-title":"Pattern Anal Appl"},{"issue":"2","key":"1616_CR10","doi-asserted-by":"publisher","first-page":"94","DOI":"10.1007\/s10044-025-01450-9","volume":"28","author":"Y Wang","year":"2025","unstructured":"Wang Y, Wang Y, Rohra A, Yin B (2025) End-to-end model compression via pruning and knowledge distillation for lightweight image super resolution. Pattern Anal Appl 28(2):94. https:\/\/doi.org\/10.1007\/s10044-025-01450-9","journal-title":"Pattern Anal Appl"},{"key":"1616_CR11","doi-asserted-by":"publisher","unstructured":"Yang C, Xie L, Qiao S, Yuille AL (2019) Training deep neural networks in generations: a more tolerant teacher educates better students. In: Proceedings of the 33rd AAAI conference on artificial intelligence (AAAI), pp 5628\u20135635. AAAI Press, Honolulu, HI, USA. 
https:\/\/doi.org\/10.1609\/aaai.v33i01.33015628","DOI":"10.1609\/aaai.v33i01.33015628"},{"key":"1616_CR12","unstructured":"Park DY, Cha M-H, Jeong C, Kim DS, Han B (2021) Learning student-friendly teacher networks for knowledge distillation. Published at NeurIPS 2021: https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2021\/hash\/6e7d2da6d3953058db75714ac400b584-Abstract.html. Preprint at arXiv:2102.07650"},{"key":"1616_CR13","doi-asserted-by":"publisher","unstructured":"Cho JH, Hariharan B (2019) On the efficacy of knowledge distillation. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 4793\u20134801. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00489","DOI":"10.1109\/ICCV.2019.00489"},{"issue":"4","key":"1616_CR14","doi-asserted-by":"publisher","first-page":"2251","DOI":"10.1109\/TCSVT.2021.3090902","volume":"32","author":"K Zhang","year":"2022","unstructured":"Zhang K, Zhang C, Li S, Zeng D, Ge S (2022) Student network learning via evolutionary knowledge distillation. IEEE Trans Circuits Syst Video Technol 32(4):2251\u20132263. https:\/\/doi.org\/10.1109\/TCSVT.2021.3090902","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"1616_CR15","doi-asserted-by":"publisher","unstructured":"Jin X, Peng B, Wu Y, Liu Y, Liu J, Liang D, Yan J, Hu X (2019) Knowledge distillation via route constrained optimization. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 1345\u20131354. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00143","DOI":"10.1109\/ICCV.2019.00143"},{"key":"1616_CR16","doi-asserted-by":"publisher","unstructured":"Dong X, Huang O, Thulasiraman P, Mahanti A (2023) Improved knowledge distillation via teacher assistants for sentiment analysis. In: Proceedings of the IEEE symposium series on computational intelligence (SSCI), pp 300\u2013305. IEEE, Mexico City, Mexico. 
https:\/\/doi.org\/10.1109\/SSCI52147.2023.10371965","DOI":"10.1109\/SSCI52147.2023.10371965"},{"key":"1616_CR17","unstructured":"Stanton S, Izmailov P, Kirichenko P, Alemi AA, Wilson AG (2021) Does knowledge distillation really work? Counterintuitive findings in knowledge distillation effectiveness. Published at NeurIPS 2021 DistShift Workshop. Preprint at arXiv:2106.05945"},{"key":"1616_CR18","doi-asserted-by":"publisher","unstructured":"Zhao B, Cui Q, Song R, Qiu Y, Liang J (2022) Decoupled knowledge distillation. In: Proceedings of the IEEE\/CVF conference on computer vision pattern recognition (CVPR), pp 11943\u201311952. IEEE\/CVF, New Orleans, LA, USA. https:\/\/doi.org\/10.1109\/CVPR52688.2022.01165","DOI":"10.1109\/CVPR52688.2022.01165"},{"key":"1616_CR19","doi-asserted-by":"publisher","unstructured":"Zhang L, Song J, Gao A, Chen J, Bao C, Ma K (2019) Be your own teacher: improve the performance of convolutional neural networks via self distillation. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 3712\u20133721. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00381","DOI":"10.1109\/ICCV.2019.00381"},{"key":"1616_CR20","unstructured":"Romero A, Ballas N, Kahou SE, Chassang A, Gatta C, Bengio Y (2015) FitNets: hints for thin deep nets. In: Proceedings of 3rd international conference on learning representation (ICLR), San Diego, CA, USA, pp 1\u201312. arXiv:1412.6550. Workshop track, arXiv preprint 2014"},{"key":"1616_CR21","unstructured":"Zagoruyko S, Komodakis N (2017) Paying more attention to attention: improving the performance of convolutional neural networks via attention transfer. In: Proceedings of the 5th international conference on learning representation (ICLR), pp 1\u201313. arXiv preprint 2016, arXiv:1612.03928"},{"key":"1616_CR22","unstructured":"Huang Z, Wang N (2017) Like what you like: knowledge distillation via neuron selectivity transfer. 
In: Proceedings of the NeurIPS 2017 workshops: deep learning: bridging theory and practice, pp 1\u20138. arXiv:1703.00832"},{"key":"1616_CR23","doi-asserted-by":"publisher","unstructured":"Yim J, Joo D, Bae J, Kim J (2017) A gift from knowledge distillation: fast optimization, network minimization and transfer learning. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 7130\u20137138. IEEE, Honolulu, HI, USA. https:\/\/doi.org\/10.1109\/CVPR.2017.754","DOI":"10.1109\/CVPR.2017.754"},{"key":"1616_CR24","doi-asserted-by":"publisher","unstructured":"Park W, Kim D, Lu Y, Cho M (2019) Relational knowledge distillation. In: Proceedings of the IEEE\/CVF conference on computer vision pattern recognition (CVPR), pp 3962\u20133971. IEEE\/CVF, Long Beach, CA, USA. https:\/\/doi.org\/10.1109\/CVPR.2019.00409","DOI":"10.1109\/CVPR.2019.00409"},{"key":"1616_CR25","doi-asserted-by":"publisher","unstructured":"Passalis N, Tefas A (2018) Learning deep representations with probabilistic knowledge transfer. In: Proceedings of the 15th European conference on computer vision (ECCV), pp 283\u2013299. Springer, Munich, Germany. https:\/\/doi.org\/10.1007\/978-3-030-01252-6_17","DOI":"10.1007\/978-3-030-01252-6_17"},{"key":"1616_CR26","doi-asserted-by":"publisher","unstructured":"Peng B, Jin X, Li D, Zhou S, Wu Y, Liu J, Zhang Z, Liu Y (2019) Correlation congruence for knowledge distillation. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 5006\u20135015. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00511","DOI":"10.1109\/ICCV.2019.00511"},{"key":"1616_CR27","doi-asserted-by":"publisher","unstructured":"Tung F, Mori G (2019) Similarity-preserving knowledge distillation. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 1365\u20131374. IEEE, Seoul, South Korea. 
https:\/\/doi.org\/10.1109\/ICCV.2019.00145","DOI":"10.1109\/ICCV.2019.00145"},{"key":"1616_CR28","doi-asserted-by":"publisher","unstructured":"Phuong M, Lampert C (2019) Distillation-based training for multi-exit architectures. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 1355\u20131364. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00144","DOI":"10.1109\/ICCV.2019.00144"},{"key":"1616_CR29","doi-asserted-by":"publisher","unstructured":"Hou Y, Ma Z, Liu C, Loy CC (2019) Learning lightweight lane detection CNNs by self attention distillation. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp 1013\u20131021. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00110","DOI":"10.1109\/ICCV.2019.00110"},{"key":"1616_CR30","doi-asserted-by":"publisher","unstructured":"Yun S, Park J, Lee K, Shin J (2020) Regularizing class-wise predictions via self-knowledge distillation. In: Proceedings of the IEEE\/CVF conference on computer vision pattern recognition (CVPR), pp 13873\u201313882. IEEE\/CVF, virtual conference, USA. https:\/\/doi.org\/10.1109\/CVPR42600.2020.01389","DOI":"10.1109\/CVPR42600.2020.01389"},{"key":"1616_CR31","unstructured":"Lee H, Hwang SJ, Shin J (2020) Self-supervised label augmentation via input transformations. Published at ICML 2020: https:\/\/proceedings.mlr.press\/v119\/lee20j.html. Preprint at arXiv:1910.05872"},{"key":"1616_CR32","unstructured":"Huang Z, Zou Y, Bhagavatula V, Huang D (2020) Comprehensive attention self-distillation for weakly-supervised object detection. In: Proceedings of the 34th conference on neural information processing systems (NeurIPS), pp 16751\u201316761. NeurIPS. arXiv:2010.12023"},{"key":"1616_CR33","doi-asserted-by":"publisher","unstructured":"Liu B, Rao Y, Lu J, Zhou J, Hsieh C-J (2020) MetaDistiller: Network self-boosting via meta-learned top-down distillation. 
In: Proceedings of the 16th European conference on computer vision (ECCV), pp 694\u2013709. Springer, Glasgow, UK (Virtual). https:\/\/doi.org\/10.1007\/978-3-030-58568-6_41","DOI":"10.1007\/978-3-030-58568-6_41"},{"key":"1616_CR34","doi-asserted-by":"crossref","unstructured":"Zagoruyko S, Komodakis N (2016) Wide residual networks. Published in BMVC 2016: http:\/\/www.bmva.org\/bmvc\/2016\/papers\/paper087\/index.html. Preprint at arXiv:1605.07146","DOI":"10.5244\/C.30.87"},{"key":"1616_CR35","unstructured":"Simonyan K, Zisserman A (2015) Very deep convolutional networks for large-scale image recognition. In: 3rd international conference on learning representation (ICLR), pp 1\u201314. OpenReview.net, San Diego, CA, USA. arXiv preprint: September 2014, arXiv:1409.1556"},{"key":"1616_CR36","doi-asserted-by":"publisher","unstructured":"Sandler M, Howard A, Zhu M, Zhmoginov A, Chen L-C (2018) MobileNetV2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 4510\u20134520. IEEE\/CVF, Salt Lake City, UT, USA. https:\/\/doi.org\/10.1109\/CVPR.2018.00474","DOI":"10.1109\/CVPR.2018.00474"},{"key":"1616_CR37","doi-asserted-by":"publisher","unstructured":"Zhang X, Zhou X, Lin M, Sun J (2018) ShuffleNet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 6848\u20136856. IEEE\/CVF, Salt Lake City, UT, USA. https:\/\/doi.org\/10.1109\/CVPR.2018.00716","DOI":"10.1109\/CVPR.2018.00716"},{"key":"1616_CR38","doi-asserted-by":"publisher","unstructured":"Krizhevsky A (2009) Learning multiple layers of features from tiny images. Technical report TR-2009-1, University of Toronto, Toronto, Ontario, Canada. https:\/\/doi.org\/10.5281\/zenodo.7435161. 
Dataset: CIFAR-10 and CIFAR-100, https:\/\/www.cs.toronto.edu\/~kriz\/learning-features-2009-TR.pdf","DOI":"10.5281\/zenodo.7435161"},{"issue":"3","key":"1616_CR39","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky O, Deng J, Su H, Krause J, Satheesh S, Ma S, Huang Z, Karpathy A, Khosla A, Bernstein M, Berg AC, Fei-Fei L (2015) ImageNet large scale visual recognition challenge. Int J Comput Vis 115(3):211\u2013252. https:\/\/doi.org\/10.1007\/s11263-015-0816-y","journal-title":"Int J Comput Vis"},{"key":"1616_CR40","unstructured":"Tian Y, Krishnan D, Isola P (2020) Contrastive representation distillation. In: 8th international conference on learning representation (ICLR 2020), pp 1\u201316. OpenReview.net, Addis Ababa, Ethiopia. arXiv:1910.10699. Workshop paper at ICLR 2020"},{"key":"1616_CR41","doi-asserted-by":"publisher","unstructured":"Heo B, Kim J, Yun S, Park H, Kwak N, Choi JY (2019) A comprehensive overhaul of feature distillation. In: Proceedings of the IEEE international conference on computer vision (ICCV), pp 1921\u20131930. IEEE, Seoul, South Korea. https:\/\/doi.org\/10.1109\/ICCV.2019.00201","DOI":"10.1109\/ICCV.2019.00201"},{"key":"1616_CR42","doi-asserted-by":"publisher","unstructured":"Chen P, Liu S, Zhao H, Jia J (2021) Distilling knowledge via knowledge review. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 5006\u20135015. IEEE\/CVF, virtual conference, USA. https:\/\/doi.org\/10.1109\/CVPR46437.2021.00497","DOI":"10.1109\/CVPR46437.2021.00497"},{"key":"1616_CR43","unstructured":"Liu X, Li L, Li C, Yao A (2023) NORM: knowledge distillation via N-to-one representation matching. arXiv:2305.13803"},{"key":"1616_CR44","unstructured":"Li Z, Li X, Yang L, Zhao B, Song R, Luo L, Li J, Yang J (2022) Curriculum temperature for knowledge distillation. 
Preprint at arXiv:2211.16231"},{"key":"1616_CR45","doi-asserted-by":"publisher","unstructured":"Jin Y, Wang J, Lin D (2023) Multi-level logit distillation. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 24276\u201324285. IEEE\/CVF, Vancouver, Canada. https:\/\/doi.org\/10.1109\/CVPR52729.2023.02325","DOI":"10.1109\/CVPR52729.2023.02325"},{"key":"1616_CR46","doi-asserted-by":"publisher","unstructured":"Sun S, Ren W, Li J, Wang R, Cao X (2024) Logit standardization in knowledge distillation. In: Proceedings of the IEEE conference on computer vision pattern recognition (CVPR), pp 15731\u201315740. IEEE\/CVF, Seattle, WA, USA. https:\/\/doi.org\/10.1109\/CVPR52733.2024.01489","DOI":"10.1109\/CVPR52733.2024.01489"},{"key":"1616_CR47","doi-asserted-by":"crossref","unstructured":"Wei S, Luo C, Luo Y (2024) Scale decoupled distillation. arXiv:2403.13512","DOI":"10.1109\/CVPR52733.2024.01512"},{"key":"1616_CR48","unstructured":"Xu L, Liu K, Liu J, Wang L, Xu L, Cheng J (2025) Local dense logit relations for enhanced knowledge distillation. arXiv:2507.15911"},{"issue":"6","key":"1616_CR49","doi-asserted-by":"publisher","first-page":"11243","DOI":"10.1109\/TNNLS.2025.3525737","volume":"36","author":"H Zhang","year":"2025","unstructured":"Zhang H, Liu L, Zhang Y, Lei X, Hui F, Wen B (2025) DenseKD: dense knowledge distillation by exploiting region and sample importance. IEEE Trans Neural Netw Learn Syst 36(6):11243\u201311257. https:\/\/doi.org\/10.1109\/TNNLS.2025.3525737","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"1616_CR50","doi-asserted-by":"crossref","unstructured":"Wang T, Yuan L, Zhang X, Feng J (2019) Distilling object detectors with fine-grained feature imitation. 
arXiv:1906.03609","DOI":"10.1109\/CVPR.2019.00507"}],"container-title":["Pattern Analysis and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10044-026-01616-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10044-026-01616-z","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10044-026-01616-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T10:39:00Z","timestamp":1773484740000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10044-026-01616-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,29]]},"references-count":50,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["1616"],"URL":"https:\/\/doi.org\/10.1007\/s10044-026-01616-z","relation":{},"ISSN":["1433-7541","1433-755X"],"issn-type":[{"value":"1433-7541","type":"print"},{"value":"1433-755X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,29]]},"assertion":[{"value":"11 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 January 2026","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 January 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work 
reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable. This research does not involve human participants, animals, or human data.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}],"article-number":"34"}}