{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T04:50:03Z","timestamp":1774500603868,"version":"3.50.1"},"reference-count":44,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2024,4,25]],"date-time":"2024-04-25T00:00:00Z","timestamp":1714003200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,4,25]],"date-time":"2024-04-25T00:00:00Z","timestamp":1714003200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Supercomput"],"published-print":{"date-parts":[[2024,8]]},"DOI":"10.1007\/s11227-024-06125-6","type":"journal-article","created":{"date-parts":[[2024,4,25]],"date-time":"2024-04-25T06:01:47Z","timestamp":1714024907000},"page":"17269-17291","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":12,"title":["MS-HRNet: multi-scale high-resolution network for human pose estimation"],"prefix":"10.1007","volume":"80","author":[{"given":"Yanxia","family":"Wang","sequence":"first","affiliation":[]},{"given":"Renjie","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Hu","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Dan","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,25]]},"reference":[{"key":"6125_CR1","doi-asserted-by":"crossref","unstructured":"Papandreou G, Zhu T, Kanazawa N, Toshev A, Tompson J, Bregler C, Murphy K (2017) Towards Accurate Multi-person Pose Estimation in the Wild, 4903\u20134911","DOI":"10.1109\/CVPR.2017.395"},{"key":"6125_CR2","doi-asserted-by":"crossref","unstructured":"Kocabas M, Karagoz S, Akbas E (2018) Multiposenet: Fast Multi-person Pose Estimation Using Pose Residual Network, 417\u2013433","DOI":"10.1007\/978-3-030-01252-6_26"},{"key":"6125_CR3","doi-asserted-by":"crossref","unstructured":"Cao Z, Simon T, Wei S-E, Sheikh Y (2017) Realtime Multi-person 2D Pose Estimation Using Part Affinity Fields, 7291\u20137299","DOI":"10.1109\/CVPR.2017.143"},{"key":"6125_CR4","doi-asserted-by":"crossref","unstructured":"Toshev A, Szegedy C (2014) Deeppose: Human Pose Estimation via Deep Neural Networks, 1653\u20131660","DOI":"10.1109\/CVPR.2014.214"},{"key":"6125_CR5","doi-asserted-by":"crossref","unstructured":"Tompson J, Goroshin R, Jain A, LeCun Y, Bregler C (2015) Efficient Object Localization Using Convolutional Networks, 648\u2013656","DOI":"10.1109\/CVPR.2015.7298664"},{"key":"6125_CR6","doi-asserted-by":"crossref","unstructured":"Newell A, Yang K, Deng J (2016) Stacked Hourglass Networks for Human Pose Estimation, 483\u2013499. Springer","DOI":"10.1007\/978-3-319-46484-8_29"},{"key":"6125_CR7","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional Networks for Biomedical Image Segmentation. In: Medical Image Computing and Computer-assisted intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, pp. 234\u2013241. 
Springer","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"6125_CR8","doi-asserted-by":"crossref","unstructured":"Noh H, Hong S, Han B (2015) Learning Deconvolution Network for Semantic Segmentation, 1520\u20131528","DOI":"10.1109\/ICCV.2015.178"},{"key":"6125_CR9","doi-asserted-by":"publisher","first-page":"16142","DOI":"10.1109\/ACCESS.2023.3244789","volume":"11","author":"AO Ige","year":"2023","unstructured":"Ige AO, Tomar NK, Aranuwa FO, Oriola O, Akingbesote AO, Noor MHM, Mazzara M, Aribisala BS (2023) Convsegnet: automated polyp segmentation from colonoscopy using context feature refinement with multiple convolutional kernel sizes. IEEE Access 11:16142\u201316155","journal-title":"IEEE Access"},{"issue":"5","key":"6125_CR10","doi-asserted-by":"publisher","first-page":"2005","DOI":"10.1007\/s00371-022-02460-y","volume":"39","author":"J Xu","year":"2023","unstructured":"Xu J, Liu W, Xing W, Wei X (2023) Mspenet: multi-scale adaptive fusion and position enhancement network for human pose estimation. Vis Comput 39(5):2005\u20132019","journal-title":"Vis Comput"},{"key":"6125_CR11","doi-asserted-by":"crossref","unstructured":"Sun K, Xiao B, Liu D, Wang J (2019) Deep High-Resolution Representation Learning for Human Pose Estimation, 5693\u20135703","DOI":"10.1109\/CVPR.2019.00584"},{"key":"6125_CR12","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep Residual Learning for Image Recognition, 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"6125_CR13","doi-asserted-by":"crossref","unstructured":"Huang G, Liu Z, Van Der\u00a0Maaten L, Weinberger KQ (2017) Densely Connected Convolutional Networks, 4700\u20134708","DOI":"10.1109\/CVPR.2017.243"},{"key":"6125_CR14","unstructured":"Tan M, Le Q (2019) Efficientnet: Rethinking Model Scaling for Convolutional Neural Networks, 6105\u20136114. PMLR"},{"key":"6125_CR15","doi-asserted-by":"crossref","unstructured":"Zhang X, Zhou X, Lin M, Sun J (2018) Shufflenet: An Extremely Efficient Convolutional Neural Network for Mobile Devices, 6848\u20136856","DOI":"10.1109\/CVPR.2018.00716"},{"key":"6125_CR16","doi-asserted-by":"crossref","unstructured":"Hou Q, Zhou D, Feng J (2021) Coordinate Attention for Efficient Mobile Network Design, 13713\u201313722","DOI":"10.1109\/CVPR46437.2021.01350"},{"key":"6125_CR17","doi-asserted-by":"publisher","first-page":"107579","DOI":"10.1016\/j.compag.2022.107579","volume":"204","author":"Y Qiao","year":"2023","unstructured":"Qiao Y, Guo Y, He D (2023) Cattle body detection based on YOLOv5-ASFF for precision livestock farming. Comput Electron Agric 204:107579","journal-title":"Comput Electron Agric"},{"key":"6125_CR18","doi-asserted-by":"crossref","unstructured":"Dantone M, Gall J, Leistner C, Van\u00a0Gool L (2013) Human Pose Estimation Using Body Parts Dependent Joint Regressors, 3041\u20133048","DOI":"10.1109\/CVPR.2013.391"},{"key":"6125_CR19","doi-asserted-by":"publisher","first-page":"55","DOI":"10.1023\/B:VISI.0000042934.15159.49","volume":"61","author":"PF Felzenszwalb","year":"2005","unstructured":"Felzenszwalb PF, Huttenlocher DP (2005) Pictorial structures for object recognition. Int J Comput Vision 61:55\u201379","journal-title":"Int J Comput Vision"},{"key":"6125_CR20","doi-asserted-by":"crossref","unstructured":"Newell A, Yang K, Den J (2016) Stacked Hourglass Networks for Human Pose Estimation, 483\u2013499. 
Springer","DOI":"10.1007\/978-3-319-46484-8_29"},{"key":"6125_CR21","doi-asserted-by":"crossref","unstructured":"Ke L, Chang M-C, Qi H, Lyu S (2018) Multi-scale Structure-aware Network for Human Pose Estimation, 713\u2013728","DOI":"10.1109\/ICIP.2018.8451114"},{"key":"6125_CR22","doi-asserted-by":"crossref","unstructured":"Chu X, Yang W, Ouyang W, Ma C, Yuille AL, Wang X (2017) Multi-context Attention for Human Pose Estimation, 1831\u20131840","DOI":"10.1109\/CVPR.2017.601"},{"key":"6125_CR23","first-page":"1","volume":"72","author":"G Yue","year":"2023","unstructured":"Yue G, Li S, Cong R, Zhou T, Lei B, Wang T (2023) Attention-guided pyramid context network for polyp segmentation in colonoscopy images. IEEE Trans Instrum Meas 72:1\u201313","journal-title":"IEEE Trans Instrum Meas"},{"key":"6125_CR24","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-Excitation Networks, 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"6125_CR25","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee J-Y, Kweon IS (2018) CBAM: Convolutional Block Attention Module, 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"6125_CR26","doi-asserted-by":"crossref","unstructured":"Liu Z, Mao H, Wu C-Y, Feichtenhofer C, Darrell T, Xie S (2022) A Convnet for the 2020s, 11976\u201311986","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"6125_CR27","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin Transformer: Hierarchical Vision Transformer Using Shifted Windows, 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"6125_CR28","unstructured":"Luo W, Li Y, Urtasun R, Zemel R (2016) Understanding the effective receptive field in deep convolutional neural networks. Adv Neural Inf Process Syst 29"},{"key":"6125_CR29","doi-asserted-by":"crossref","unstructured":"Zhu X, Cheng D, Zhang Z, Lin S, Dai J (2019) An Empirical Study of Spatial Attention Mechanisms in Deep Networks, 6688\u20136697","DOI":"10.1109\/ICCV.2019.00679"},{"key":"6125_CR30","unstructured":"Ramachandran P, Parmar N, Vaswani A, Bello I, Levskaya A, Shlens J (2019) Stand-alone self-attention in vision models. Adv Neural Inf Process Syst 32"},{"key":"6125_CR31","doi-asserted-by":"crossref","unstructured":"Vaswani A, Ramachandran P, Srinivas A, Parmar N, Hechtman B, Shlens J (2021) Scaling Local Self-attention for Parameter Efficient Visual Backbones, 12894\u201312904","DOI":"10.1109\/CVPR46437.2021.01270"},{"key":"6125_CR32","unstructured":"Bertasius G, Wang H, Torresani L (2021) Is space-time attention all you need for video understanding?. ICML 2(3), 4"},{"key":"6125_CR33","doi-asserted-by":"crossref","unstructured":"Howard A, Zhmoginov A, Chen L-C, Sandler M, Zhu M (2018) Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation, 4510\u20134520","DOI":"10.1109\/CVPR.2018.00474"},{"key":"6125_CR34","doi-asserted-by":"crossref","unstructured":"Chen Y, Dai X, Chen D, Liu M, Dong X, Yuan L, Liu Z (2022) Mobile-Former: Bridging Mobilenet and Transformer, 5270\u20135279","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"6125_CR35","doi-asserted-by":"crossref","unstructured":"Howard A, Sandler M, Chu G, Chen L-C, Chen B, Tan M, Wang W, Zhu Y, Pang R, Vasudevan V et al. 
(2019) Searching for Mobilenetv3, 1314\u20131324","DOI":"10.1109\/ICCV.2019.00140"},{"key":"6125_CR36","doi-asserted-by":"crossref","unstructured":"Lin T-Y, Maire M, Belongie S, Hays J, Perona P, Ramanan D, Doll\u00e1r P, Zitnick CL (2014) Microsoft Coco: Common Objects in Context, 740\u2013755. Springer","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"6125_CR37","doi-asserted-by":"crossref","unstructured":"Andriluka M, Pishchulin L, Gehler P, Schiele B (2014) 2D Human Pose Estimation: New Benchmark and State of the Art Analysis, 3686\u20133693","DOI":"10.1109\/CVPR.2014.471"},{"key":"6125_CR38","unstructured":"Loshchilov I, Hutter F (2018) Fixing Weight Decay Regularization in Adam"},{"key":"6125_CR39","doi-asserted-by":"crossref","unstructured":"Xiao B, Wu H, Wei Y (2018) Simple Baselines for Human Pose Estimation and Tracking, 466\u2013481","DOI":"10.1007\/978-3-030-01231-1_29"},{"key":"6125_CR40","doi-asserted-by":"crossref","unstructured":"Li Y, Zhang S, Wang Z, Yang S, Yang W, Xia S-T, Zhou E (2021) Tokenpose: Learning Keypoint Tokens for Human Pose Estimation, 11313\u201311322","DOI":"10.1109\/ICCV48922.2021.01112"},{"key":"6125_CR41","doi-asserted-by":"crossref","unstructured":"Chen Y, Wang Z, Peng Y, Zhang Z, Yu G, Sun J (2018) Cascaded Pyramid Network for Multi-person Pose Estimation, 7103\u20137112","DOI":"10.1109\/CVPR.2018.00742"},{"key":"6125_CR42","doi-asserted-by":"crossref","unstructured":"Xiong Z, Wang C, Li Y, Luo Y, Cao Y (2022) Swin-pose: Swin Transformer Based Human Pose Estimation, 228\u2013233. IEEE","DOI":"10.1109\/MIPR54900.2022.00048"},{"issue":"1","key":"6125_CR43","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/s00138-022-01352-4","volume":"34","author":"Y Li","year":"2023","unstructured":"Li Y, Liu R, Wang X, Wang R (2023) Human pose estimation based on lightweight basicblock. Mach Vis Appl 34(1):3","journal-title":"Mach Vis Appl"},{"key":"6125_CR44","doi-asserted-by":"crossref","unstructured":"Liu H, Wu J, He R (2023) Idpnet: a light-weight network and its variants for human pose estimation. 
J Supercomput 1\u201323","DOI":"10.1007\/s11227-023-05691-5"}],"container-title":["The Journal of Supercomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11227-024-06125-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11227-024-06125-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11227-024-06125-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,17]],"date-time":"2024-11-17T00:15:35Z","timestamp":1731802535000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11227-024-06125-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,25]]},"references-count":44,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2024,8]]}},"alternative-id":["6125"],"URL":"https:\/\/doi.org\/10.1007\/s11227-024-06125-6","relation":{},"ISSN":["0920-8542","1573-0484"],"issn-type":[{"value":"0920-8542","type":"print"},{"value":"1573-0484","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,25]]},"assertion":[{"value":"4 April 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 April 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}]}}
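
The record above is a standard Crossref work object, so its key fields (title, authors, DOI, container title, reference count) can be pulled straight from the public Crossref REST API. Below is a minimal sketch of how one might retrieve and summarize this particular record; it assumes Python 3 with the third-party `requests` package installed and uses the documented endpoint pattern https://api.crossref.org/works/{doi}. It is an illustration of reading the JSON shown here, not part of the original metadata.

import requests

# Fetch the same Crossref work record via the public REST API.
# The DOI is taken from the record above.
DOI = "10.1007/s11227-024-06125-6"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Extract a few of the fields visible in the record above.
title = work["title"][0]                      # "MS-HRNet: multi-scale high-resolution network ..."
journal = work["container-title"][0]          # "The Journal of Supercomputing"
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
year = work["issued"]["date-parts"][0][0]     # 2024

print(f"{authors} ({year}). {title}. {journal}.")
print(f"DOI: {work['DOI']}, references deposited: {work['references-count']}")

For this record the script would print the four authors (Yanxia Wang, Renjie Wang, Hu Shi, Dan Liu), the 2024 publication year, the article and journal titles, and the 44 deposited references, matching the fields embedded in the JSON above.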