{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,24]],"date-time":"2025-08-24T00:02:21Z","timestamp":1755993741119,"version":"3.44.0"},"reference-count":49,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372083","62072074","62076054","62027827","62002047"],"award-info":[{"award-number":["62372083","62072074","62076054","62027827","62002047"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Sichuan Science and Technology Support Plan","award":["2024NSFTD0005","2023YFS0020","2023YFS0197","2023YFG0148"],"award-info":[{"award-number":["2024NSFTD0005","2023YFS0020","2023YFS0197","2023YFG0148"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["ZYGX2021YGLH212","ZYGX2022YGRH012"],"award-info":[{"award-number":["ZYGX2021YGLH212","ZYGX2022YGRH012"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"name":"China Mobile (Chengdu) Industrial Research Fund","award":["G2339131001001T"],"award-info":[{"award-number":["G2339131001001T"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Artif. 
Intell."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/tai.2024.3440220","type":"journal-article","created":{"date-parts":[[2024,8,8]],"date-time":"2024-08-08T14:55:18Z","timestamp":1723128918000},"page":"5403-5417","source":"Crossref","is-referenced-by-count":0,"title":["SSpose: Self-Supervised Spatial-Aware Model for Human Pose Estimation"],"prefix":"10.1109","volume":"5","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5294-0094","authenticated-orcid":false,"given":"Linfang","family":"Yu","sequence":"first","affiliation":[{"name":"Network and Data Security Key Laboratory of Sichuan Province, University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7857-9719","authenticated-orcid":false,"given":"Zhen","family":"Qin","sequence":"additional","affiliation":[{"name":"Network and Data Security Key Laboratory of Sichuan Province and the School of Information and Software Engineering, University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6315-4240","authenticated-orcid":false,"given":"Liqun","family":"Xu","sequence":"additional","affiliation":[{"name":"China Mobile (Chengdu) Industrial Research Institute, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6745-6377","authenticated-orcid":false,"given":"Zhiguang","family":"Qin","sequence":"additional","affiliation":[{"name":"Network and Data Security Key Laboratory of Sichuan Province and the School of Information and Software Engineering, University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9208-5336","authenticated-orcid":false,"given":"Kim-Kwang Raymond","family":"Choo","sequence":"additional","affiliation":[{"name":"Department of Information Systems and Cyber Security, University of Texas at San Antonio, San Antonio, TX, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s40747-023-01173-6"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.4103\/singaporemedj.smj-2023-189"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2024.103131"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.214"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.512"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.284"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_29"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00584"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.511"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_29"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tetci.2021.3100641"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/tai.2023.3266418"},{"key":"ref14","first-page":"5998","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Vaswani","year":"2017"},{"key":"ref15","first-page":"3104","article-title":"Sequence to sequence learning with neural networks","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"27","author":"Sutskever","year":"2014"},{"article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","year":"2019","author":"Devlin","key":"ref16"},{"year":"2023","key":"ref17","article-title":"GPT-4 technical report"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2021","author":"Dosovitskiy","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"article-title":"Submanifold sparse convolutional networks","year":"2017","author":"Graham","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00908"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/tai.2024.3394797"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/tai.2023.3326795"},{"key":"ref24","first-page":"2286","article-title":"ConViT: Improving vision transformers with soft convolutional inductive biases","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139,","author":"D\u2019Ascoli","year":"2021"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2023.3282631"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01625"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref29","first-page":"38571","article-title":"ViTPose: Simple vision transformer baselines for human pose estimation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Xu","year":"2022"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00198"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01159"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01112"},{"article-title":"HRFormer: High-resolution transformer for dense prediction","year":"2021","author":"Yuan","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/s11063-022-10794-w"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/jbhi.2024.3423797"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/tmm.2024.3363660"},{"issue":"8","key":"ref39","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref40","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown","year":"2020"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref42","first-page":"61072","article-title":"Gradient-based visual explanation for transformer-based CLIP","volume-title":"Proc. 41st Int. Conf. Mach. 
Learn.","author":"ZHAO","year":"2024"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.471"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.5244\/C.24.12"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2009.5206848"},{"article-title":"On the convergence of ADAM and beyond","year":"2019","author":"Reddi","key":"ref47"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.195"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00712"}],"container-title":["IEEE Transactions on Artificial Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/9078688\/10751744\/10631686.pdf?arnumber=10631686","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T01:09:19Z","timestamp":1755911359000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10631686\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":49,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tai.2024.3440220","relation":{},"ISSN":["2691-4581"],"issn-type":[{"type":"electronic","value":"2691-4581"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}