{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T17:00:27Z","timestamp":1772038827939,"version":"3.50.1"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"wangyiju","award":["2021A1515310004"],"award-info":[{"award-number":["2021A1515310004"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3455749","type":"journal-article","created":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T18:19:02Z","timestamp":1725646742000},"page":"134133-134143","source":"Crossref","is-referenced-by-count":3,"title":["Enhanced Industrial Action Recognition Through Self-Supervised Visual Transformers"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-6736-4792","authenticated-orcid":false,"given":"Yao","family":"Xiao","sequence":"first","affiliation":[{"name":"College of Computer Science, Yangtze University, Jingzhou, Hubei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8485-9063","authenticated-orcid":false,"given":"Hua","family":"Xiang","sequence":"additional","affiliation":[{"name":"College of Computer Science, Yangtze University, Jingzhou, Hubei, China"}]},{"given":"Tongxi","family":"Wang","sequence":"additional","affiliation":[{"name":"College of Computer Science, Yangtze University, Jingzhou, Hubei, China"}]},{"given":"Yiju","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence and Data Science, Guangzhou Xinhua University, Guangzhou, China"}]}],"member":"263","reference":[{"issue":"4","key":"ref1","first-page":"49","article-title":"Process quality control and management in the production of electronic products, (in Chinese)","author":"Li","year":"2011","journal-title":"Electron. Qual."},{"key":"ref2","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref5","article-title":"An image is worth 16\\times16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.607"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33018545"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00413"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_47"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW63481.2024.10645477"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46466-4_5"},{"key":"ref15","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"key":"ref16","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref18","first-page":"10078","article-title":"VideoMAE: Masked autoencoders are data-efficient learners for self-supervised video pre-training","volume-title":"Proc. Adv. neural Inf. Process. Syst.","volume":"35","author":"Tong"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01432"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00220"},{"key":"ref21","first-page":"1","article-title":"Two-stream convolutional networks for action recognition in videos","volume-title":"Proc. Adv. neural Inf. Process. Syst.","volume":"27","author":"Simonyan"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00675"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.05095"},{"key":"ref27","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Touvron"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref29","article-title":"BEiT: BERT pre-training of image transformers","author":"Bao","year":"2021","journal-title":"arXiv:2106.08254"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00611"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00222"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1212.0402"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref34","article-title":"The kinetics human action video dataset","author":"Kay","year":"2017","journal-title":"arXiv:1705.06950"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00565"},{"key":"ref37","article-title":"UniFormerV2: Spatiotemporal learning by arming image ViTs with video UniFormer","author":"Li","year":"2022","journal-title":"arXiv:2211.09552"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01398"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00193"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00689"},{"key":"ref41","first-page":"5679","article-title":"Self-supervised co-training for video representation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Han"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16189"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10668864.pdf?arnumber=10668864","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,2]],"date-time":"2024-10-02T18:31:46Z","timestamp":1727893906000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10668864\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3455749","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}