{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T16:26:48Z","timestamp":1774456008337,"version":"3.50.1"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/ijcnn55064.2022.9892021","type":"proceedings-article","created":{"date-parts":[[2022,9,30]],"date-time":"2022-09-30T19:56:04Z","timestamp":1664567764000},"page":"1-6","source":"Crossref","is-referenced-by-count":18,"title":["SPRNet: Sitting Posture Recognition Using improved Vision Transformer"],"prefix":"10.1109","author":[{"given":"Yi","family":"Fang","sequence":"first","affiliation":[{"name":"Ningbo University,Faculty of Electrical Engineering and Computer Science,Ningbo,China"}]},{"given":"Shoudong","family":"Shi","sequence":"additional","affiliation":[{"name":"Ningbo University,Faculty of Electrical Engineering and Computer Science,Ningbo,China"}]},{"given":"Jingsen","family":"Fang","sequence":"additional","affiliation":[{"name":"Ningbo University,Faculty of Electrical Engineering and Computer Science,Ningbo,China"}]},{"given":"Wenting","family":"Yin","sequence":"additional","affiliation":[{"name":"Ningbo University,Faculty of Electrical Engineering and Computer Science,Ningbo,China"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Beyond self-attention: External attention using two linear layers for visual tasks","author":"guo","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/JSEN.2020.2980207"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/SENSORS47087.2021.9639463"},{"key":"ref13","first-page":"1v","article-title":"A sitting posture surveillance system based on image processing technology","volume":"1","author":"mu","year":"2010","journal-title":"2010 2nd International Conference on Computer Engineering and Technology"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.143"},{"key":"ref15","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref16","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"radford","year":"2019","journal-title":"OpenAIRE blog"},{"key":"ref17","first-page":"213","article-title":"End-to-end object detection with transformers","author":"carion","year":"2020","journal-title":"European Conference on Computer Vision"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01159"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.3019280"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CIBCB.2016.7758131"},{"key":"ref6","article-title":"Mixed transformer u-net for medical image segmentation","author":"wang","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1088\/1757-899X\/677\/3\/032057"},{"key":"ref8","article-title":"A survey of transformers","author":"lin","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref7","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"Advances in neural information processing systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.math.2013.05.005"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICII.2019.00058"},{"key":"ref9","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref21","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"ArXiv Preprint"}],"event":{"name":"2022 International Joint Conference on Neural Networks (IJCNN)","location":"Padua, Italy","start":{"date-parts":[[2022,7,18]]},"end":{"date-parts":[[2022,7,23]]}},"container-title":["2022 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9891857\/9889787\/09892021.pdf?arnumber=9892021","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,14]],"date-time":"2022-10-14T20:52:22Z","timestamp":1665780742000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9892021\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/ijcnn55064.2022.9892021","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}