{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T02:01:16Z","timestamp":1768701676056,"version":"3.49.0"},"reference-count":44,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2022,4,29]],"date-time":"2022-04-29T00:00:00Z","timestamp":1651190400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,4,29]],"date-time":"2022-04-29T00:00:00Z","timestamp":1651190400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100010248","name":"Zhejiang Province Public Welfare Technology Application Research Project","doi-asserted-by":"publisher","award":["LGF21F020008"],"award-info":[{"award-number":["LGF21F020008"]}],"id":[{"id":"10.13039\/501100010248","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004731","name":"Natural Science Foundation of Zhejiang Province","doi-asserted-by":"publisher","award":["LZ20F020001"],"award-info":[{"award-number":["LZ20F020001"]}],"id":[{"id":"10.13039\/501100004731","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,1]]},"DOI":"10.1007\/s10489-022-03543-y","type":"journal-article","created":{"date-parts":[[2022,4,29]],"date-time":"2022-04-29T14:10:05Z","timestamp":1651241405000},"page":"1535-1547","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":64,"title":["TransGait: Multimodal-based gait recognition with set transformer"],"prefix":"10.1007","volume":"53","author":[{"given":"Guodong","family":"Li","sequence":"first","affiliation":[]},{"given":"Lijun","family":"Guo","sequence":"additional","affiliation":[]},{"given":"Rong","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jiangbo","family":"Qian","sequence":"additional","affiliation":[]},{"given":"Shangce","family":"Gao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,4,29]]},"reference":[{"key":"3543_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.cviu.2018.01.007","volume":"167","author":"P Connor","year":"2018","unstructured":"Connor P, Ross A (2018) Biometric recognition by gait: A survey of modalities and features. Comput Vis Image Underst 167:1\u201327","journal-title":"Comput Vis Image Underst"},{"key":"3543_CR2","unstructured":"Sepas-Moghaddam A, Etemad A (2021) Deep gait recognition:, A survey. arXiv preprint arXiv:2102.09546"},{"key":"3543_CR3","doi-asserted-by":"crossref","unstructured":"Lin B, Zhang S, Bao F (2020) Gait recognition with multiple-temporal-scale 3d convolutional neural network. In: Proceedings of the 28th ACM international conference on multimedia, pp 3054\u20133062","DOI":"10.1145\/3394171.3413861"},{"key":"3543_CR4","unstructured":"Yu S, Tan D, Tan T (2006) A framework for evaluating the effect of view angle, clothing and carrying condition on gait recognition. In: 18Th international conference on pattern recognition (ICPR\u201906), vol 4, pp 441\u2013444"},{"key":"3543_CR5","unstructured":"Verlekar T (2019) Gait analysis in unconstrained environments. PhD thesis, Ph. D. dissertation, Electrical and Computer Engineering, Instituto Superior"},{"key":"3543_CR6","doi-asserted-by":"crossref","unstructured":"Fan C, Peng Y, Cao C, Liu X, Hou S, Chi J, Huang Y, Li Q, He Z (2020) Gaitpart: Temporal part-based model for gait recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 14225\u201314233","DOI":"10.1109\/CVPR42600.2020.01423"},{"key":"3543_CR7","doi-asserted-by":"publisher","first-page":"1001","DOI":"10.1109\/TIP.2019.2926208","volume":"29","author":"Y Zhang","year":"2019","unstructured":"Zhang Y, Huang Y, Yu S, Wang L (2019) Cross-view gait recognition by discriminative feature learning. IEEE Trans Image Process 29:1001\u20131015","journal-title":"IEEE Trans Image Process"},{"key":"3543_CR8","doi-asserted-by":"crossref","unstructured":"Hou S, Cao C, Liu X, Huang Y (2020) Gait lateral network: Learning discriminative and compact representations for gait recognition. In: European conference on computer vision, pp 382\u2013398","DOI":"10.1007\/978-3-030-58545-7_22"},{"key":"3543_CR9","doi-asserted-by":"crossref","unstructured":"Chao H, He Y, Zhang J, Feng J (2019) Gaitset: regarding gait as a set for cross-view gait recognition. In: Proceedings of the AAAI conference on artificial intelligence, vol 33, pp 8126\u20138133","DOI":"10.1609\/aaai.v33i01.33018126"},{"issue":"1","key":"3543_CR10","doi-asserted-by":"publisher","first-page":"260","DOI":"10.1109\/TCSVT.2020.2975671","volume":"31","author":"C Xu","year":"2020","unstructured":"Xu C, Makihara Y, Li X, Yagi Y, Lu J (2020) Cross-view gait recognition using pairwise spatial transformer networks. IEEE Trans Circuits Syst Video Technol 31(1):260\u2013274","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"3543_CR11","doi-asserted-by":"crossref","unstructured":"Qin H, Chen Z, Guo Q, Wu QJ, Lu M (2021) Rpnet: Gait recognition with relationships between each body-parts. IEEE Transactions on Circuits and Systems for Video Technology","DOI":"10.1109\/TCSVT.2021.3095290"},{"issue":"3","key":"3543_CR12","doi-asserted-by":"publisher","first-page":"734","DOI":"10.1109\/TCSVT.2019.2893736","volume":"30","author":"X Ben","year":"2019","unstructured":"Ben X, Gong C, Zhang P, Yan R, Wu Q, Meng W (2019) Coupled bilinear discriminant projection for cross-view gait recognition. IEEE Trans Circuits Syst Video Technol 30(3):734\u2013747","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"3543_CR13","doi-asserted-by":"crossref","unstructured":"Xu W (2021) Graph-optimized coupled discriminant projections for cross-view gait recognition. Appl Intell, 1\u201313","DOI":"10.1007\/s10489-021-02322-5"},{"key":"3543_CR14","unstructured":"Li N, Zhao X, Ma C (2020) A model-based gait recognition method based on gait graph convolutional networks and joints relationship pyramid mapping. arXiv preprint arXiv:2005.08625"},{"key":"3543_CR15","doi-asserted-by":"publisher","first-page":"107069","DOI":"10.1016\/j.patcog.2019.107069","volume":"98","author":"R Liao","year":"2020","unstructured":"Liao R, Yu S, An W, Huang Y (2020) A model-based gait recognition method with body pose and human prior knowledge. Pattern Recogn 98:107069","journal-title":"Pattern Recogn"},{"issue":"4","key":"3543_CR16","doi-asserted-by":"publisher","first-page":"421","DOI":"10.1109\/TBIOM.2020.3008862","volume":"2","author":"W An","year":"2020","unstructured":"An W, Yu S, Makihara Y, Wu X, Xu C, Yu Y, Liao R, Yagi Y (2020) Performance evaluation of model-based gait on multi-view very large population database with pose sequences. IEEE Transactions on Biometrics, Behavior, and Identity Science 2(4):421\u2013430","journal-title":"IEEE Transactions on Biometrics, Behavior, and Identity Science"},{"key":"3543_CR17","doi-asserted-by":"publisher","first-page":"19196","DOI":"10.1109\/ACCESS.2020.2967845","volume":"8","author":"K Jun","year":"2020","unstructured":"Jun K, Lee D-W, Lee K, Lee S, Kim MS (2020) Feature extraction using an rnn autoencoder for skeleton-based abnormal gait recognition. IEEE Access 8:19196\u201319207","journal-title":"IEEE Access"},{"issue":"4","key":"3543_CR18","doi-asserted-by":"publisher","first-page":"1008935","DOI":"10.1371\/journal.pcbi.1008935","volume":"17","author":"J Stenum","year":"2021","unstructured":"Stenum J, Rossi C, Roemmich RT (2021) Two-dimensional video-based analysis of human gait using pose estimation. PLoS Computational Biology 17(4):1008935","journal-title":"PLoS Computational Biology"},{"key":"3543_CR19","doi-asserted-by":"crossref","unstructured":"Rao H, Wang S, Hu X, Tan M, Guo Y, Cheng J, Liu X, Hu B (2021) A self-supervised gait encoding approach with locality-awareness for 3d skeleton based person re-identification. IEEE Transactions on Pattern Analysis and Machine Intelligence","DOI":"10.1109\/TPAMI.2021.3092833"},{"key":"3543_CR20","doi-asserted-by":"crossref","unstructured":"Feng Y, Li Y, Luo J (2016) Learning effective gait features using lstm. In: 2016 23Rd international conference on pattern recognition (ICPR), pp 325\u2013330","DOI":"10.1109\/ICPR.2016.7899654"},{"key":"3543_CR21","doi-asserted-by":"crossref","unstructured":"Li X, Makihara Y, Xu C, Yagi Y, Yu S, Ren M (2020) End-to-end model-based gait recognition. In: Proceedings of the Asian conference on computer vision","DOI":"10.1007\/978-3-030-69535-4_1"},{"key":"3543_CR22","doi-asserted-by":"crossref","unstructured":"Zhao L, Guo L, Zhang R, Xie X, Ye X (2021) mmgaitset: multimodal based gait recognition for countering carrying and clothing changes. Appl Intell, pp 1\u201314","DOI":"10.1007\/s10489-021-02484-2"},{"issue":"2","key":"3543_CR23","doi-asserted-by":"publisher","first-page":"316","DOI":"10.1109\/TPAMI.2006.38","volume":"28","author":"J Han","year":"2005","unstructured":"Han J, Bhanu B (2005) Individual recognition using gait energy image. IEEE Trans Pattern Anal Mach Intell 28(2):316\u2013322","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"3543_CR24","doi-asserted-by":"crossref","unstructured":"Bashir K, Xiang T, Gong S (2009) Gait recognition using gait entropy image","DOI":"10.1049\/ic.2009.0230"},{"key":"3543_CR25","doi-asserted-by":"crossref","unstructured":"Sepas-Moghaddam A, Etemad A (2020) View-invariant gait recognition with attentive recurrent learning of partial representations. IEEE Transactions on Biometrics, Behavior, and Identity Science","DOI":"10.1109\/TBIOM.2020.3031470"},{"issue":"01","key":"3543_CR26","doi-asserted-by":"publisher","first-page":"1950027","DOI":"10.1142\/S0129065719500278","volume":"30","author":"X Wang","year":"2020","unstructured":"Wang X, Yan WQ (2020) Human gait recognition based on frame-by-frame gait energy images and convolutional long short-term memory. International Journal of Neural Systems 30(01):1950027","journal-title":"International Journal of Neural Systems"},{"key":"3543_CR27","doi-asserted-by":"crossref","unstructured":"Wolf T, Babaee M, Rigoll G (2016) Multi-view gait recognition using 3d convolutional neural networks. In: 2016 IEEE international conference on image processing (ICIP), pp 4165\u20134169","DOI":"10.1109\/ICIP.2016.7533144"},{"key":"3543_CR28","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I (2017) Attention is all you need. arXiv preprint arXiv:1706.03762"},{"key":"3543_CR29","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2018) Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805"},{"key":"3543_CR30","doi-asserted-by":"crossref","unstructured":"Khan S, Naseer M, Hayat M, Zamir SW, Khan FS, Shah M (2021) Transformers in vision: A survey. arXiv preprint arXiv:2101.01169","DOI":"10.1145\/3505244"},{"key":"3543_CR31","doi-asserted-by":"crossref","unstructured":"Girdhar R, Carreira J, Doersch C, Zisserman A (2019) Video action transformer network. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 244\u2013253","DOI":"10.1109\/CVPR.2019.00033"},{"key":"3543_CR32","doi-asserted-by":"crossref","unstructured":"Plizzari C, Cannici M, Matteucci M (2020) Spatial temporal transformer network for skeleton-based action recognition. arXiv preprint arXiv:2008.07404","DOI":"10.1007\/978-3-030-68796-0_50"},{"key":"3543_CR33","unstructured":"Liu Z, Luo S, Li W, Lu J, Wu Y, Li C, Yang L (2020) Convtransformer:, A convolutional transformer network for video frame synthesis. arXiv preprint arXiv:2011.10185"},{"key":"3543_CR34","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S et al (2020) An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929"},{"key":"3543_CR35","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3543_CR36","doi-asserted-by":"crossref","unstructured":"Yao L, Kusakunniran W, Wu Q, Xu J, Zhang J (2021) Collaborative feature learning for gait recognition under cloth changes. IEEE Transactions on Circuits and Systems for Video Technology","DOI":"10.1109\/TCSVT.2021.3112564"},{"key":"3543_CR37","doi-asserted-by":"crossref","unstructured":"Wei S. -E., Ramakrishna V, Kanade T, Sheikh Y (2016) Convolutional pose machines. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 4724\u20134732","DOI":"10.1109\/CVPR.2016.511"},{"key":"3543_CR38","doi-asserted-by":"crossref","unstructured":"Fu Y, Wei Y, Zhou Y, Shi H, Huang G, Wang X, Yao Z, Huang T (2019) Horizontal pyramid matching for person re-identification. In: Proceedings of the AAAI conference on artificial intelligence, vol 33, pp 8295\u20138302","DOI":"10.1609\/aaai.v33i01.33018295"},{"key":"3543_CR39","doi-asserted-by":"crossref","unstructured":"Sun Y, Zheng L, Yang Y, Tian Q, Wang S (2018) Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline). In: Proceedings of the European Conference on Computer Vision (ECCV), pp 480\u2013496","DOI":"10.1007\/978-3-030-01225-0_30"},{"key":"3543_CR40","doi-asserted-by":"crossref","unstructured":"Wang G, Yuan Y, Chen X, Li J, Zhou X (2018) Learning discriminative features with multiple granularities for person re-identification. In: Proceedings of the 26th ACM international conference on multimedia, pp 274\u2013282","DOI":"10.1145\/3240508.3240552"},{"key":"3543_CR41","first-page":"1097","volume":"25","author":"A Krizhevsky","year":"2012","unstructured":"Krizhevsky A, Sutskever I, Hinton GE (2012) Imagenet classification with deep convolutional neural networks. Advances in Neural Information Processing Systems 25:1097\u20131105","journal-title":"Advances in Neural Information Processing Systems"},{"key":"3543_CR42","unstructured":"Hermans A, Beyer L, Leibe B (2017) In defense of the triplet loss for person re-identification. arXiv:1703.07737"},{"key":"3543_CR43","unstructured":"Zhu Z, Guo X, Yang T, Huang J, Deng J, Huang G, Du D, Lu J, Zhou J (2021) Gait recognition in the wild: A benchmark. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 14789\u201314799"},{"key":"3543_CR44","doi-asserted-by":"crossref","unstructured":"Shiraga K, Makihara Y, Muramatsu D, Echigo T, Yagi Y (2016) Geinet: view-invariant gait recognition using a convolutional neural network. In: IEEE International Conference on Biometrics (ICB), pp 1\u20138","DOI":"10.1109\/ICB.2016.7550060"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03543-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-022-03543-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03543-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,23]],"date-time":"2024-09-23T09:45:37Z","timestamp":1727084737000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-022-03543-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,4,29]]},"references-count":44,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2023,1]]}},"alternative-id":["3543"],"URL":"https:\/\/doi.org\/10.1007\/s10489-022-03543-y","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,4,29]]},"assertion":[{"value":"22 March 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 April 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}