{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T21:40:23Z","timestamp":1770846023839,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,5]],"date-time":"2025-10-05T00:00:00Z","timestamp":1759622400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,5]],"date-time":"2025-10-05T00:00:00Z","timestamp":1759622400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,5]]},"DOI":"10.1109\/smc58881.2025.11343662","type":"proceedings-article","created":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T20:54:44Z","timestamp":1769633684000},"page":"540-545","source":"Crossref","is-referenced-by-count":0,"title":["Integration of BVG-LS into a Deep Neural Network Architecture Designed for EEG Signal Classification"],"prefix":"10.1109","author":[{"given":"Takuto","family":"Fukushima","sequence":"first","affiliation":[{"name":"Meiji University,Graduate School of Science and Technology,Department of Computer Science,Kawasaki,Japan,214-8571"}]},{"given":"Ryusuke","family":"Miyamoto","sequence":"additional","affiliation":[{"name":"Meiji University,School of Science and Technology,Department of Computer Science,Kawasaki,Japan,214-8571"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1111\/j.1469-8986.2006.00456.x"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-84996-272-8_6"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/app7121239"},{"issue":"2","key":"ref4","first-page":"82","article-title":"Games, Game-play, and BCI: The State of the Art","volume-title":"T-CIAIG","volume":"5","author":"Marshall"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.chb.2013.09.018"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-021-06352-5"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-96-0901-7_4"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1161\/01.CIR.101.23.e215"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2004.827072"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01385"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-96-0966-6_12"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref13","article-title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","volume-title":"Int. Conf. Learn. Represent","author":"Dosovitskiy"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01555-8"},{"key":"ref17","volume-title":"Fractals everywhere.","author":"Barnsley","year":"2014"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2723009"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1126\/science.aab3050"},{"key":"ref21","article-title":"Partial Fine-Tuning: A Successor to Full Fine-Tuning for Vision Transformers","author":"Ye","year":"2023"},{"key":"ref22","article-title":"Surgical fine-tuning improves adaptation to distribution shifts","volume-title":"Int. Conf. Learn. Represent","author":"Lee"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00494"},{"key":"ref24","article-title":"Adam: A Method for Stochastic Optimization","volume-title":"Int. Conf. Learn. Represent","author":"Kingma"},{"key":"ref25","article-title":"BEiT: BERT Pre-Training of Image Transformers","volume-title":"Int. Conf. Learn. Represent","author":"Bao"},{"key":"ref26","article-title":"Clip itself is a strong fine-tuner: Achieving 85.7% and 88.0% top-1 accuracy with vit-b and vit-l on imagenet","author":"Dong","year":"2022","journal-title":"arXiv"},{"key":"ref27","article-title":"DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection","volume-title":"Int. Conf. Learn. Represent","author":"Zhang"},{"key":"ref28","article-title":"Vision Transformer Adapter for Dense Predictions","volume-title":"Int. Conf. Learn. Represent","author":"Chen"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"}],"event":{"name":"2025 IEEE International Conference on Systems, Man, and Cybernetics (SMC)","location":"Vienna, Austria","start":{"date-parts":[[2025,10,5]]},"end":{"date-parts":[[2025,10,8]]}},"container-title":["2025 IEEE International Conference on Systems, Man, and Cybernetics (SMC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11342430\/11342431\/11343662.pdf?arnumber=11343662","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T20:52:22Z","timestamp":1770843142000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11343662\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,5]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/smc58881.2025.11343662","relation":{},"subject":[],"published":{"date-parts":[[2025,10,5]]}}}