{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T19:01:26Z","timestamp":1772823686527,"version":"3.50.1"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Science and Engineering Research Board-Department of Science and Technology (SERB-DST), Government of India","award":["CRG\/2020\/005465"],"award-info":[{"award-number":["CRG\/2020\/005465"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/tnnls.2024.3359716","type":"journal-article","created":{"date-parts":[[2024,2,27]],"date-time":"2024-02-27T14:22:33Z","timestamp":1709043753000},"page":"4625-4638","source":"Crossref","is-referenced-by-count":4,"title":["GSSTU: Generative Spatial Self-Attention Transformer Unit for Enhanced Video Prediction"],"prefix":"10.1109","volume":"36","author":[{"given":"Binit","family":"Singh","sequence":"first","affiliation":[{"name":"Department of Computer Science and Engineering, IIT (Banaras Hindu University) Varanasi, Varanasi, India"}]},{"given":"Divij","family":"Singh","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, IIT (Banaras Hindu University) Varanasi, Varanasi, India"}]},{"given":"Rohan","family":"Kaushal","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, IIT (Banaras Hindu University) Varanasi, Varanasi, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9675-8104","authenticated-orcid":false,"given":"Agrya","family":"Halder","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, IIT (Banaras Hindu University) Varanasi, Varanasi, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5805-6563","authenticated-orcid":false,"given":"Pratik","family":"Chattopadhyay","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, IIT (Banaras Hindu University) Varanasi, Varanasi, India"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1406.1078"},{"key":"ref3","first-page":"802","article-title":"Convolutional LSTM network: A machine learning approach for precipitation nowcasting","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Shi"},{"key":"ref4","article-title":"Stochastic adversarial video prediction","author":"Lee","year":"2018","journal-title":"arXiv:1804.01523"},{"key":"ref5","article-title":"Deep multi-scale video prediction beyond mean square error","volume-title":"Proc. 4th Int. Conf. Learn. Represent.","author":"Mathieu"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00191"},{"key":"ref7","article-title":"Stochastic variational video prediction","volume-title":"Proc. 6th Int. Conf. Learn. Represent.","author":"Babaeizadeh"},{"key":"ref8","first-page":"1174","article-title":"Stochastic video generation with a learned prior","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Denton"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00770"},{"key":"ref10","first-page":"81","article-title":"High fidelity video prediction with large stochastic recurrent neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Villegas"},{"key":"ref11","first-page":"92","article-title":"Unsupervised learning of object structure and dynamics from videos","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Minderer"},{"key":"ref12","first-page":"3233","article-title":"Stochastic latent residual video prediction","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Franceschi"},{"key":"ref13","article-title":"Diverse video generation using a Gaussian process trigger","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Shrivastava"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01446"},{"key":"ref15","article-title":"Video (language) modeling: A baseline for generative models of natural videos","author":"Ranzato","year":"2014","journal-title":"arXiv:1412.6604"},{"key":"ref16","first-page":"1925","article-title":"Modeling deep temporal dependencies with recurrent grammar cells","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Michalski"},{"key":"ref17","first-page":"843","article-title":"Unsupervised learning of video representations using LSTMs","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Srivastava"},{"key":"ref18","first-page":"5617","article-title":"Deep learning for precipitation nowcasting: A benchmark and a new model","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Shi"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-4012"},{"key":"ref20","first-page":"3560","article-title":"Learning to generate long-term future via hierarchical prediction","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Villegas"},{"key":"ref21","first-page":"6038","article-title":"Hierarchical long-term video prediction without supervision","volume-title":"Proc. Intl. Conf. Mach. Learn.","author":"Villegas"},{"key":"ref22","first-page":"11570","article-title":"Variational temporal abstraction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Kim"},{"key":"ref23","first-page":"879","article-title":"PredRNN: Recurrent neural networks for predictive learning using spatiotemporal LSTMs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Wang"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3165153"},{"key":"ref25","article-title":"Eidetic 3D LSTM: A model for video prediction and beyond","volume-title":"Proc. Intl. Conf. Learn. Represent.","author":"Wang"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3247103"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3175605"},{"key":"ref28","article-title":"Pyramid self-attention polymerization learning for semi-supervised skeleton-based action recognition","author":"Xu","year":"2023","journal-title":"arXiv:2302.02327"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3222871"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6819"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00307"},{"key":"ref32","first-page":"26950","article-title":"MAU: A motion-aware unit for video prediction and beyond","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","author":"Chang"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR56361.2022.9956707"},{"key":"ref34","article-title":"VideoGPT: Video generation using VQ-VAE and transformers","author":"Yan","year":"2021","journal-title":"arXiv:2104.10157"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref36","article-title":"MSNet: Mutual suppression network for disentangled video representations","author":"Lee","year":"2018","journal-title":"arXiv:1804.04810"},{"key":"ref37","article-title":"Efficient and information-preserving future frame prediction and beyond","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Yu"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01149"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00317"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2011.6115889"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2004.1334462"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2007.4409092"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/PETS-WINTER.2009.5399556"},{"issue":"86","key":"ref44","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10908444\/10449424.pdf?arnumber=10449424","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,5]],"date-time":"2025-12-05T18:39:23Z","timestamp":1764959963000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10449424\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":44,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3359716","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}