{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:29:44Z","timestamp":1766068184686,"version":"3.43.0"},"reference-count":87,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"National Research Foundation, Singapore, under its AI Singapore Program","award":["AISG2-RP-2021-025"],"award-info":[{"award-number":["AISG2-RP-2021-025"]}]},{"name":"National Research Foundation, Singapore, under its National Research Foundation Fellowship","award":["NRF-NRFF15-2023-0001"],"award-info":[{"award-number":["NRF-NRFF15-2023-0001"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62472138"],"award-info":[{"award-number":["62472138"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"Agency for Science, Technology, and Research","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Start-Up Grant from Nanyang Technological University"}],"content-domain":{"domain":[],"crossmark-restriction":false},
"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1109\/tnnls.2025.3546269","type":"journal-article","created":{"date-parts":[[2025,3,21]],"date-time":"2025-03-21T20:41:51Z","timestamp":1742589711000},"page":"15070-15084","source":"Crossref","is-referenced-by-count":1,"title":["Unveiling the Tapestry: The Interplay of Generalization and Forgetting in Continual Learning"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1889-1409","authenticated-orcid":false,"given":"Zenglin","family":"Shi","sequence":"first","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University (NTU), Jurong West, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7665-0822","authenticated-orcid":false,"given":"Jie","family":"Jing","sequence":"additional","affiliation":[{"name":"NTU, Jurong West, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7224-6726","authenticated-orcid":false,"given":"Ying","family":"Sun","sequence":"additional","affiliation":[{"name":"A*STAR, Fusionopolis, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4103-3824","authenticated-orcid":false,"given":"Joo-Hwee","family":"Lim","sequence":"additional","affiliation":[{"name":"A*STAR, Fusionopolis, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2694-7097","authenticated-orcid":false,"given":"Mengmi","family":"Zhang","sequence":"additional","affiliation":[{"name":"NTU, Jurong West, Singapore"}]}],"member":"263","reference":[
{"article-title":"Shape-texture debiased neural network training","volume-title":"Proc. ICLR","author":"Li","key":"ref1"},{"key":"ref2","article-title":"AugMix: A simple data processing method to improve robustness and uncertainty","author":"Hendrycks","year":"2019","journal-title":"arXiv:1912.02781"},{"key":"ref3","article-title":"Shape or texture: Understanding discriminative features in CNNs","author":"Islam","year":"2021","journal-title":"arXiv:2101.11604"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/s41583-023-00705-w"},{"volume-title":"Self-supervised Continual Learning","year":"2023","author":"Thakral","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3511808.3557698"},{"key":"ref7","first-page":"1818","article-title":"Meta-learning representations for continual learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Javed"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1611835114"},
{"key":"ref9","first-page":"3987","article-title":"Continual learning through synaptic intelligence","volume-title":"Proc. ICML","author":"Zenke"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01219-9_9"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2773081"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01258-8_15"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00046"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58565-5_6"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00390"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1080\/09540099550039318"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.587"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00092"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01226"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/334"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20050-2_40"},{"key":"ref22","article-title":"Learning to learn without forgetting by maximizing transfer and minimizing interference","author":"Riemer","year":"2018","journal-title":"arXiv:1810.11910"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3294495"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2023.05.006"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/s0079-7421(08)60536-8"},{"key":"ref26","article-title":"An empirical investigation of catastrophic forgetting in gradient-based neural networks","author":"Goodfellow","year":"2013","journal-title":"arXiv:1312.6211"},
{"key":"ref27","article-title":"Three scenarios for continual learning","author":"van de Ven","year":"2019","journal-title":"arXiv:1904.07734"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1503.02531"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553517"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i11.17159"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106163"},{"article-title":"Continual learning with deep generative replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Shin","key":"ref32"},{"key":"ref33","article-title":"Pseudo-Recursal: Solving the catastrophic forgetting problem in deep neural networks","author":"Atkinson","year":"2018","journal-title":"arXiv:1802.03875"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00121"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534437"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00400"},{"key":"ref37","first-page":"6582","article-title":"FeCAM: Exploiting the heterogeneity of class distributions in exemplar-free continual learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Goswami"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00390"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00581"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.753"},{"key":"ref41","first-page":"12648","article-title":"Random path selection for continual learning","volume-title":"Proc. NeurIPS","author":"Rajasegaran"},
{"key":"ref42","article-title":"Compacting, picking and growing for unforgetting continual learning","volume-title":"Proc. NeurIPS","volume":"32","author":"Hung"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00303"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01141"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4899-7687-1_79"},{"key":"ref46","article-title":"Improved regularization of convolutional neural networks with cutout","author":"DeVries","year":"2017","journal-title":"arXiv:1708.04552"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00020"},{"key":"ref48","article-title":"ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness","author":"Geirhos","year":"2018","journal-title":"arXiv:1811.12231"},{"key":"ref49","first-page":"19000","article-title":"The origins and prevalence of texture bias in convolutional neural networks","volume-title":"Proc. NeurIPS","volume":"33","author":"Hermann"},{"key":"ref50","article-title":"Generalizing to unseen domains via adversarial data augmentation","volume-title":"Proc. NeurIPS","volume":"31","author":"Volpi"},{"key":"ref51","first-page":"11987","article-title":"Improved OOD generalization via adversarial training and pretraing","volume-title":"Proc. ICML","author":"Yi"},{"key":"ref52","article-title":"Improving robustness without sacrificing accuracy with patch Gaussian augmentation","author":"Lopes","year":"2019","journal-title":"arXiv:1906.02611"},{"article-title":"Robustness may be at odds with accuracy","volume-title":"Proc. ICLR","author":"Tsipras","key":"ref53"},
{"key":"ref54","first-page":"8828","article-title":"Informative dropout for robust representation learning: A shape-bias perspective","volume-title":"Proc. ICML","author":"Shi"},{"key":"ref55","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3367329"},{"key":"ref57","article-title":"Benchmarking neural network robustness to common corruptions and perturbations","author":"Hendrycks","year":"2019","journal-title":"arXiv:1903.12261"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_31"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbiosc.2016.01.007"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1098\/rspb.1980.0020"},{"key":"ref62","first-page":"187","article-title":"How much data are augmentations worth? An investigation into scaling laws, invariance, and implicit regularization","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Geiping"},{"key":"ref63","article-title":"Salient ImageNet: How to discover spurious features in deep learning?","author":"Singla","year":"2021","journal-title":"arXiv:2110.04301"},{"key":"ref64","first-page":"38516","article-title":"On feature learning in the presence of spurious correlations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Izmailov"},{"key":"ref65","first-page":"12857","article-title":"Examining and combating spurious features under distribution shift","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139","author":"Zhou"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445883"},
{"key":"ref67","article-title":"Visualizing the loss landscape of neural nets","volume-title":"Proc. NeurIPS","volume":"31","author":"Li"},{"key":"ref68","article-title":"Deep ensembles: A loss landscape perspective","author":"Fort","year":"2019","journal-title":"arXiv:1912.02757"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.167"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"volume-title":"Painter by Numbers. WikiArt","year":"2016","author":"Nichol","key":"ref71"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-11018-5_32"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00860"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-87237-3_46"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58529-7_16"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00907"},{"article-title":"Learning multiple layers of features from tiny images","year":"2009","author":"Krizhevsky","key":"ref77"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00823"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01560"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref81","first-page":"16165","article-title":"Beyond not-forgetting: Continual learning with backward knowledge transfer","volume-title":"Proc. NeurIPS","volume":"35","author":"Lin"},{"key":"ref82","article-title":"Don\u2019t forget, there is more than forgetting: New metrics for continual learning","author":"D\u00edaz-Rodr\u00edguez","year":"2018","journal-title":"arXiv:1810.13166"},
{"article-title":"Sharpness-aware minimization for efficiently improving generalization","volume-title":"Proc. ICLR","author":"Foret","key":"ref83"},{"key":"ref84","article-title":"Improving vision transformers for incremental learning","author":"Yu","year":"2021","journal-title":"arXiv:2112.06103"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/38.946629"},{"article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","volume-title":"Proc. ICLR","author":"Dosovitskiy","key":"ref86"},{"key":"ref87","article-title":"Are convolutional neural networks or transformers more like human vision?","author":"Tuli","year":"2021","journal-title":"arXiv:2105.07197"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5962385\/11114436\/10937268.pdf?arnumber=10937268","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,6]],"date-time":"2025-08-06T18:00:42Z","timestamp":1754503242000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10937268\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8]]},"references-count":87,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2025.3546269","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"type":"print","value":"2162-237X"},{"type":"electronic","value":"2162-2388"}],"subject":[],"published":{"date-parts":[[2025,8]]}}}