{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T12:37:29Z","timestamp":1769603849308,"version":"3.49.0"},"reference-count":24,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["82172033"],"award-info":[{"award-number":["82172033"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Signal Process. Lett."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/lsp.2024.3484289","type":"journal-article","created":{"date-parts":[[2024,10,21]],"date-time":"2024-10-21T17:27:43Z","timestamp":1729531663000},"page":"3034-3038","source":"Crossref","is-referenced-by-count":1,"title":["Efficient Training Acceleration via Sample-Wise Dynamic Probabilistic Pruning"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7397-3941","authenticated-orcid":false,"given":"Feicheng","family":"Huang","sequence":"first","affiliation":[{"name":"Key Laboratory of Multimedia Trusted Perception and Efficient Computing, Ministry of Education of China, Xiamen University, Xiamen, China"}]},{"given":"Wenbo","family":"Zhou","sequence":"additional","affiliation":[{"name":"Key Laboratory of Multimedia Trusted Perception and Efficient Computing, Ministry of Education of China, Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3913-9400","authenticated-orcid":false,"given":"Yue","family":"Huang","sequence":"additional","affiliation":[{"name":"Key Laboratory of Multimedia Trusted Perception and Efficient Computing, Ministry of Education of China, Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2288-5287","authenticated-orcid":false,"given":"Xinghao","family":"Ding","sequence":"additional","affiliation":[{"name":"Key Laboratory of Multimedia Trusted Perception and Efficient Computing, Ministry of Education of China, Xiamen University, Xiamen, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Scaling laws for neural language models","author":"Kaplan","year":"2020"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2021.3101670"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2021.3054315"},{"key":"ref4","first-page":"1","article-title":"Dataset pruning: Reducing training data by examining generalization influence","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Yang","year":"2023"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02477"},{"key":"ref6","first-page":"1","article-title":"An empirical study of example forgetting during deep neural network learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Toneva","year":"2019"},{"key":"ref7","first-page":"20596","article-title":"Deep learning on a data diet: Finding important examples early in training","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Paul","year":"2021"},{"key":"ref8","first-page":"1","article-title":"Trivial or impossible—dichotomous data difficulty masks model differences (on imagenet and beyond)","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Meding","year":"2022"},{"key":"ref9","first-page":"19523","article-title":"Beyond neural scaling laws: Beating power law scaling via data pruning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Sorscher","year":"2022"},{"key":"ref10","first-page":"1","article-title":"Selection via proxy: Efficient data selection for deep learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Coleman","year":"2020"},{"key":"ref11","first-page":"1","article-title":"Infobatch: Lossless training speed up by unbiased dynamic data pruning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Qin","year":"2024"},{"key":"ref12","first-page":"1","article-title":"Coverage-centric coreset selection for high pruning rates","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zheng","year":"2023"},{"key":"ref13","first-page":"1","article-title":"Moderate coreset: A universal method of data selection for real-world data-efficient deep learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Xia","year":"2023"},{"key":"ref14","first-page":"1","article-title":"Active learning for convolutional neural networks: A core-set approach","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sener","year":"2018"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i9.16988"},{"key":"ref16","first-page":"14879","article-title":"Coresets via bilevel optimization for continual learning and streaming","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Borsos","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4899-7502-7_79-1"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2024.3401036"},{"key":"ref19","first-page":"1","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"issue":"7","key":"ref20","first-page":"16","article-title":"Tiny imagenet visual recognition challenge","volume":"7","author":"Le","year":"2015","journal-title":"CS 231 N"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1117\/12.2520589"},{"key":"ref23","first-page":"1885","article-title":"Understanding black-box predictions via influence functions","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Koh","year":"2017"},{"key":"ref24","first-page":"6950","article-title":"Coresets for data-efficient training of machine learning models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mirzasoleiman","year":"2020"}],"container-title":["IEEE Signal Processing Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/97\/10380231\/10723806.pdf?arnumber=10723806","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T01:05:48Z","timestamp":1732669548000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10723806\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/lsp.2024.3484289","relation":{},"ISSN":["1070-9908","1558-2361"],"issn-type":[{"value":"1070-9908","type":"print"},{"value":"1558-2361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}