{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T12:19:33Z","timestamp":1773490773135,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":38,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819570744","type":"print"},{"value":"9789819570751","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-7075-1_11","type":"book-chapter","created":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T11:12:24Z","timestamp":1773486744000},"page":"171-187","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["EDGM: Efficient and\u00a0Dynamic Generative Model for\u00a0Dataset Distillation"],"prefix":"10.1007","author":[{"given":"Gaoyuan","family":"Ma","sequence":"first","affiliation":[]},{"given":"Yonghui","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Wei","family":"He","sequence":"additional","affiliation":[]},{"given":"Lizhen","family":"Cui","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,15]]},"reference":[{"key":"11_CR1","doi-asserted-by":"crossref","unstructured":"Cazenavette, G., Wang, T., Torralba, A., Efros, A.A., Zhu, J.Y.: Dataset distillation by matching training trajectories (2022). https:\/\/arxiv.org\/abs\/2203.11932","DOI":"10.1109\/CVPR52688.2022.01045"},{"key":"11_CR2","doi-asserted-by":"crossref","unstructured":"Chang, T.Y., Lu, C.J.: Tinygan: Distilling Biggan for conditional image generation. In: Proceedings of the Asian conference on computer vision (2020)","DOI":"10.1007\/978-3-030-69538-5_31"},{"key":"11_CR3","unstructured":"CS231N, S.U.: Tiny imagenet visual recognition challenge (2015). http:\/\/cs231n.stanford.edu\/tiny-imagenet-200.zip"},{"key":"11_CR4","unstructured":"Cui, J., Li, Z., Ma, X., Bi, X., Luo, Y., Shen, Z.: Dataset distillation via committee voting (2025). https:\/\/arxiv.org\/abs\/2501.07575"},{"key":"11_CR5","unstructured":"Cui, J., Wang, R., Si, S., Hsieh, C.J.: Scaling up dataset distillation to imagenet-1k with constant memory (2023). https:\/\/arxiv.org\/abs\/2211.10586"},{"key":"11_CR6","doi-asserted-by":"crossref","unstructured":"Cui, X., Qin, Y., Zhou, W., Li, H., Li, H.: Optical: Leveraging optimal transport for contribution allocation in dataset distillation. In: Proceedings of the Computer Vision and Pattern Recognition Conference, pp. 15245\u201315254 (2025)","DOI":"10.1109\/CVPR52734.2025.01420"},{"key":"11_CR7","doi-asserted-by":"publisher","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255 (2009). https:\/\/doi.org\/10.1109\/CVPR.2009.5206848","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"11_CR8","doi-asserted-by":"crossref","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, volume 1 (long and short papers), pp. 4171\u20134186 (2019)","DOI":"10.18653\/v1\/N19-1423"},{"key":"11_CR9","doi-asserted-by":"publisher","first-page":"119443","DOI":"10.52202\/079017-3796","volume":"37","author":"J Du","year":"2024","unstructured":"Du, J., Hu, J., Huang, W., Zhou, J.T., et al.: Diversity-driven synthesis: enhancing dataset distillation through directed weight adjustment. Adv. Neural. Inf. Process. Syst. 37, 119443\u2013119465 (2024)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"11_CR10","unstructured":"Gu, J., et al.: Efficient dataset distillation via minimax diffusion (2024). https:\/\/arxiv.org\/abs\/2311.15529"},{"key":"11_CR11","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local nash equilibrium. In: Neural Information Processing Systems (2017). https:\/\/api.semanticscholar.org\/CorpusID:326772"},{"key":"11_CR12","doi-asserted-by":"crossref","unstructured":"Kabkab, M., Samangouei, P., Chellappa, R.: Task-aware compressed sensing with generative adversarial networks. In: Proceedings of the AAAI Conference on Artificial Intelligence (2018)","DOI":"10.1609\/aaai.v32i1.11883"},{"key":"11_CR13","doi-asserted-by":"crossref","unstructured":"Kanekar, B., Savarkar, A., Ramakrishnan, G., Jadhav, K.: Integrations: Informed subset selection based generation for medical imaging in resource constrained setting. In: 2025 IEEE 22nd International Symposium on Biomedical Imaging (ISBI), pp.\u00a01\u20134. IEEE (2025)","DOI":"10.1109\/ISBI60581.2025.10980935"},{"key":"11_CR14","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"11_CR15","unstructured":"Khatib, S.K.A., ElHagry, A., Shao, S., Shen, Z.: Od3: optimization-free dataset distillation for object detection (2025). https:\/\/arxiv.org\/abs\/2506.01942"},{"key":"11_CR16","unstructured":"Kingma, D.P., Welling, M., et\u00a0al.: Auto-encoding variational bayes (2013)"},{"key":"11_CR17","unstructured":"Krizhevsky, A., Hinton, G.: Learning multiple layers of features from tiny images. Handbook Syst. Autoimmune Diseases 1(4) (2009)"},{"key":"11_CR18","doi-asserted-by":"crossref","unstructured":"Li, M., et al.: Dataset distillation in medical imaging: a feasibility study (2025). https:\/\/arxiv.org\/abs\/2407.14429","DOI":"10.1117\/12.3047345"},{"key":"11_CR19","doi-asserted-by":"crossref","unstructured":"Liu, Y., Gu, J., Wang, K., Zhu, Z., Jiang, W., You, Y.: Dream: efficient dataset distillation by representative matching. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 17314\u201317324 (2023)","DOI":"10.1109\/ICCV51070.2023.01588"},{"key":"11_CR20","unstructured":"Nguyen, T., Novak, R., Xiao, L., Lee, J.: Dataset distillation with infinitely wide convolutional networks (2022). https:\/\/arxiv.org\/abs\/2107.13034"},{"key":"11_CR21","unstructured":"Oord, A.v.d., et al.: Wavenet: a generative model for raw audio. arXiv preprint arXiv:1609.03499 (2016)"},{"issue":"8","key":"11_CR22","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"11_CR23","doi-asserted-by":"crossref","unstructured":"Sajedi, A., Khaki, S., Amjadian, E., Liu, L.Z., Lawryshyn, Y.A., Plataniotis, K.N.: Datadam: efficient dataset distillation with attention matching. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 17097\u201317107 (2023)","DOI":"10.1109\/ICCV51070.2023.01568"},{"key":"11_CR24","unstructured":"Sajjadi, M.S.M., Bachem, O., Lucic, M., Bousquet, O., Gelly, S.: Assessing generative models via precision and recall (2018). https:\/\/arxiv.org\/abs\/1806.00035"},{"key":"11_CR25","unstructured":"Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X.: Improved techniques for training GANs. In: Advances in Neural Information Processing Systems (NeurIPS), pp. 2234\u20132242 (2016). https:\/\/papers.nips.cc\/paper\/6125-improved-techniques-for-training-gans"},{"key":"11_CR26","doi-asserted-by":"crossref","unstructured":"Su, D., Hou, J., Gao, W., Tian, Y., Tang, B.: D4: Dataset distillation via disentangled diffusion model. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5809\u20135818 (2024)","DOI":"10.1109\/CVPR52733.2024.00555"},{"key":"11_CR27","doi-asserted-by":"crossref","unstructured":"Sun, P., Shi, B., Yu, D., Lin, T.: On the diversity and realism of distilled dataset: an efficient dataset distillation paradigm. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9390\u20139399 (2024)","DOI":"10.1109\/CVPR52733.2024.00897"},{"key":"11_CR28","unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. Adv. Neural Inf. Process. Syst. 27 (2014)"},{"key":"11_CR29","doi-asserted-by":"crossref","unstructured":"Wang, K., Gu, J., Zhang, H., Zhou, D., Zhu, Z., Jiang, W., You, Y.: Dim: Distilling dataset into generative model. In: European Conference on Computer Vision, pp. 42\u201359. Springer (2025)","DOI":"10.1007\/978-3-031-93806-1_4"},{"key":"11_CR30","unstructured":"Wang, K., et al.: Cafe: learning to condense dataset by aligning features (2022). https:\/\/arxiv.org\/abs\/2203.01531"},{"key":"11_CR31","doi-asserted-by":"crossref","unstructured":"Wang, S., et al.: Dataset distillation with neural characteristic function: a minmax perspective. In: Proceedings of the Computer Vision and Pattern Recognition Conference, pp. 25570\u201325580 (2025)","DOI":"10.1109\/CVPR52734.2025.02381"},{"key":"11_CR32","unstructured":"Wang, T., Zhu, J., Torralba, A., Efros, A.A.: Dataset distillation. arXiv preprint arXiv:1811.10959 (2018)"},{"key":"11_CR33","unstructured":"Xue, E., Li, Y., Liu, H., Wang, P., Shen, Y., Wang, H.: Towards adversarially robust dataset distillation by curvature regularization (2025). https:\/\/arxiv.org\/abs\/2403.10045"},{"key":"11_CR34","first-page":"73582","volume":"36","author":"Z Yin","year":"2023","unstructured":"Yin, Z., Xing, E., Shen, Z.: Squeeze, recover and relabel: Dataset condensation at imagenet scale from a new perspective. Adv. Neural. Inf. Process. Syst. 36, 73582\u201373603 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"11_CR35","doi-asserted-by":"crossref","unstructured":"Zhang, L., et al.: Accelerating dataset distillation via model augmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11950\u201311959 (2023)","DOI":"10.1109\/CVPR52729.2023.01150"},{"key":"11_CR36","unstructured":"Zhao, B., Bilen, H.: Dataset condensation with differentiable Siamese augmentation (2021). https:\/\/arxiv.org\/abs\/2102.08259"},{"key":"11_CR37","unstructured":"Zhao, B., Mopuri, K.R., Bilen, H.: Dataset condensation with gradient matching (2021). https:\/\/arxiv.org\/abs\/2006.05929"},{"key":"11_CR38","unstructured":"Zhou, Y., Nezhadarya, E., Ba, J.: Dataset distillation using neural feature regression (2022). https:\/\/arxiv.org\/abs\/2206.00719"}],"container-title":["Lecture Notes in Computer Science","PRICAI 2025: Trends in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-7075-1_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T11:12:28Z","timestamp":1773486748000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-7075-1_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819570744","9789819570751"],"references-count":38,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-7075-1_11","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"15 March 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRICAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pacific Rim International Conference on Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Wellington","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"New Zealand","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 November 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 November 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"pricai2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.pricai.org\/2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}