{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T15:45:29Z","timestamp":1743003929144,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":34,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819784899"},{"type":"electronic","value":"9789819784905"}],"license":[{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-97-8490-5_33","type":"book-chapter","created":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T09:16:05Z","timestamp":1730884565000},"page":"462-476","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Task-Aware Few-Shot Image Generation via Dynamic Local Distribution Estimation and Sampling"],"prefix":"10.1007","author":[{"given":"Zheng","family":"Gu","sequence":"first","affiliation":[]},{"given":"Wenbin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Tianyu","family":"Ding","sequence":"additional","affiliation":[]},{"given":"Zhengli","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jing","family":"Huo","sequence":"additional","affiliation":[]},{"given":"Kuihua","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Gao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,7]]},"reference":[{"key":"33_CR1","doi-asserted-by":"crossref","unstructured":"Antoniou, A., Storkey, A., Edwards, H.: Data augmentation generative adversarial networks. arXiv preprint arXiv:1711.04340 (2017)","DOI":"10.1007\/978-3-030-01424-7_58"},{"key":"33_CR2","unstructured":"Bartunov, S., Vetrov, D.: Few-shot generative modelling with generative matching networks. In: International Conference on Artificial Intelligence and Statistics, pp. 670\u2013678 (2018)"},{"key":"33_CR3","volume-title":"Scoremix: A scalable augmentation strategy for training gans with limited data","author":"J Cao","year":"2022","unstructured":"Cao, J., Luo, M., Yu, J., Yang, M.H., He, R.: Scoremix: A scalable augmentation strategy for training gans with limited data. IEEE Trans. Pattern Anal. Mach, Intell (2022)"},{"key":"33_CR4","doi-asserted-by":"crossref","unstructured":"Cao, Q., Shen, L., Xie, W., Parkhi, O.M., Zisserman, A.: Vggface2: A dataset for recognising faces across pose and age. In: Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition (FG), pp. 67\u201374. IEEE (2018)","DOI":"10.1109\/FG.2018.00020"},{"key":"33_CR5","unstructured":"Clou\u00e2tre, L., Demers, M.: Figr: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199 (2019)"},{"key":"33_CR6","doi-asserted-by":"crossref","unstructured":"Ding, G., Han, X., Wang, S., Wu, S., Jin, X., Tu, D., Huang, Q.: Attribute group editing for reliable few-shot image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11194\u201311203 (2022)","DOI":"10.1109\/CVPR52688.2022.01091"},{"key":"33_CR7","doi-asserted-by":"crossref","unstructured":"Duan, Y., Niu, L., Hong, Y., Zhang, L.: Weditgan: Few-shot image generation via latent space relocation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a038, pp. 1653\u20131661 (2024)","DOI":"10.1609\/aaai.v38i2.27932"},{"key":"33_CR8","unstructured":"Finn, C., Abbeel, P., Levine, S.: Model-agnostic meta-learning for fast adaptation of deep networks. In: Proceedings of the International Conference on Machine Learning (ICML), pp. 1126\u20131135 (2017)"},{"key":"33_CR9","unstructured":"Giannone, G., Nielsen, D., Winther, O.: Few-shot diffusion models. arXiv preprint arXiv:2205.15463 (2022)"},{"key":"33_CR10","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Proceedings of the Advances in Neural Information Processing Systems (NeurIPS), pp. 2672\u20132680 (2014)"},{"key":"33_CR11","doi-asserted-by":"crossref","unstructured":"Gu, Z., Li, W., Huo, J., Wang, L., Gao, Y.: Lofgan: Fusing local representations for few-shot image generation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00835"},{"key":"33_CR12","unstructured":"Hong, Y., Niu, L., Zhang, J., Liang, J., Zhang, L.: Deltagan: Towards diverse few-shot image generation with sample-specific delta. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020)"},{"key":"33_CR13","doi-asserted-by":"crossref","unstructured":"Hong, Y., Niu, L., Zhang, J., Zhang, L.: Matchinggan: Matching-based few-shot image generation. In: Proceedings of the International Conference on Multimedia and Expo (ICME) (2020)","DOI":"10.1109\/ICME46284.2020.9102917"},{"key":"33_CR14","doi-asserted-by":"crossref","unstructured":"Hong, Y., Niu, L., Zhang, J., Zhang, L.: Few-shot image generation using discrete content representation. In: Proceedings of the ACM International Conference on Multimedia (ACM MM), pp. 2796\u20132804 (2022)","DOI":"10.1145\/3503161.3548158"},{"key":"33_CR15","doi-asserted-by":"crossref","unstructured":"Hong, Y., Niu, L., Zhang, J., Zhao, W., Fu, C., Zhang, L.: F2gan: Fusing-and-filling gan for few-shot image generation. In: Proceedings of the ACM International Conference on Multimedia (ACM MM), pp. 2535\u20132543 (2020)","DOI":"10.1145\/3394171.3413561"},{"key":"33_CR16","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"33_CR17","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. In: Proceedings of the International Conference on Learning Representations (ICLR) (2014)"},{"key":"33_CR18","doi-asserted-by":"crossref","unstructured":"Li, W., Wang, L., Huo, J., Shi, Y., Gao, Y., Luo, J.: Asymmetric distribution measure for few-shot learning. In: Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), pp. 2957\u20132963 (2020)","DOI":"10.24963\/ijcai.2020\/409"},{"key":"33_CR19","unstructured":"Liang, W., Liu, Z., Liu, C.: Dawson: A domain adaptive few shot generation framework. arXiv preprint arXiv:2001.00576 (2020)"},{"key":"33_CR20","first-page":"16331","volume":"34","author":"H Ling","year":"2021","unstructured":"Ling, H., Kreis, K., Li, D., Kim, S.W., Torralba, A., Fidler, S.: Editgan: High-precision semantic image editing. Adv. Neural Inf. Process. Syst. (NeurIPS) 34, 16331\u201316345 (2021)","journal-title":"Adv. Neural Inf. Process. Syst. (NeurIPS)"},{"key":"33_CR21","doi-asserted-by":"crossref","unstructured":"Liu, M.Y., Huang, X., Mallya, A., Karras, T., Aila, T., Lehtinen, J., Kautz, J.: Few-shot unsupervised image-to-image translation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 10551\u201310560 (2019)","DOI":"10.1109\/ICCV.2019.01065"},{"key":"33_CR22","doi-asserted-by":"crossref","unstructured":"Liu, X., Wang, B., Wang, H., Yi, L.: Few-shot physically-aware articulated mesh generation via hierarchical deformation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 854\u2013864 (2023)","DOI":"10.1109\/ICCV51070.2023.00085"},{"key":"33_CR23","doi-asserted-by":"crossref","unstructured":"Nilsback, M.E., Zisserman, A.: Automated flower classification over a large number of classes. In: Proceedings of the Indian Conference on Computer Vision, Graphics and Image Processing (ICVGIP), pp. 722\u2013729. IEEE (2008)","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"33_CR24","unstructured":"Odena, A., Olah, C., Shlens, J.: Conditional image synthesis with auxiliary classifier gans. In: Proceedings of the International Conference on Machine Learning (ICML), pp. 2642\u20132651. PMLR (2017)"},{"key":"33_CR25","doi-asserted-by":"crossref","unstructured":"Ojha, U., Li, Y., Lu, J., Efros, A.A., Lee, Y.J., Shechtman, E., Zhang, R.: Few-shot image generation via cross-domain correspondence. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10743\u201310752 (2021)","DOI":"10.1109\/CVPR46437.2021.01060"},{"key":"33_CR26","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"33_CR27","unstructured":"Santoro, A., Bartunov, S., Botvinick, M., Wierstra, D., Lillicrap, T.: Meta-learning with memory-augmented neural networks. In: Proceedings of the International Conference on Machine Learning (ICML), pp. 1842\u20131850 (2016)"},{"key":"33_CR28","doi-asserted-by":"crossref","unstructured":"Seo, J., Kang, J.S., Park, G.M.: Lfs-gan: Lifelong few-shot image generation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 11356\u201311366 (2023)","DOI":"10.1109\/ICCV51070.2023.01043"},{"key":"33_CR29","unstructured":"Snell, J., Swersky, K., Zemel, R., Zemel, R.: Prototypical networks for few-shot learning. In: Proceedings of the Conference on Neural Information Processing Systems (NeurIPS), pp. 4077\u20134087 (2017)"},{"key":"33_CR30","unstructured":"Vinyals, O., Blundell, C., Lillicrap, T., Wierstra, D., et\u00a0al.: Matching networks for one shot learning. In: Proceedings of the Advances in Neural Information Processing Systems (NeurIPS), vol.\u00a029 (2016)"},{"key":"33_CR31","doi-asserted-by":"crossref","unstructured":"Yang, M., Wang, Z., Chi, Z., Feng, W.: Wavegan: Frequency-aware gan for high-fidelity few-shot image generation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 1\u201317. Springer (2022)","DOI":"10.1007\/978-3-031-19784-0_1"},{"key":"33_CR32","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Ding, H., Huang, H., Cheung, N.M.: A closer look at few-shot image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9140\u20139150 (2022)","DOI":"10.1109\/CVPR52688.2022.00893"},{"key":"33_CR33","doi-asserted-by":"crossref","unstructured":"Zheng, C., Liu, B., Zhang, H., Xu, X., He, S.: Where is my spot? Few-shot image generation via latent subspace optimization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3272\u20133281 (2023)","DOI":"10.1109\/CVPR52729.2023.00319"},{"key":"33_CR34","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV), pp. 2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-8490-5_33","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T09:17:37Z","timestamp":1730884657000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-8490-5_33"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,7]]},"ISBN":["9789819784899","9789819784905"],"references-count":34,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-8490-5_33","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,7]]},"assertion":[{"value":"7 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Urumqi","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2024.prcv.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}