{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T12:24:49Z","timestamp":1768393489027,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":31,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819556786","type":"print"},{"value":"9789819556793","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-5679-3_22","type":"book-chapter","created":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T18:36:59Z","timestamp":1768329419000},"page":"315-329","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["FairMoE: Decoupled Expert Learning for\u00a0Unbiased Customized Face Generation"],"prefix":"10.1007","author":[{"given":"Yu","family":"Han","sequence":"first","affiliation":[]},{"given":"Wenhao","family":"Li","sequence":"additional","affiliation":[]},{"given":"Feng","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Shan","family":"You","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Chang","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xiu","family":"Su","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,14]]},"reference":[{"key":"22_CR1","first-page":"36479","volume":"35","author":"C Saharia","year":"2022","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Adv. Neural. Inf. Process. Syst. 35, 36479\u201336494 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"22_CR2","unstructured":"Ramesh, A., et al.: Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 1.2 3 (2022)"},{"key":"22_CR3","unstructured":"Goodfellow, I.J., et al.: Generative adversarial nets. Adv. Neural Inf. Process. Syst. 27 (2014)"},{"key":"22_CR4","doi-asserted-by":"crossref","unstructured":"Lin, H., et al.: Cat: cross attention in vision transformer. 2022 IEEE International Conference on Multimedia and Expo (ICME), IEEE (2022)","DOI":"10.1109\/ICME52920.2022.9859720"},{"key":"22_CR5","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. In: International Conference on Machine Learning, Pmlr (2021)"},{"key":"22_CR6","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"22_CR7","doi-asserted-by":"crossref","unstructured":"Karras, T., et al.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"22_CR8","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"22_CR9","unstructured":"Team, K.: Kolors: effective training of diffusion model for photorealistic text-to-image synthesis. arXiv preprint (2024)"},{"key":"22_CR10","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, PmLR (2021)"},{"key":"22_CR11","doi-asserted-by":"crossref","unstructured":"Rombach, R., et al.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"22_CR12","unstructured":"Podell, D., et al.: SDXL: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952 (2023)"},{"key":"22_CR13","unstructured":"Gal, R., et al.: An image is worth one word: personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)"},{"key":"22_CR14","doi-asserted-by":"crossref","unstructured":"Ruiz, N., et al.: Dreambooth: fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"22_CR15","doi-asserted-by":"crossref","unstructured":"Kumari, N., et al.: Multi-concept customization of text-to-image diffusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2023)","DOI":"10.1109\/CVPR52729.2023.00192"},{"key":"22_CR16","unstructured":"Ye, H., et al.: Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721 (2023)"},{"key":"22_CR17","unstructured":"Yuan, G., et al.: Inserting anybody in diffusion models via celeb basis. arXiv preprint arXiv:2306.00926 (2023)"},{"key":"22_CR18","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. ICLR 1.2: 3 (2022)"},{"key":"22_CR19","unstructured":"Shazeer, N., et al.: Outrageously large neural networks: the sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538 (2017)"},{"key":"22_CR20","doi-asserted-by":"crossref","unstructured":"Li, X., Hou, X., Loy, C.C.: When stylegan meets stable diffusion: a w+ adapter for personalized image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2024)","DOI":"10.1109\/CVPR52733.2024.00213"},{"key":"22_CR21","doi-asserted-by":"crossref","unstructured":"Karkkainen, K., Joo, J.: Fairface: face attribute dataset for balanced race, gender, and age for bias measurement and mitigation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (2021)","DOI":"10.1109\/WACV48630.2021.00159"},{"key":"22_CR22","doi-asserted-by":"crossref","unstructured":"Tov, O., et al.: Designing an encoder for stylegan image manipulation. ACM Trans. Graph. (TOG) 40(4), 1\u201314 (2021)","DOI":"10.1145\/3450626.3459838"},{"key":"22_CR23","unstructured":"Li, J., et al.: Blip-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: International Conference on Machine Learning, PMLR (2023)"},{"key":"22_CR24","unstructured":"Radford, A., et al.: Improving language understanding by generative pre-training (2018)"},{"issue":"10","key":"22_CR25","doi-asserted-by":"publisher","first-page":"1499","DOI":"10.1109\/LSP.2016.2603342","volume":"23","author":"K Zhang","year":"2016","unstructured":"Zhang, K., et al.: Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Sig. Process. Lett. 23(10), 1499\u20131503 (2016)","journal-title":"IEEE Sig. Process. Lett."},{"key":"22_CR26","first-page":"337","volume":"3","author":"RA Jacobs","year":"1993","unstructured":"Jacobs, R.A.: Adaptive mixture of local experts. Neural Comput. 3, 337\u2013345 (1993)","journal-title":"Neural Comput."},{"issue":"2","key":"22_CR27","doi-asserted-by":"publisher","first-page":"181","DOI":"10.1162\/neco.1994.6.2.181","volume":"6","author":"MI Jordan","year":"1994","unstructured":"Jordan, M.I., Jacobs, R.A.: Hierarchical mixtures of experts and the EM algorithm. Neural Comput. 6(2), 181\u2013214 (1994)","journal-title":"Neural Comput."},{"key":"22_CR28","doi-asserted-by":"crossref","unstructured":"Wei, Y., et al.: Elite: encoding visual concepts into textual embeddings for customized text-to-image generation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (2023)","DOI":"10.1109\/ICCV51070.2023.01461"},{"key":"22_CR29","unstructured":"Hao, S., Han, K., Zhao, S., et al.: Vico: plug-and-play visual condition for personalized text-to-image generation. arXiv preprint arXiv:2306.00971 (2023)"},{"key":"22_CR30","unstructured":"Hua, M., Liu, J., Ding, F., et al.: Dreamtuner: single image is enough for subject-driven generation. arXiv preprint arXiv:2312.13691 (2023)"},{"key":"22_CR31","doi-asserted-by":"crossref","unstructured":"Xiao, G., Yin, T., Freeman, W.T., et al.: Fastcomposer: tuning-free multi-subject image generation with localized attention. Int. J. Comput. Vis. 1\u201320 (2024)","DOI":"10.1007\/s11263-024-02227-z"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-5679-3_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T18:37:04Z","timestamp":1768329424000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-5679-3_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819556786","9789819556793"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-5679-3_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"14 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shanghai","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2025.prcv.cn\/index.asp","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}