{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T06:02:20Z","timestamp":1773727340022,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":35,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"the Research Grants Council of the Hong Kong Special Administrative Region, China","award":["No. CUHK 14206921"],"award-info":[{"award-number":["No. CUHK 14206921"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3681433","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:41Z","timestamp":1729925981000},"page":"3781-3789","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":16,"title":["New Job, New Gender? Measuring the Social Bias in Image Generation Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9803-8204","authenticated-orcid":false,"given":"Wenxuan","family":"Wang","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8748-4479","authenticated-orcid":false,"given":"Haonan","family":"Bai","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3446-0083","authenticated-orcid":false,"given":"Jen-tse","family":"Huang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-6739-4675","authenticated-orcid":false,"given":"Yuxuan","family":"Wan","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5896-7669","authenticated-orcid":false,"given":"Youliang","family":"Yuan","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2379-3333","authenticated-orcid":false,"given":"Haoyi","family":"Qiu","sequence":"additional","affiliation":[{"name":"University of California, Los Angeles, Los Angeles, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8509-6595","authenticated-orcid":false,"given":"Nanyun","family":"Peng","sequence":"additional","affiliation":[{"name":"University of California, Los Angeles, Los Angeles, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3666-5798","authenticated-orcid":false,"given":"Michael","family":"Lyu","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.gebnlp-1.27"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_3_1","volume-title":"Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency","author":"Bianchi Federico","year":"2022","unstructured":"Federico Bianchi, Pratyusha Kalluri, Esin Durmus, Faisal Ladhak, Myra Cheng, Debora Nozza, Tatsunori 
Hashimoto, Dan Jurafsky, James Y. Zou, and Aylin Caliskan. 2022. Easily Accessible Text-to-Image Generation Amplifies Demographic Stereotypes at Large Scale. Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency (2022). https:\/\/api.semanticscholar.org\/CorpusID:253383708"},{"key":"e_1_3_2_1_4_1","volume-title":"Bowman","author":"Bordia Shikha","year":"2019","unstructured":"Shikha Bordia and Samuel R. Bowman. 2019. Identifying and Reducing Gender Bias in Word-Level Language Models. In North American Chapter of the Association for Computational Linguistics."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2018.00020"},{"key":"e_1_3_2_1_6_1","volume-title":"Fairness Testing: A Comprehensive Survey and Analysis of Trends. ArXiv","author":"Chen Zhenpeng","year":"2022","unstructured":"Zhenpeng Chen, J Zhang, Max Hort, Federica Sarro, and Mark Harman. 2022. Fairness Testing: A Comprehensive Survey and Analysis of Trends. ArXiv, Vol. abs\/2207.10223 (2022). https:\/\/api.semanticscholar.org\/CorpusID:250920488"},{"key":"e_1_3_2_1_7_1","volume-title":"DALL-Eval: Probing the Reasoning Skills and Social Biases of Text-to-Image Generative Transformers. ArXiv","author":"Cho Jaemin","year":"2022","unstructured":"Jaemin Cho, Abhaysinh Zala, and Mohit Bansal. 2022. DALL-Eval: Probing the Reasoning Skills and Social Biases of Text-to-Image Generative Transformers. ArXiv (2022)."},{"key":"e_1_3_2_1_8_1","unstructured":"Aiyub Dawood. 2023. Number of Midjourney Users and Statistics. https:\/\/www.mlyearning.org\/midjourney-users-statistics\/. Accessed: 2023-08-01."},{"key":"e_1_3_2_1_9_1","first-page":"181","article-title":"Gender bias and legal profession: A discussion of why there are still so few women on the bench. U. Md. LJ Race","volume":"4","author":"Durant Leah V","year":"2004","unstructured":"Leah V Durant. 2004. Gender bias and legal profession: A discussion of why there are still so few women on the bench. U. Md. LJ Race, Religion, Gender & Class, Vol. 4 (2004), 181.","journal-title":"Religion, Gender & Class"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.397"},{"key":"e_1_3_2_1_11_1","unstructured":"Ian J. Goodfellow Jean Pouget-Abadie Mehdi Mirza Bing Xu David Warde-Farley Sherjil Ozair Aaron C. Courville and Yoshua Bengio. 2014. Generative adversarial networks. In NIPS."},{"key":"e_1_3_2_1_12_1","volume-title":"Bias Mitigation for Machine Learning Classifiers: A Comprehensive Survey. ArXiv","author":"Hort Max","year":"2022","unstructured":"Max Hort, Zhenpeng Chen, J Zhang, Federica Sarro, and Mark Harman. 2022. Bias Mitigation for Machine Learning Classifiers: A Comprehensive Survey. ArXiv, Vol. abs\/2207.07068 (2022). https:\/\/api.semanticscholar.org\/CorpusID:250526377"},{"key":"e_1_3_2_1_13_1","volume-title":"Llama 2: Open Foundation and Fine-Tuned Chat Models. ArXiv","author":"Touvron Hugo","year":"2023","unstructured":"et al. Hugo Touvron. 2023. Llama 2: Open Foundation and Fine-Tuned Chat Models. ArXiv, Vol. abs\/2307.09288 (2023). https:\/\/api.semanticscholar.org\/CorpusID:259950998"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3449113"},{"key":"e_1_3_2_1_15_1","volume-title":"International Conference on Machine Learning. PMLR, 6565--6576","author":"Liang Paul Pu","year":"2021","unstructured":"Paul Pu Liang, Chiyu Wu, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2021. Towards understanding and mitigating social biases in language models. 
In International Conference on Machine Learning. PMLR, 6565--6576."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548404"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3457607"},{"key":"e_1_3_2_1_18_1","unstructured":"Dan Milmo and Alex Hern. 2024. Google chief admits 'biased' AI tool's photo diversity offended users. https:\/\/www.theguardian.com\/technology\/2024\/feb\/28\/google-chief-ai-tools-photo-diversity-offended-users Accessed: Mar-02--2024."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1007\/s00778-021-00697-y"},{"key":"e_1_3_2_1_20_1","volume-title":"Learning Transferable Visual Models From Natural Language Supervision. In International Conference on Machine Learning. https:\/\/api.semanticscholar.org\/CorpusID:231591445","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. In International Conference on Machine Learning. https:\/\/api.semanticscholar.org\/CorpusID:231591445"},{"key":"e_1_3_2_1_21_1","unstructured":"Robin Rombach Andreas Blattmann Dominik Lorenz Patrick Esser and Bj\u00f6rn Ommer. 2021. High-Resolution Image Synthesis with Latent Diffusion Models. arxiv: 2112.10752 [cs.CV]"},{"key":"e_1_3_2_1_22_1","volume-title":"High-Resolution Image Synthesis with Latent Diffusion Models. 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","author":"Rombach Robin","year":"2021","unstructured":"Robin Rombach, A. Blattmann, Dominik Lorenz, Patrick Esser, and Bj\u00f6rn Ommer. 2021. High-Resolution Image Synthesis with Latent Diffusion Models. 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2021), 10674--10685. https:\/\/api.semanticscholar.org\/CorpusID:245335280"},{"key":"e_1_3_2_1_23_1","volume-title":"Measuring social biases in grounded vision and language embeddings. arXiv preprint arXiv:2002.08911","author":"Ross Candace","year":"2020","unstructured":"Candace Ross, Boris Katz, and Andrei Barbu. 2020. Measuring social biases in grounded vision and language embeddings. arXiv preprint arXiv:2002.08911 (2020)."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.403"},{"key":"e_1_3_2_1_25_1","volume-title":"Social bias frames: Reasoning about social and power implications of language. ACL","author":"Sap Maarten","year":"2019","unstructured":"Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A Smith, and Yejin Choi. 2019. Social bias frames: Reasoning about social and power implications of language. ACL (2019)."},{"key":"e_1_3_2_1_26_1","unstructured":"Charles Hamilton Smith and Samuel Kneeland. [n. d.]. The natural history of the human species. https:\/\/api.semanticscholar.org\/CorpusID:162691300"},{"key":"e_1_3_2_1_27_1","volume-title":"Mitigating gender bias in natural language processing: Literature review. ACL","author":"Sun Tony","year":"2019","unstructured":"Tony Sun, Andrew Gaut, Shirlyn Tang, Yuxin Huang, Mai ElSherief, Jieyu Zhao, Diba Mirza, Elizabeth Belding, Kai-Wei Chang, and William Yang Wang. 2019. Mitigating gender bias in natural language processing: Literature review. ACL (2019)."},{"key":"e_1_3_2_1_28_1","volume-title":"Mitigating Gender Bias in Natural Language Processing: Literature Review. 
In Annual Meeting of the Association for Computational Linguistics. https:\/\/api.semanticscholar.org\/CorpusID:195316733","author":"Sun Tony","year":"2019","unstructured":"Tony Sun, Andrew Gaut, Shirlyn Tang, Yuxin Huang, Mai Elsherief, Jieyu Zhao, Diba Mirza, Elizabeth M. Belding-Royer, Kai-Wei Chang, and William Yang Wang. 2019. Mitigating Gender Bias in Natural Language Processing: Literature Review. In Annual Meeting of the Association for Computational Linguistics. https:\/\/api.semanticscholar.org\/CorpusID:195316733"},{"key":"e_1_3_2_1_29_1","volume-title":"Neural Discrete Representation Learning. NIPS","author":"van den Oord A\u00e4ron","year":"2017","unstructured":"A\u00e4ron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. 2017. Neural Discrete Representation Learning. NIPS (2017)."},{"key":"e_1_3_2_1_30_1","volume-title":"Lyu","author":"Wan Yuxuan","year":"2023","unstructured":"Yuxuan Wan, Wenxuan Wang, Pinjia He, Jiazhen Gu, Haonan Bai, and Michael R. Lyu. 2023. BiasAsker: Measuring the Bias in Conversational AI System. FSE (2023)."},{"key":"e_1_3_2_1_31_1","volume-title":"T2IAT: Measuring Valence and Stereotypical Biases in Text-to-Image Generation. ACL","author":"Wang Jialu","year":"2023","unstructured":"Jialu Wang, Xinyue Liu, Zonglin Di, Y. Liu, and Xin Eric Wang. 2023. T2IAT: Measuring Valence and Stereotypical Biases in Text-to-Image Generation. ACL (2023)."},{"key":"e_1_3_2_1_32_1","volume-title":"Zhaopeng Tu, and Michael R. Lyu.","author":"Wang Wenxuan","year":"2023","unstructured":"Wenxuan Wang, Wenxiang Jiao, Jingyuan Huang, Ruyi Dai, Jen tse Huang, Zhaopeng Tu, and Michael R. Lyu. 2023. Not All Countries Celebrate Thanksgiving: On the Cultural Dominance in Large Language Models. ArXiv, Vol. abs\/2310.12481 (2023). https:\/\/api.semanticscholar.org\/CorpusID:264305810"},{"key":"e_1_3_2_1_33_1","volume-title":"Courtney Anne De Thomas, and Jennifer M Weller","author":"Webster Craig S.","year":"2022","unstructured":"Craig S. Webster, S Taylor, Courtney Anne De Thomas, and Jennifer M Weller. 2022. Social bias, discrimination and inequity in healthcare: mechanisms, implications and recommendations. BJA education (2022)."},{"key":"e_1_3_2_1_34_1","unstructured":"Yanzhe Zhang Lu Jiang Greg Turk and Diyi Yang. 2023. Auditing Gender Presentation Differences in Text-to-Image Models. 
arxiv: 2302.03675 [cs.CV]"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548396"}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","location":"Melbourne VIC Australia","acronym":"MM '24","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3681433","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3681433","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:57:47Z","timestamp":1750294667000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3681433"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":35,"alternative-id":["10.1145\/3664647.3681433","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3681433","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
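
The block above appears to be the JSON body returned by the public Crossref REST API for this work. As a minimal sketch of how such a record can be retrieved and a few of its fields read back out, the following Python snippet assumes the standard Crossref endpoint https://api.crossref.org/works/{DOI} returns exactly this structure (status / message-type / message); the field names (title, author, container-title, published, page, references-count) are taken from the record itself, and the use of the `requests` library is an illustrative choice, not part of the record.

# Minimal sketch: fetch the Crossref work record for this DOI and print a
# citation-style summary. Field names mirror the JSON shown above.
import requests

DOI = "10.1145/3664647.3681433"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object in the record above

title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
venue = work["container-title"][0]
year = work["published"]["date-parts"][0][0]
pages = work.get("page", "n/a")

print(f"{authors}. {year}. {title}. In {venue}, pp. {pages}.")
print("References deposited:", work.get("references-count"))

Running this against a live Crossref endpoint should reproduce the bibliographic summary encoded in the record (title, eight authors, MM '24 proceedings, pages 3781-3789, 35 references), though the exact response may differ from the snapshot above as Crossref updates its index.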