{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T17:24:24Z","timestamp":1771953864412,"version":"3.50.1"},"publisher-location":"Cham","reference-count":61,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031732416","type":"print"},{"value":"9783031732423","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73242-3_20","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:15:43Z","timestamp":1730106943000},"page":"352-370","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["SegGen: Supercharging Segmentation Models with\u00a0Text2Mask and\u00a0Mask2Img Synthesis"],"prefix":"10.1007","author":[{"given":"Hanrong","family":"Ye","sequence":"first","affiliation":[]},{"given":"Jason","family":"Kuen","sequence":"additional","affiliation":[]},{"given":"Qing","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Zhe","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Brian","family":"Price","sequence":"additional","affiliation":[]},{"given":"Dan","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"20_CR1","unstructured":"Baranchuk, D., Rubachev, I., 
Voynov, A., Khrulkov, V., Babenko, A.: Label-efficient semantic segmentation with diffusion models. In: ICLR (2022)"},{"key":"20_CR2","unstructured":"Brock, A., Donahue, J., Simonyan, K.: Large scale GAN training for high fidelity natural image synthesis. In: ICLR (2019)"},{"key":"20_CR3","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"20_CR4","doi-asserted-by":"crossref","unstructured":"Chen, K., et\u00a0al.: Hybrid task cascade for instance segmentation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00511"},{"key":"20_CR5","unstructured":"Chen, L.C., Papandreou, G., Kokkinos, I., Murphy, K., Yuille, A.L.: Semantic image segmentation with deep convolutional nets and fully connected CRFs. In: ICLR (2015)"},{"key":"20_CR6","doi-asserted-by":"crossref","unstructured":"Chen, T., Li, L., Saxena, S., Hinton, G., Fleet, D.J.: A generalist framework for panoptic segmentation of images and videos. arXiv preprint arXiv:2210.06366 (2022)","DOI":"10.1109\/ICCV51070.2023.00090"},{"key":"20_CR7","doi-asserted-by":"crossref","unstructured":"Cheng, B., Misra, I., Schwing, A.G., Kirillov, A., Girdhar, R.: Masked-attention mask transformer for universal image segmentation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"20_CR8","unstructured":"Cheng, B., Schwing, A.G., Kirillov, A.: Per-pixel classification is not all you need for semantic segmentation. 
In: NeurIPS (2021)"},{"key":"20_CR9","unstructured":"Dhariwal, P., Nichol, A.: Diffusion models beat GANs on image synthesis. In: NeurIPS (2021)"},{"key":"20_CR10","doi-asserted-by":"crossref","unstructured":"Everingham, M., Eslami, S.A., Van\u00a0Gool, L., Williams, C.K., Winn, J., Zisserman, A.: The PASCAL visual object classes challenge: a retrospective. IJCV (2015)","DOI":"10.1007\/s11263-014-0733-5"},{"key":"20_CR11","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: Instances as queries. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00683"},{"key":"20_CR12","unstructured":"Forever, A.: Kandinsky-2. GitHub repository (2023). https:\/\/github.com\/ai-forever\/Kandinsky-2"},{"key":"20_CR13","doi-asserted-by":"crossref","unstructured":"Goodfellow, I., et al.: Generative adversarial networks. CACM (2020)","DOI":"10.1145\/3422622"},{"key":"20_CR14","doi-asserted-by":"crossref","unstructured":"Gupta, A., Dollar, P., Girshick, R.: LVIS: a dataset for large vocabulary instance segmentation. In: ICCV (2019)","DOI":"10.1109\/CVPR.2019.00550"},{"key":"20_CR15","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"20_CR16","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"20_CR17","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)"},{"key":"20_CR18","unstructured":"Huang, L., Chen, D., Liu, Y., Yujun, S., Zhao, D., Jingren, Z.: Composer: creative and controllable image synthesis with composable conditions. arXiv preprint arxiv:2302.09778 (2023)"},{"key":"20_CR19","doi-asserted-by":"crossref","unstructured":"Jain, J., Li, J., Chiu, M.T., Hassani, A., Orlov, N., Shi, H.: OneFormer: one transformer to rule universal image segmentation. 
In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00292"},{"key":"20_CR20","doi-asserted-by":"crossref","unstructured":"Ji, Y., et al.: DDP: diffusion model for dense visual prediction. arXiv preprint arXiv:2303.17559 (2023)","DOI":"10.1109\/ICCV51070.2023.01987"},{"key":"20_CR21","doi-asserted-by":"crossref","unstructured":"Kang, M., et al.: Scaling up GANs for text-to-image synthesis. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"20_CR22","unstructured":"Karras, T., Aittala, M., Aila, T., Laine, S.: Elucidating the design space of diffusion-based generative models. In: NeurIPS (2022)"},{"key":"20_CR23","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"20_CR24","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"key":"20_CR25","doi-asserted-by":"crossref","unstructured":"Kirillov, A., He, K., Girshick, R., Rother, C., Doll\u00e1r, P.: Panoptic segmentation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00963"},{"key":"20_CR26","doi-asserted-by":"crossref","unstructured":"Li, D., Ling, H., Kim, S.W., Kreis, K., Fidler, S., Torralba, A.: BigDatasetGAN: synthesizing imagenet with pixel-wise annotations. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.02064"},{"key":"20_CR27","doi-asserted-by":"crossref","unstructured":"Li, F., et al.: Mask DINO: towards a unified transformer-based framework for object detection and segmentation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00297"},{"key":"20_CR28","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. 
In: ICML (2023)"},{"key":"20_CR29","doi-asserted-by":"crossref","unstructured":"Li, X., Zhong, Z., Wu, J., Yang, Y., Lin, Z., Liu, H.: Expectation-maximization attention networks for semantic segmentation. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00926"},{"key":"20_CR30","unstructured":"Li, X., et al.: Transformer-based visual segmentation: a survey. arXiv preprint arXiv:2304.09854 (2023)"},{"key":"20_CR31","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: GLIGEN: open-set grounded text-to-image generation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"20_CR32","doi-asserted-by":"crossref","unstructured":"Li, Z., Zhou, Q., Zhang, X., Zhang, Y., Wang, Y., Xie, W.: Open-vocabulary object segmentation with diffusion models. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00705"},{"key":"20_CR33","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"20_CR34","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. arXiv:2103.14030 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"20_CR35","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"20_CR36","doi-asserted-by":"crossref","unstructured":"Mou, C., et al.: T2I-adapter: learning adapters to dig out more controllable ability for text-to-image diffusion models. 
arXiv preprint arXiv:2302.08453 (2023)","DOI":"10.1609\/aaai.v38i5.28226"},{"key":"20_CR37","unstructured":"Nguyen, Q., Vu, T., Tran, A., Nguyen, K.: Dataset diffusion: diffusion-based synthetic dataset generation for pixel-level semantic segmentation. arXiv preprint arXiv:2309.14303 (2023)"},{"key":"20_CR38","doi-asserted-by":"crossref","unstructured":"Pnvr, K., Singh, B., Ghosh, P., Siddiquie, B., Jacobs, D.: LD-ZNet: a latent diffusion approach for text-based image segmentation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00384"},{"key":"20_CR39","unstructured":"Podell, D., et al.: SDXL: improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952 (2023)"},{"key":"20_CR40","doi-asserted-by":"crossref","unstructured":"Qi, L., et al.: Open world entity segmentation. TPAMI (2022)","DOI":"10.1109\/TPAMI.2022.3227513"},{"key":"20_CR41","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"20_CR42","doi-asserted-by":"crossref","unstructured":"Saharia, C., et\u00a0al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS (2022)","DOI":"10.1145\/3528233.3530757"},{"key":"20_CR43","unstructured":"Sauer, A., Karras, T., Laine, S., Geiger, A., Aila, T.: StyleGAN-T: unlocking the power of GANs for fast large-scale text-to-image synthesis. In: ICML (2023)"},{"key":"20_CR44","unstructured":"Sohl-Dickstein, J., Weiss, E., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: ICML (2015)"},{"key":"20_CR45","unstructured":"Wang, H., Cao, J., Anwer, R.M., Xie, J., Khan, F.S., Pang, Y.: DFormer: diffusion-guided transformer for universal image segmentation. 
arXiv preprint arXiv:2306.03437 (2023)"},{"key":"20_CR46","unstructured":"Wang, J., et al.: Deep high-resolution representation learning for visual recognition. TPAMI (2019)"},{"key":"20_CR47","unstructured":"Wu, W., et al.: DatasetDM: synthesizing data with perception annotations using diffusion models. arXiv preprint arXiv:2308.06160 (2023)"},{"key":"20_CR48","doi-asserted-by":"crossref","unstructured":"Wu, W., Zhao, Y., Shou, M.Z., Zhou, H., Shen, C.: DiffuMask: synthesizing images with pixel-level annotations for semantic segmentation using diffusion models. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00117"},{"key":"20_CR49","doi-asserted-by":"crossref","unstructured":"Xie, J., Li, W., Li, X., Liu, Z., Ong, Y.S., Loy, C.C.: MosaicFusion: diffusion models as data augmenters for large vocabulary instance segmentation. arXiv preprint arXiv:2309.13042 (2023)","DOI":"10.1007\/s11263-024-02223-3"},{"key":"20_CR50","doi-asserted-by":"crossref","unstructured":"Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De\u00a0Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00289"},{"key":"20_CR51","unstructured":"Yang, L., Xu, X., Kang, B., Shi, Y., Zhao, H.: Freemask: synthetic images with dense annotations make stronger segmentation models. In: NeurIPS (2023)"},{"key":"20_CR52","unstructured":"Ye, H., Xu, D.: Taskprompter: spatial-channel multi-task prompting for dense scene understanding. In: ICLR (2023)"},{"key":"20_CR53","doi-asserted-by":"crossref","unstructured":"Zeng, Y., et al.: Scenecomposer: any-level semantic image synthesis. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02152"},{"key":"20_CR54","doi-asserted-by":"crossref","unstructured":"Zhang, L., Anyi, R., Agrawala, M.: Adding conditional control to text-to-image diffusion models. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"20_CR55","unstructured":"Zhang, W., Pang, J., Chen, K., Loy, C.C.: K-net: towards unified image segmentation. In: NeurIPS (2021)"},{"key":"20_CR56","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: DatasetGAN: efficient labeled data factory with minimal human effort. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01001"},{"key":"20_CR57","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., Jia, J.: Pyramid scene parsing network. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.660"},{"key":"20_CR58","unstructured":"Zhao, S., et al.: Uni-controlnet: all-in-one control to text-to-image diffusion models. In: NeurIPS (2023)"},{"key":"20_CR59","doi-asserted-by":"crossref","unstructured":"Zhao, W., Rao, Y., Liu, Z., Liu, B., Zhou, J., Lu, J.: Unleashing text-to-image diffusion models for visual perception. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00527"},{"key":"20_CR60","unstructured":"Zhou, B., Zhao, H., Puig, X., Fidler, S., Barriuso, A., Torralba, A.: Scene parsing challenge 2016 (2016). http:\/\/sceneparsing.csail.mit.edu\/index_challenge.html"},{"key":"20_CR61","doi-asserted-by":"crossref","unstructured":"Zhou, B., Zhao, H., Puig, X., Fidler, S., Barriuso, A., Torralba, A.: Scene parsing through ADE20K dataset. 
In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.544"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73242-3_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:37:55Z","timestamp":1730108275000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73242-3_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,29]]},"ISBN":["9783031732416","9783031732423"],"references-count":61,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73242-3_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,29]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}