{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T02:16:16Z","timestamp":1771467376413,"version":"3.50.1"},"publisher-location":"Cham","reference-count":52,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031728471","type":"print"},{"value":"9783031728488","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72848-8_7","type":"book-chapter","created":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T13:35:15Z","timestamp":1732800915000},"page":"108-124","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,
"title":["Expanding Scene Graph Boundaries: Fully Open-Vocabulary Scene Graph Generation via\u00a0Visual-Concept Alignment and\u00a0Retention"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7344-1101","authenticated-orcid":false,"given":"Zuyao","family":"Chen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7877-5728","authenticated-orcid":false,"given":"Jinlin","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0791-189X","authenticated-orcid":false,"given":"Zhen","family":"Lei","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2648-3875","authenticated-orcid":false,"given":"Zhaoxiang","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6720-234X","authenticated-orcid":false,"given":"Chang Wen","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,29]]},"reference":[{"key":"7_CR1","doi-asserted-by":"crossref","unstructured":"Chen, S., Jin, Q., Wang, P., Wu, Q.: Say as you wish: fine-grained control of image caption generation with abstract scene graphs. In: CVPR, pp. 9959\u20139968 (2020)","DOI":"10.1109\/CVPR42600.2020.00998"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Chen, T., Yu, W., Chen, R., Lin, L.: Knowledge-embedded routing network for scene graph generation. In: CVPR, pp. 6163\u20136171 (2019)","DOI":"10.1109\/CVPR.2019.00632"},{"key":"7_CR3","unstructured":"Chen, X., et al.: Microsoft COCO captions: data collection and evaluation server. CoRR abs\/1504.00325 (2015)"},
{"key":"7_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/978-3-030-58577-8_7","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y-C Chen","year":"2020","unstructured":"Chen, Y.-C., et al.: UNITER: UNiversal image-TExt representation learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 104\u2013120. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_7"},{"key":"7_CR5","doi-asserted-by":"crossref","unstructured":"Chiou, M., Ding, H., Yan, H., Wang, C., Zimmermann, R., Feng, J.: Recovering the unbiased scene graphs from the biased ones. In: ACMMM, pp. 1581\u20131590 (2021)","DOI":"10.1145\/3474085.3475297"},{"key":"7_CR6","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT, pp. 4171\u20134186 (2019)"},{"key":"7_CR7","doi-asserted-by":"crossref","unstructured":"Du, Y., Wei, F., Zhang, Z., Shi, M., Gao, Y., Li, G.: Learning to prompt for open-vocabulary object detection with vision-language model. In: CVPR, pp. 14064\u201314073 (2022)","DOI":"10.1109\/CVPR52688.2022.01369"},{"key":"7_CR8","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"540","DOI":"10.1007\/978-3-031-20059-5_31","volume-title":"ECCV 2022","author":"G Ghiasi","year":"2022","unstructured":"Ghiasi, G., Gu, X., Cui, Y., Lin, T.: Scaling open-vocabulary image segmentation with image-level labels. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13696, pp. 540\u2013557. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20059-5_31"},{"key":"7_CR9","doi-asserted-by":"crossref","unstructured":"Gu, J., Joty, S.R., Cai, J., Zhao, H., Yang, X., Wang, G.: Unpaired image captioning via scene graph alignments. In: ICCV, pp. 10322\u201310331 (2019)","DOI":"10.1109\/ICCV.2019.01042"},
{"key":"7_CR10","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1007\/978-3-031-19815-1_4","volume-title":"ECCV 2022","author":"T He","year":"2022","unstructured":"He, T., Gao, L., Song, J., Li, Y.: Towards open-vocabulary scene graph generation with prompt-based finetuning. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13688, pp. 56\u201373. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19815-1_4"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Johnson, J., Gupta, A., Fei-Fei, L.: Image generation from scene graphs. In: CVPR, pp. 1219\u20131228 (2018)","DOI":"10.1109\/CVPR.2018.00133"},{"key":"7_CR12","doi-asserted-by":"crossref","unstructured":"Kenfack, F.K., Siddiky, F.A., Balint-Benczedi, F., Beetz, M.: Robotvqa - a scene-graph- and deep-learning-based visual question answering system for robot manipulation. In: IROS, pp. 9667\u20139674 (2020)","DOI":"10.1109\/IROS45743.2020.9341186"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Knyazev, B., de\u00a0Vries, H., Cangea, C., Taylor, G.W., Courville, A.C., Belilovsky, E.: Generative compositional augmentations for scene graph prediction. In: ICCV, pp. 15807\u201315817 (2021)","DOI":"10.1109\/ICCV48922.2021.01553"},{"key":"7_CR14","doi-asserted-by":"crossref","unstructured":"Lee, S., Kim, J., Oh, Y., Jeon, J.H.: Visual question answering over scene graph. In: GC, pp. 45\u201350 (2019)","DOI":"10.1109\/GC46384.2019.00015"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Li, L.H., et al.: Grounded language-image pre-training. In: CVPR, pp. 10955\u201310965 (2022)","DOI":"10.1109\/CVPR52688.2022.01069"},{"key":"7_CR16","doi-asserted-by":"crossref","unstructured":"Li, R., Zhang, S., He, X.: SGTR: end-to-end scene graph generation with transformer. In: CVPR, pp. 19464\u201319474 (2022)","DOI":"10.1109\/CVPR52688.2022.01888"},{"key":"7_CR17","doi-asserted-by":"crossref","unstructured":"Li, R., Zhang, S., Lin, D., Chen, K., He, X.: From pixels to graphs: open-vocabulary scene graph generation with vision-language models. In: CVPR, pp. 28076\u201328086 (2024)","DOI":"10.1109\/CVPR52733.2024.02652"},
{"key":"7_CR18","doi-asserted-by":"crossref","unstructured":"Li, R., Zhang, S., Wan, B., He, X.: Bipartite graph network with adaptive message passing for unbiased scene graph generation. In: CVPR, pp. 11109\u201311119 (2021)","DOI":"10.1109\/CVPR46437.2021.01096"},{"key":"7_CR19","doi-asserted-by":"crossref","unstructured":"Li, X., Chen, L., Ma, W., Yang, Y., Xiao, J.: Integrating object-aware and interaction-aware knowledge for weakly supervised scene graph generation. In: ACMMM, pp. 4204\u20134213 (2022)","DOI":"10.1145\/3503161.3548164"},{"key":"7_CR20","doi-asserted-by":"crossref","unstructured":"Lin, T., Goyal, P., Girshick, R.B., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: ICCV, pp. 2999\u20133007 (2017)","DOI":"10.1109\/ICCV.2017.324"},{"key":"7_CR21","doi-asserted-by":"crossref","unstructured":"Lin, X., Ding, C., Zhan, Y., Li, Z., Tao, D.: Hl-net: heterophily learning network for scene graph generation. In: CVPR, pp. 19454\u201319463 (2022)","DOI":"10.1109\/CVPR52688.2022.01887"},{"key":"7_CR22","doi-asserted-by":"crossref","unstructured":"Liu, H., Yan, N., Mortazavi, M.S., Bhanu, B.: Fully convolutional scene graph generation. In: CVPR, pp. 11546\u201311556 (2021)","DOI":"10.1109\/CVPR46437.2021.01138"},{"key":"7_CR23","doi-asserted-by":"crossref","unstructured":"Liu, S., et al.: Grounding DINO: marrying DINO with grounded pre-training for open-set object detection. CoRR abs\/2303.05499 (2023)","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"7_CR24","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV, pp. 9992\u201310002 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"7_CR25","unstructured":"Mao, J.: Scene graph parser (2022). https:\/\/github.com\/vacancy\/SceneGraphParser"},
{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Nguyen, K., Tripathi, S., Du, B., Guha, T., Nguyen, T.Q.: In defense of scene graphs for image captioning. In: ICCV, pp. 1387\u20131396 (2021)","DOI":"10.1109\/ICCV48922.2021.00144"},{"key":"7_CR27","doi-asserted-by":"crossref","unstructured":"Nuthalapati, S.V., et al.: Lightweight visual question answering using scene graphs. In: CIKM, pp. 3353\u20133357 (2021)","DOI":"10.1145\/3459637.3482218"},{"key":"7_CR28","unstructured":"Ordonez, V., Kulkarni, G., Berg, T.L.: Im2text: describing images using 1 million captioned photographs. In: NeurIPS, pp. 1143\u20131151 (2011)"},{"key":"7_CR29","doi-asserted-by":"crossref","unstructured":"Plummer, B.A., Wang, L., Cervantes, C.M., Caicedo, J.C., Hockenmaier, J., Lazebnik, S.: Flickr30k entities: collecting region-to-phrase correspondences for richer image-to-sentence models. In: ICCV, pp. 2641\u20132649 (2015)","DOI":"10.1109\/ICCV.2015.303"},{"key":"7_CR30","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763 (2021)"},{"key":"7_CR31","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. NeurIPS 28 (2015)"},{"key":"7_CR32","doi-asserted-by":"crossref","unstructured":"Rezatofighi, H., Tsoi, N., Gwak, J., Sadeghian, A., Reid, I.D., Savarese, S.: Generalized intersection over union: a metric and a loss for bounding box regression. In: CVPR, pp. 658\u2013666 (2019)","DOI":"10.1109\/CVPR.2019.00075"},{"key":"7_CR33","doi-asserted-by":"crossref","unstructured":"Suhail, M., et al.: Energy-based learning for scene graph generation. In: CVPR, pp. 13936\u201313945 (2021)","DOI":"10.1109\/CVPR46437.2021.01372"},
{"key":"7_CR34","doi-asserted-by":"crossref","unstructured":"Tang, K., Niu, Y., Huang, J., Shi, J., Zhang, H.: Unbiased scene graph generation from biased training. In: CVPR, pp. 3713\u20133722 (2020)","DOI":"10.1109\/CVPR42600.2020.00377"},{"key":"7_CR35","doi-asserted-by":"crossref","unstructured":"Tang, K., Zhang, H., Wu, B., Luo, W., Liu, W.: Learning to compose dynamic tree structures for visual contexts. In: CVPR, pp. 6619\u20136628 (2019)","DOI":"10.1109\/CVPR.2019.00678"},{"key":"7_CR36","doi-asserted-by":"crossref","unstructured":"Teney, D., Liu, L., van\u00a0den Hengel, A.: Graph-structured representations for visual question answering. In: CVPR, pp. 3233\u20133241 (2017)","DOI":"10.1109\/CVPR.2017.344"},{"key":"7_CR37","doi-asserted-by":"crossref","unstructured":"Wang, D., Beck, D., Cohn, T.: On the role of scene graphs in image captioning. In: LANTERN@EMNLP-IJCNLP, pp. 29\u201334 (2019)","DOI":"10.18653\/v1\/D19-6405"},{"key":"7_CR38","unstructured":"Wang, M., Xing, J., Liu, Y.: Actionclip: a new paradigm for video action recognition. CoRR abs\/2109.08472 (2021)"},{"key":"7_CR39","doi-asserted-by":"crossref","unstructured":"Wu, J., et\u00a0al.: Towards open vocabulary learning: a survey. IEEE TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3361862"},{"key":"7_CR40","doi-asserted-by":"crossref","unstructured":"Wu, S., Zhang, W., Jin, S., Liu, W., Loy, C.C.: Aligning bag of regions for open-vocabulary object detection. In: CVPR, pp. 15254\u201315264 (2023)","DOI":"10.1109\/CVPR52729.2023.01464"},{"key":"7_CR41","doi-asserted-by":"crossref","unstructured":"Xu, D., Zhu, Y., Choy, C.B., Fei-Fei, L.: Scene graph generation by iterative message passing. In: CVPR, pp. 3097\u20133106 (2017)","DOI":"10.1109\/CVPR.2017.330"},{"key":"7_CR42","unstructured":"Yang, L., et al.: Diffusion-based scene graph to image generation with masked contrastive pre-training. CoRR abs\/2211.11138 (2022)"},
{"key":"7_CR43","doi-asserted-by":"crossref","unstructured":"Yang, X., Tang, K., Zhang, H., Cai, J.: Auto-encoding scene graphs for image captioning. In: CVPR, pp. 10685\u201310694 (2019)","DOI":"10.1109\/CVPR.2019.01094"},{"key":"7_CR44","doi-asserted-by":"crossref","unstructured":"Ye, K., Kovashka, A.: Linguistic structures as weak supervision for visual scene graph generation. In: CVPR, pp. 8289\u20138299 (2021)","DOI":"10.1109\/CVPR46437.2021.00819"},{"key":"7_CR45","doi-asserted-by":"crossref","unstructured":"Zareian, A., Rosa, K.D., Hu, D.H., Chang, S.: Open-vocabulary object detection using captions. In: CVPR, pp. 14393\u201314402 (2021)","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"7_CR46","doi-asserted-by":"crossref","unstructured":"Zellers, R., Yatskar, M., Thomson, S., Choi, Y.: Neural motifs: scene graph parsing with global context. In: CVPR, pp. 5831\u20135840 (2018)","DOI":"10.1109\/CVPR.2018.00611"},{"key":"7_CR47","doi-asserted-by":"crossref","unstructured":"Zhang, J., Shih, K.J., Elgammal, A., Tao, A., Catanzaro, B.: Graphical contrastive losses for scene graph parsing. In: CVPR, pp. 11535\u201311543 (2019)","DOI":"10.1109\/CVPR.2019.01180"},{"key":"7_CR48","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Pan, Y., Yao, T., Huang, R., Mei, T., Chen, C.W.: Learning to generate language-supervised and open-vocabulary scene graph using pre-trained visual-semantic space. In: CVPR, pp. 2915\u20132924 (2023)","DOI":"10.1109\/CVPR52729.2023.00285"},{"key":"7_CR49","doi-asserted-by":"crossref","unstructured":"Zhong, Y., Shi, J., Yang, J., Xu, C., Li, Y.: Learning to generate scene graph from natural language supervision. In: ICCV, pp. 1823\u20131834 (2021)","DOI":"10.1109\/ICCV48922.2021.00184"},{"key":"7_CR50","doi-asserted-by":"crossref","unstructured":"Zhong, Y., et al.: Regionclip: region-based language-image pretraining. In: CVPR, pp. 16772\u201316782 (2022)","DOI":"10.1109\/CVPR52688.2022.01629"},
{"key":"7_CR51","unstructured":"Zhu, C., Chen, L.: A survey on open-vocabulary detection and segmentation: past, present, and future. CoRR abs\/2307.09220 (2023)"},{"key":"7_CR52","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable DETR: deformable transformers for end-to-end object detection. In: ICLR (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72848-8_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T14:05:27Z","timestamp":1732802727000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72848-8_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,29]]},"ISBN":["9783031728471","9783031728488"],"references-count":52,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72848-8_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,29]]},"assertion":[{"value":"29 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}