{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T16:07:04Z","timestamp":1771344424503,"version":"3.50.1"},"reference-count":121,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T00:00:00Z","timestamp":1768953600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T00:00:00Z","timestamp":1768953600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s11263-026-02729-y","type":"journal-article","created":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T11:26:30Z","timestamp":1768994790000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["GREx: Generalized Referring Expression Segmentation, Comprehension, and Generation"],"prefix":"10.1007","volume":"134","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4868-6526","authenticated-orcid":false,"given":"Henghui","family":"Ding","sequence":"first","affiliation":[]},{"given":"Chang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Shuting","family":"He","sequence":"additional","affiliation":[]},{"given":"Xudong","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Yu-Gang","family":"Jiang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,21]]},"reference":[{"key":"2729_CR1","doi-asserted-by":"crossref","unstructured":"Kazemzadeh,S., Ordonez,V., Matten,M., & Berg,T.(2014). 
ReferItGame: Referring to objects in photographs of natural scenes, in Proc. of the Conf. on Empirical Methods in Natural Language Process., pp. 787\u2013798.","DOI":"10.3115\/v1\/D14-1086"},{"key":"2729_CR2","doi-asserted-by":"crossref","unstructured":"Yu,L., Poirson,P., Yang,S., Berg,A. C., & Berg,T. L. (2016).Modeling context in referring expressions, in Proc. Eur. Conf. Comput. Vis. Springer, pp.69\u201385.","DOI":"10.1007\/978-3-319-46475-6_5"},{"key":"2729_CR3","doi-asserted-by":"crossref","unstructured":"Mao,J.,Huang,J., Toshev,A., Camburu,O., Yuille,O., & Murphy,K.(2016). Generation and comprehension of unambiguous object descriptions, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 11\u201320.","DOI":"10.1109\/CVPR.2016.9"},{"key":"2729_CR4","doi-asserted-by":"crossref","unstructured":"Ding,H., Liu,C., Wang,S., & Jiang,X.(2023). VLT: Vision-language transformer and query generation for referring segmentation, IEEE Trans. Pattern Anal. Mach. Intell., 45(6).","DOI":"10.1109\/TPAMI.2022.3217852"},{"key":"2729_CR5","doi-asserted-by":"crossref","unstructured":"Yu,L., Lin,Z., Shen,X., Yang,J., Lu,X., Bansal,M., & Berg,T. L.(2018). Mattnet: Modular attention network for referring expression comprehension, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 1307\u20131315.","DOI":"10.1109\/CVPR.2018.00142"},{"key":"2729_CR6","doi-asserted-by":"crossref","unstructured":"Yu,L.,Tan,H., Bansal,M., & Berg,T. L.(2017). A joint speaker-listener-reinforcer model for referring expressions, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 7282\u20137290.","DOI":"10.1109\/CVPR.2017.375"},{"key":"2729_CR7","doi-asserted-by":"crossref","unstructured":"Wu,J., Li,X., Xu,S., Yuan,H., Ding,H., Yang,Y., Li,X., Zhang,J., Tong,Y., Jiang,X., Ghanem,B., & Tao,D.(2024). Towards open vocabulary learning: A survey, IEEE Trans. Pattern Anal. Mach. 
Intell.,46(7), 5092\u20135113.","DOI":"10.1109\/TPAMI.2024.3361862"},{"key":"2729_CR8","doi-asserted-by":"crossref","unstructured":"Liu,C., Ding,H., & Jiang,X.(2023). GRES: Generalized referring expression segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 23\u00a0592\u201323\u00a0601.","DOI":"10.1109\/CVPR52729.2023.02259"},{"key":"2729_CR9","doi-asserted-by":"crossref","unstructured":"Hu,Y., Wang,Q., Shao,W., Xie,E., Li,Z., Han,J., & Luo,P.(2023). Beyond one-to-one: Rethinking the referring image segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 4067\u20134077.","DOI":"10.1109\/ICCV51070.2023.00376"},{"key":"2729_CR10","doi-asserted-by":"crossref","unstructured":"Wu,Y., Zhang,Z., Xie,C., Zhu,F., & Zhao,R.(2023). Advancing referring expression segmentation beyond single image, in Proc. IEEE Int. Conf. Comput. Vis. , pp. 2628\u20132638.","DOI":"10.1109\/ICCV51070.2023.00248"},{"key":"2729_CR11","doi-asserted-by":"crossref","unstructured":"Wu,C., Lin,Z., Cohen,S., Bui,T., & Maji,S.(2020). Phrasecut: Language-based image segmentation in the wild, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 10\u00a0216\u201310\u00a0225.","DOI":"10.1109\/CVPR42600.2020.01023"},{"key":"2729_CR12","unstructured":"Grubinger,M., Clough,P., M\u00fcller,H., & Deselaers,T.(2006). The iapr tc-12 benchmark: A new evaluation resource for visual information systems, in International workshop ontoImage, 2."},{"key":"2729_CR13","doi-asserted-by":"crossref","unstructured":"Lin,T.-Y., Maire,M., Belongie,S., Hays,J., Perona,P., Ramanan,D., Doll\u00e1r,P., & Zitnick,C. L.(2014). Microsoft coco: Common objects in context, in Proc. Eur. Conf. Comput. Vis. Springer, pp. 740\u2013755.","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"2729_CR14","doi-asserted-by":"crossref","unstructured":"Krishna,R., Zhu,Y., Groth,O., Johnson,J., Hata,K., Kravitz,J., Chen,S., Kalantidis,Y., Li,L.-J., & Shamma,D. A. et al.(2017). 
Visual genome: Connecting language and vision using crowdsourced dense image annotations, Int. J. Comput. Vis.,123(1), 32\u201373.","DOI":"10.1007\/s11263-016-0981-7"},{"key":"2729_CR15","doi-asserted-by":"crossref","unstructured":"Hu,R., Xu,H., Rohrbach,M., Feng,J., Saenko,K., & Darrell,T.(2016). Natural language object retrieval, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4555\u20134564.","DOI":"10.1109\/CVPR.2016.493"},{"key":"2729_CR16","doi-asserted-by":"crossref","unstructured":"Hu,R., Rohrbach,M., & Darrell,T.(2016). Segmentation from natural language expressions, in Proc. Eur. Conf. Comput. Vis. Springer, pp. 108\u2013124.","DOI":"10.1007\/978-3-319-46448-0_7"},{"key":"2729_CR17","doi-asserted-by":"crossref","unstructured":"Escalante,H. J., Hern\u00e1ndez,C. A., Gonzalez,J. A., L\u00f3pez-L\u00f3pez,A., Montes,M., Morales,E. F., Sucar,L. E., Villasenor,L., & Grubinger,M.(2010). The segmented and annotated iapr tc-12 benchmark, Computer vision and image understanding,114(4), 419\u2013428.","DOI":"10.1016\/j.cviu.2009.03.008"},{"key":"2729_CR18","doi-asserted-by":"crossref","unstructured":"Plummer,B. A., Wang,L., Cervantes,C. M., Caicedo,J. C., Hockenmaier,J., & Lazebnik,S.(2015). Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models, in Proc. IEEE Int. Conf. Comput. Vis., pp. 2641\u20132649.","DOI":"10.1109\/ICCV.2015.303"},{"key":"2729_CR19","doi-asserted-by":"crossref","unstructured":"Chen,D. Z., Chang,A. X., & Nie\u00dfner,M.(2020). Scanrefer: 3d object localization in rgb-d scans using natural language, in Proc. Eur. Conf. Comput. Vis. Springer, pp. 202\u2013221.","DOI":"10.1007\/978-3-030-58565-5_13"},{"key":"2729_CR20","unstructured":"Karazija,L., Laina,I., & Rupprecht,C.(2021). 
Clevrtex: A texture-rich benchmark for unsupervised multi-object segmentation, in NeurIPS Track on Datasets and Benchmarks."},{"key":"2729_CR21","doi-asserted-by":"crossref","unstructured":"Margffoy-Tuay,E., P\u00e9rez,J. C., Botero,E., & Arbel\u00e1ez,P.(2018). Dynamic multimodal instance segmentation guided by natural language queries, in Proc. Eur. Conf. Comput. Vis., pp. 630\u2013645.","DOI":"10.1007\/978-3-030-01252-6_39"},{"key":"2729_CR22","unstructured":"Zhang,Z., Zhu,Y., Liu,J., Liang,X., & Ke,W.(2022). Coupalign: Coupling word-pixel with sentence-mask alignments for referring image segmentation, arXiv preprint arXiv:2212.01769 ."},{"key":"2729_CR23","doi-asserted-by":"crossref","unstructured":"Ding,H., Cohen,S., Price,B., & Jiang,X.(2020). Phraseclick: toward achieving flexible interactive segmentation by phrase and click, in Proc. Eur. Conf. Comput. Vis. Springer, pp. 417\u2013435.","DOI":"10.1007\/978-3-030-58580-8_25"},{"key":"2729_CR24","doi-asserted-by":"crossref","unstructured":"Li,R., Li,K., Kuo,Y.-C., Shu,M., Qi,X., Shen,X., & Jia,J.(2018). Referring image segmentation via recurrent refinement networks, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 5745\u20135753.","DOI":"10.1109\/CVPR.2018.00602"},{"key":"2729_CR25","doi-asserted-by":"crossref","unstructured":"Chen,D.-J., Jia,S., Lo,Y.-C., Chen,H.-T., & Liu,T.-L.(2019). See-through-text grouping for referring image segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 7454\u20137463.","DOI":"10.1109\/ICCV.2019.00755"},{"key":"2729_CR26","unstructured":"Ye,L., Rochan,M., Liu,Z., & Wang,Y.(2019). Cross-modal self-attention network for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit. , pp. 10\u00a0502\u201310\u00a0511."},{"key":"2729_CR27","doi-asserted-by":"crossref","unstructured":"Hu,Z., Feng,G., Sun,J., Zhang,L., & Lu,H.(2020). Bi-directional relationship inferring network for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. 
Pattern Recognit. , pp. 4424\u20134433.","DOI":"10.1109\/CVPR42600.2020.00448"},{"key":"2729_CR28","unstructured":"Huang,S., Hui,T., Liu,S., Li,G., Wei,Y., Han,J., Liu,L., & Li,B.(2020). Referring image segmentation via cross-modal progressive comprehension, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 10\u00a0488\u201310\u00a0497."},{"key":"2729_CR29","doi-asserted-by":"crossref","unstructured":"Hui,T., Liu,S., Huang,S., Li,G., Yu,S., Zhang,F., & Han,J.(2020). Linguistic structure guided context modeling for referring image segmentation, in Proc. Eur. Conf. Comput. Vis. Springer, pp. 59\u201375.","DOI":"10.1007\/978-3-030-58607-2_4"},{"key":"2729_CR30","doi-asserted-by":"crossref","unstructured":"Luo,G., Zhou,Y., Ji,R., Sun,X., Su,J., Lin,C.-W., & Tian,Q.(2020). Cascade grouped attention network for referring expression segmentation, in ACM Int. Conf. Multimedia, pp. 1274\u20131282.","DOI":"10.1145\/3394171.3414006"},{"key":"2729_CR31","doi-asserted-by":"crossref","unstructured":"Liu,C., Jiang,X., & Ding,H.(2022). Instance-specific feature propagation for referring segmentation, IEEE Trans. Multimedia.","DOI":"10.1109\/TMM.2022.3163578"},{"key":"2729_CR32","unstructured":"Chen,Y.-W., Tsai,Y.-H., Wang,T., Lin,Y.-Y., & Yang,M.-H.(2019). Referring expression object segmentation with caption-aware consistency, in Proc. Brit. Mach. Vis. Conf."},{"key":"2729_CR33","doi-asserted-by":"crossref","unstructured":"Long,J., Shelhamer,E., & Darrell,T.(2015). Fully convolutional networks for semantic segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 3431\u20133440.","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"2729_CR34","doi-asserted-by":"crossref","unstructured":"Jing,Y., Kong,T., Wang,W., Wang,L., Li,L., & Tan,T.(2021). Locate then segment: A strong pipeline for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 
9858\u20139867.","DOI":"10.1109\/CVPR46437.2021.00973"},{"key":"2729_CR35","unstructured":"Luo,G., Zhou,Y., Sun,X., Cao,L., Wu,C., Deng,C., & Ji,R.(2020). Multi-task collaborative network for joint referring expression comprehension and segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 10\u00a0034\u201310\u00a0043."},{"key":"2729_CR36","unstructured":"Ding,H., Liu,C., Wang,S., & Jiang,X.(2021). Vision-language transformer and query generation for referring segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 16\u00a0321\u201316\u00a0330."},{"key":"2729_CR37","doi-asserted-by":"crossref","unstructured":"Carion,N., Massa,F., Synnaeve,G., Usunier,N., Kirillov,A., & Zagoruyko,S.(2020). End-to-end object detection with transformers, in Proc. Eur. Conf. Comput. Vis. Springer, pp. 213\u2013229.","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"2729_CR38","doi-asserted-by":"crossref","unstructured":"Yang,Z., Wang,J., Tang,Y., Chen,K., Zhao,H., & Torr,P. H.(2022). Lavt: Language-aware vision transformer for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 18\u00a0155\u201318\u00a0165.","DOI":"10.1109\/CVPR52688.2022.01762"},{"key":"2729_CR39","doi-asserted-by":"crossref","unstructured":"Wang,Z., Lu,Y., Li,Q., Tao,X., Guo,Y., Gong,M., & Liu,T.(2022). Cris: Clip-driven referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 11\u00a0686\u201311\u00a0695.","DOI":"10.1109\/CVPR52688.2022.01139"},{"key":"2729_CR40","unstructured":"Tang,J., Zheng,G., Shi,C., & Yang,S.(2023). Contrastive grouping with transformer for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 23\u00a0570\u201323\u00a0580."},{"key":"2729_CR41","doi-asserted-by":"crossref","unstructured":"Liu,J., Ding,H., Cai,Z., Zhang,Y., Satzoda,R. K., Mahadevan,V., & Manmatha,R.(2023). Polyformer: Referring image segmentation as sequential polygon generation, in Proc. IEEE Conf. Comput. Vis. 
Pattern Recognit., pp. 18\u00a0653\u201318\u00a0663.","DOI":"10.1109\/CVPR52729.2023.01789"},{"key":"2729_CR42","unstructured":"Yan,B., Jiang,Y., Wu,J., Wang,D., Yuan,Z., Luo,P., & Lu,H.(2023). Universal instance perception as object discovery and retrieval, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 15\u00a0325\u201315\u00a0336."},{"key":"2729_CR43","doi-asserted-by":"crossref","unstructured":"Xu,Z., Chen,Z., Zhang,Y., Song,Y., Wan,X., & Li,G.(2023). Bridging vision and language encoders: Parameter-efficient tuning for referring image segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 17\u00a0503\u201317\u00a0512.","DOI":"10.1109\/ICCV51070.2023.01605"},{"key":"2729_CR44","doi-asserted-by":"crossref","unstructured":"Li,W., Zhao,Z., Bai,H., & Su,F.(2024). Bring adaptive binding prototypes to generalized referring expression segmentation, IEEE Trans. Multimedia.","DOI":"10.1109\/TMM.2025.3565964"},{"key":"2729_CR45","doi-asserted-by":"crossref","unstructured":"Xia,Z., Han,D., Han,Y., Pan,X., Song,S., & Huang,G.(2024). Gsva: Generalized segmentation via multimodal large language models, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 3858\u20133869.","DOI":"10.1109\/CVPR52733.2024.00370"},{"key":"2729_CR46","doi-asserted-by":"crossref","unstructured":"Shah,N. A., VS,V., & Patel,V. M.(2024). Lqmformer: Language-aware query mask transformer for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 12\u00a0903\u201312\u00a0913.","DOI":"10.1109\/CVPR52733.2024.01226"},{"key":"2729_CR47","doi-asserted-by":"crossref","unstructured":"Luo,G., Zhou,Y., Sun,X., Wu,Y., Gao,Y., & Ji,R.(2024). Towards language-guided visual recognition via dynamic convolutions, Int. J. Comput. Vis.,132(1), 1\u201319.","DOI":"10.1007\/s11263-023-01871-1"},{"key":"2729_CR48","doi-asserted-by":"crossref","unstructured":"Zhang,Z., Ma,Y., Zhang,E., & Bai,X.(2024). Psalm: Pixelwise segmentation with large multi-modal model, in Proc. Eur. Conf. 
Comput. Vis.","DOI":"10.1007\/978-3-031-72754-2_5"},{"key":"2729_CR49","doi-asserted-by":"crossref","unstructured":"Wang,Y., Ding,H., He,S., Jiang,X., Wei,B., & Liu,J.(2025). Hierarchical alignment-enhanced adaptive grounding network for generalized referring expression comprehension, in AAAI.","DOI":"10.1609\/aaai.v39i8.32867"},{"key":"2729_CR50","doi-asserted-by":"crossref","unstructured":"Lai,X., Tian,Z., Chen,Y., Li,Y., Yuan,Y., Liu,S., & Jia,J.(2024). Lisa: Reasoning segmentation via large language model, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit .","DOI":"10.1109\/CVPR52733.2024.00915"},{"key":"2729_CR51","doi-asserted-by":"crossref","unstructured":"Wang,P., Wu,Q., Cao,J., Shen,C., Gao,L., & Hengel,A. v. d.(2019). Neighbourhood watch: Referring expression comprehension via language-guided graph attention networks, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 1960\u20131968.","DOI":"10.1109\/CVPR.2019.00206"},{"key":"2729_CR52","doi-asserted-by":"crossref","unstructured":"Liu,D., Zhang,H., Wu,F., & Zha,Z.-J.(2019). Learning to assemble neural module tree networks for visual grounding, in Proc. IEEE Int. Conf. Comput. Vis., pp. 4673\u20134682.","DOI":"10.1109\/ICCV.2019.00477"},{"key":"2729_CR53","doi-asserted-by":"crossref","unstructured":"Yang,Z., Gong,B., Wang,L., Huang,W., Yu,D., & Luo,J.(2019). A fast and accurate one-stage approach to visual grounding, in Proc. IEEE Int. Conf. Comput. Vis., pp. 4683\u20134693.","DOI":"10.1109\/ICCV.2019.00478"},{"key":"2729_CR54","doi-asserted-by":"crossref","unstructured":"Zhuang,B., Wu,Q., Shen,C., Reid,I., & Van Den Hengel,A.(2018). Parallel attention: A unified framework for visual object discovery through dialogs and queries, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4252\u20134261.","DOI":"10.1109\/CVPR.2018.00447"},{"key":"2729_CR55","doi-asserted-by":"crossref","unstructured":"Yang,Z., Chen,T., Wang,L., & Luo,J.(2020). 
Improving one-stage visual grounding by recursive sub-query construction, in Proc. Eur. Conf. Comput. Vis., vol. 12359.Springer, pp. 387\u2013404.","DOI":"10.1007\/978-3-030-58568-6_23"},{"key":"2729_CR56","unstructured":"Liao,Y., Liu,S., Li,G., Wang,F., Chen,Y., Qian,C., & Li,B.(2020). A real-time cross-modality correlation filtering method for referring expression comprehension, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 10\u00a0880\u201310\u00a0889."},{"key":"2729_CR57","doi-asserted-by":"crossref","unstructured":"Jin,L., Luo,G., Zhou,Y., Sun,X., Jiang,G., Shu,A., & Ji,R.(2023). Refclip: A universal teacher for weakly supervised referring expression comprehension, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 2681\u20132690.","DOI":"10.1109\/CVPR52729.2023.00263"},{"key":"2729_CR58","doi-asserted-by":"crossref","unstructured":"Hu,R., Rohrbach,M., Andreas,J., Darrell,T., & Saenko,K.(2017). Modeling relationships in referential expressions with compositional modular networks, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 1115\u20131124.","DOI":"10.1109\/CVPR.2017.470"},{"key":"2729_CR59","doi-asserted-by":"crossref","unstructured":"Zhang,H., Niu,Y., & Chang,S.-F.(2018). Grounding referring expressions in images by variational context, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4158\u20134166.","DOI":"10.1109\/CVPR.2018.00437"},{"key":"2729_CR60","doi-asserted-by":"crossref","unstructured":"Hong,R., Liu,D., Mo,X., He,X., & Zhang,H.(2022). Learning to compose and reason with language tree structures for visual grounding, IEEE Trans. Pattern Anal. Mach. Intell.,44(2), 684\u2013696 .","DOI":"10.1109\/TPAMI.2019.2911066"},{"key":"2729_CR61","doi-asserted-by":"crossref","unstructured":"He,K., Gkioxari,G., Doll\u00e1r,P., & Girshick,R.(2017). Mask r-cnn, in Proc. IEEE Int. Conf. Comput. Vis., pp. 
2961\u20132969.","DOI":"10.1109\/ICCV.2017.322"},{"key":"2729_CR62","unstructured":"Chen,X., Ma,L., Chen,J., Jie,Z., Liu,W., & Luo,J.(2018). Real-time referring expression comprehension by single-stage grounding network, arXiv preprint arXiv:1812.03426."},{"key":"2729_CR63","doi-asserted-by":"crossref","unstructured":"Sun,M., Suo,W., Wang,P., Zhang,Y., & Wu,Q.(2022). A proposal-free one-stage framework for referring expression comprehension and generation via dense cross-attention, IEEE Trans. Multimedia.","DOI":"10.1109\/TMM.2022.3147385"},{"key":"2729_CR64","doi-asserted-by":"crossref","unstructured":"Deng,J., Yang,Z., Chen,T., Zhou,W., & Li,H.(2021). Transvg: End-to-end visual grounding with transformers, in Proc. IEEE Int. Conf. Comput. Vis., pp. 1769\u20131779.","DOI":"10.1109\/ICCV48922.2021.00179"},{"key":"2729_CR65","unstructured":"Redmon,J., & Farhadi,A.(2018). Yolov3: An incremental improvement, arXiv preprint arXiv:1804.02767."},{"key":"2729_CR66","doi-asserted-by":"crossref","unstructured":"Kamath,A., Singh,M., LeCun,Y., Misra,I., Synnaeve, G.,& Carion,N.(2021). Mdetr \u2013 modulated detection for end-to-end multi-modal understanding, in Proc. IEEE Int. Conf. Comput. Vis.","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"2729_CR67","doi-asserted-by":"crossref","unstructured":"Liu,S., Zeng,Z., Ren,T., Li,F., Zhang,H., Yang,J., Li,C., Yang,J., Su,H., & Zhu,J., et al.(2023). Grounding dino: Marrying dino with grounded pre-training for open-set object detection, in Proc. Eur. Conf. Comput. Vis.","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"2729_CR68","unstructured":"Zhao,X., Chen,Y., Xu,S., Li,X., Wang,X., Li,Y., & Huang,H.(2024). An open and comprehensive pipeline for unified object grounding and detection, arXiv preprint arXiv:2401.02361."},{"key":"2729_CR69","unstructured":"Cardiel,A., Zablocki,E., Ramzi,E., Sim\u00e9oni,O., & Cord,M.(2025). 
Llm-wrapper: Black-box semantic-aware adaptation of vision-language models for referring expression comprehension, in Proc. Int. Conf. Learn. Represent."},{"key":"2729_CR70","unstructured":"Chen,K., Zhang,Z., Zeng,W., Zhang,R., Zhu,F., & Zhao,R.(2023). Shikra: Unleashing multimodal llm\u2019s referential dialogue magic, arXiv preprint arXiv:2306.15195."},{"key":"2729_CR71","doi-asserted-by":"crossref","unstructured":"Reiter,E., & Dale,R.(1997). Building applied natural language generation systems, Natural Language Engineering, pp. 57\u201387.","DOI":"10.1017\/S1351324997001502"},{"key":"2729_CR72","doi-asserted-by":"crossref","unstructured":"Rohrbach,A., Rohrbach,A., Hu,R., Darrell,T., & Schiele,B.(2016). Grounding of textual phrases in images by reconstruction, in Proc. Eur. Conf. Comput. Vis.Springer, pp. 817\u2013834.","DOI":"10.1007\/978-3-319-46448-0_49"},{"key":"2729_CR73","doi-asserted-by":"crossref","unstructured":"Kim,J., Ko,H., & Wu,J.(2020). Conan: A complementary neighboring-based attention network for referring expression generation, in Proc. of the Int. Conf. on Comput. Ling., pp. 1952\u20131962.","DOI":"10.18653\/v1\/2020.coling-main.177"},{"key":"2729_CR74","doi-asserted-by":"crossref","unstructured":"Tanaka,M., Itamochi,T., Narioka,K., Sato,I., Ushiku,Y. & Harada,T.(2019). Generating easy-to-understand referring expressions for target identifications, in Proc. IEEE Int. Conf. Comput. Vis. , pp. 5794\u20135803.","DOI":"10.1109\/ICCV.2019.00589"},{"key":"2729_CR75","doi-asserted-by":"crossref","unstructured":"Ye,F., Long,Y., Feng,F., & Wang,X.(2023). Whether you can locate or not? interactive referring expression generation, in ACM Int. Conf. Multimedia, pp. 4697\u20134706.","DOI":"10.1145\/3581783.3612214"},{"key":"2729_CR76","unstructured":"Sch\u00fcz,S., & Zarrie\u00df,S.(2021). Decoupling pragmatics: discriminative decoding for referring expression generation, in Proc. of the Reason. and Inter. Conf., pp. 
47\u201352."},{"key":"2729_CR77","unstructured":"Bracha,L., Shaar,E., Shamsian,A., Fetaya,E., & Chechik,G.(2023). Disclip: Open-vocabulary referring expression generation, arXiv preprint arXiv:2305.19108 ."},{"key":"2729_CR78","unstructured":"Xiaoke,H., Jianfeng,W., Yansong,T., Zheng,Z., Han,H., Jiwen,L., Lijuan,W., & Zicheng,L.(2024). Segment and Caption Anything, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit."},{"key":"2729_CR79","unstructured":"Yang,X., Xu,L., Sun,H., Li,H., & Zhang,S.(2024). Enhancing visual grounding and generalization: A multi-task cycle training approach for vision-language models, arXiv preprint arXiv:2311.12327."},{"key":"2729_CR80","unstructured":"Liang,Y., Cai,Z., Xu,J., Huang,G., Wang,Y., Liang,X., Liu,J., Li,Z., Wang,J., & Huang,S.-L.(2024). Unleashing region understanding in intermediate layers for mllm-based referring expression generation, Proc. Adv. Neural Inform. Process. Syst., vol. 37, pp. 120\u00a0578\u2013120\u00a0601 ."},{"key":"2729_CR81","doi-asserted-by":"crossref","unstructured":"Yu,E., Zhao,L., Wei,Y., Yang,J., Wu,D., Kong,L., Wei,H., Wang,T., Ge,Z., & Zhang,X., et al.(2024). Merlin: Empowering multimodal llms with foresight minds, in Proc. Eur. Conf. Comput. Vis. .","DOI":"10.1007\/978-3-031-73235-5_24"},{"key":"2729_CR82","unstructured":"Peng,Z., Wang,W., Dong,L., Hao,Y., Huang,S., Ma,S., & Wei,F.(2024). Kosmos-2: Grounding multimodal large language models to the world, in Proc. Int. Conf. Learn. Represent."},{"key":"2729_CR83","doi-asserted-by":"crossref","unstructured":"Luo,G., Zhou,Y., Sun,X., Cao,L., Wu,C., Deng,C., & Ji,R.(2020). Multi-task collaborative network for joint referring expression comprehension and segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit.","DOI":"10.1109\/CVPR42600.2020.01005"},{"key":"2729_CR84","doi-asserted-by":"crossref","unstructured":"Kang,W., Liu,G., Shah,M., & Yan,Y.(2024). Segvg: Transferring object bounding box to segmentation for visual grounding, in Proc. Eur. 
Conf. Comput. Vis .","DOI":"10.1007\/978-3-031-72920-1_4"},{"key":"2729_CR85","unstructured":"Chen,Y.-W., Tsai,Y.-H., Wang,T., Lin,Y.-Y., & Yang,M.-H.(2019). Referring expression object segmentation with caption-aware consistency, Proc. Brit. Mach. Vis. Conf."},{"key":"2729_CR86","doi-asserted-by":"crossref","unstructured":"Rasheed,H., Maaz,M., Shaji,S., Shaker,A., Khan,S., Cholakkal,H., Anwer,R. M., Xing,E., Yang,M.-H., & Khan,F. S.(2024). Glamm: Pixel grounding large multimodal model, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 13\u00a0009\u201313\u00a0018.","DOI":"10.1109\/CVPR52733.2024.01236"},{"key":"2729_CR87","doi-asserted-by":"crossref","unstructured":"Xiao,B., Wu,H., Xu,W., Dai,X., Hu,H., Lu,Y., Zeng,M., Liu,C., & Yuan,L.(2024). Florence-2: Advancing a unified representation for a variety of vision tasks, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4818\u20134829.","DOI":"10.1109\/CVPR52733.2024.00461"},{"key":"2729_CR88","unstructured":"Banerjee,S., & Lavie,A.(2005). Meteor: An automatic metric for mt evaluation with improved correlation with human judgments, in Proc. Assoc. for Comput. Ling., pp. 65\u201372."},{"key":"2729_CR89","doi-asserted-by":"crossref","unstructured":"Vedantam,R., Lawrence Zitnick,C., & Parikh,D.(2015). Cider: Consensus-based image description evaluation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4566\u20134575.","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"2729_CR90","doi-asserted-by":"crossref","unstructured":"Liu,J., Wang,W., Wang,L., & Yang,M.-H.(2020). Attribute-guided attention for referring expression generation and comprehension, IEEE Trans. Image Processing.","DOI":"10.1109\/TIP.2020.2979010"},{"key":"2729_CR91","doi-asserted-by":"crossref","unstructured":"Robertson,S.(2004). Understanding inverse document frequency: on theoretical arguments for idf, Journal of documentation, vol. 60, no. 5, pp. 
503\u2013520,","DOI":"10.1108\/00220410410560582"},{"key":"2729_CR92","doi-asserted-by":"crossref","unstructured":"Xia,Z., Han,D., Han,Y., Pan,X., Song,S., & Huang,G.(2024). Gsva: Generalized segmentation via multimodal large language models, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit. , pp. 3858\u20133869.","DOI":"10.1109\/CVPR52733.2024.00370"},{"key":"2729_CR93","doi-asserted-by":"crossref","unstructured":"Geng,Z., Yang,B., Hang,T., Li,C., Gu,S., Zhang,T., Bao,J., Zhang,Z., Hu,H., & Chen,D. et al.(2024). Instructdiffusion: A generalist modeling interface for vision tasks, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit.","DOI":"10.1109\/CVPR52733.2024.01208"},{"key":"2729_CR94","unstructured":"Dosovitskiy,A., Beyer,L., Kolesnikov,A., Weissenborn,D., Zhai,X., Unterthiner,T., Dehghani,M., Minderer,M., Heigold,G., Gelly,S., Uszkoreit,J., & Houlsby,N.(2021). An image is worth 16x16 words: Transformers for image recognition at scale, in Proc. Int. Conf. Learn. Represent."},{"key":"2729_CR95","doi-asserted-by":"crossref","unstructured":"Liu,Z., Lin,Y., Cao,Y., Hu,H., Wei,Y., Zhang,Z., Lin,S., & Guo,B.(2021). Swin transformer: Hierarchical vision transformer using shifted windows, in Proc. IEEE Int. Conf. Comput. Vis., pp. 10\u00a0012\u201310\u00a0022.","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"2729_CR96","doi-asserted-by":"crossref","unstructured":"Devlin,J., Chang,M., Lee,K., & Toutanova,K.(2019). BERT: pre-training of deep bidirectional transformers for language understanding, in Proc. Assoc. for Comput. Ling., vol. 1.Association for Computational Linguistics, pp. 4171\u20134186.","DOI":"10.18653\/v1\/N19-1423"},{"key":"2729_CR97","unstructured":"Xie,E., Wang,W., Yu,Z., Anandkumar,A., Alvarez,J. M., & Luo,P.(2021). Segformer: Simple and efficient design for semantic segmentation with transformers, in Proc. Adv. Neural Inform. Process. Syst., pp. 
12\u00a0077\u201312\u00a0090."},{"key":"2729_CR98","doi-asserted-by":"crossref","unstructured":"Strudel,R., Garcia,R., Laptev,I., & Schmid,C.(2021). Segmenter: Transformer for semantic segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 7262\u20137272.","DOI":"10.1109\/ICCV48922.2021.00717"},{"key":"2729_CR99","doi-asserted-by":"crossref","unstructured":"Kim,N., Kim,D., Lan,C., Zeng,W., & Kwak,S.(2022). Restr: Convolution-free referring image segmentation using transformers, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 18\u00a0145\u201318\u00a0154.","DOI":"10.1109\/CVPR52688.2022.01761"},{"key":"2729_CR100","doi-asserted-by":"crossref","unstructured":"Rezatofighi,H., Tsoi,N., Gwak,J., Sadeghian,A., Reid,I., & Savarese,S.(2019). Generalized intersection over union: A metric and a loss for bounding box regression, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 658\u2013666.","DOI":"10.1109\/CVPR.2019.00075"},{"key":"2729_CR101","unstructured":"Hendrycks,D., & Gimpel,K.(2016). Gaussian error linear units (gelus), arXiv preprint arXiv:1606.08415."},{"key":"2729_CR102","doi-asserted-by":"crossref","unstructured":"Wolf,T., Debut,L., Sanh,V., Chaumond,J., Delangue,C., Moi,A., Cistac,P., Rault,T., Louf,R., Funtowicz,M., Davison,J., Shleifer,S., von Platen,P., Ma,C., Jernite,Y., Plu,J., Xu,C., Scao,T. L., Gugger,S., Drame,M., Lhoest,Q., & Rush,A. M.(2020). Transformers: State-of-the-art natural language processing, in Proc. of the Conf. on Empirical Methods in Natural Language Process., pp. 38\u201345.","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"2729_CR103","unstructured":"Liu,S., Hui,T., Huang,S., Wei,Y., Li,B., & Li,G.(2022). Cross-modal progressive comprehension for referring segmentation, IEEE Trans. Pattern Anal. Mach. Intell., 44(9), 4761\u20134775."},{"key":"2729_CR104","doi-asserted-by":"crossref","unstructured":"Feng,G., Hu,Z., Zhang,L., & Lu,H.(2021). 
Encoder fusion network with co-attention embedding for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit..","DOI":"10.1109\/CVPR46437.2021.01525"},{"key":"2729_CR105","doi-asserted-by":"crossref","unstructured":"Yang,S., Xia,M., Li,G., Zhou,H.-Y., & Yu,Y.(2021). Bottom-up shift and reasoning for referring image segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit.","DOI":"10.1109\/CVPR46437.2021.01111"},{"key":"2729_CR106","doi-asserted-by":"crossref","unstructured":"Kirillov,A., Mintun,E., Ravi,N., Mao,H., Rolland,C., Gustafson,L., Xiao,T., Whitehead,S., Berg,A. C., & Lo,W.-Y., et al.(2023). Segment anything, in Proc. IEEE Int. Conf. Comput. Vis., pp. 4015\u20134026.","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"2729_CR107","unstructured":"Hurst,A., Lerer,A., Goucher,A. P., Perelman,A., Ramesh,A., Clark,A., Ostrow,A., Welihinda,A., Hayes,A., & Radford,A., et al.(2024). Gpt-4o system card, arXiv preprint arXiv:2410.21276."},{"key":"2729_CR108","unstructured":"Zhu,J., Wang,W., Chen,Z., Liu,Z., Ye,S., Gu,L., Tian,H., Duan,Y., Su,W., & Shao,J., et al.(2025). Internvl3: Exploring advanced training and test-time recipes for open-source multimodal models, arXiv preprint arXiv:2504.10479."},{"key":"2729_CR109","unstructured":"Bai,S., Chen,K., Liu,X., Wang, J., Ge,W., Song,S., Dang,K., Wang,P., Wang,S., & Tang,J. et al.(2025). Qwen2. 5-vl technical report, arXiv preprint arXiv:2502.13923."},{"key":"2729_CR110","doi-asserted-by":"crossref","unstructured":"Seo,S., Lee,J.-Y., & Han,B.(2020). Urvos: Unified referring video object segmentation network with a large-scale benchmark, in Proc. Eur. Conf. Comput. Vis.Springer, pp. 208\u2013223.","DOI":"10.1007\/978-3-030-58555-6_13"},{"key":"2729_CR111","doi-asserted-by":"crossref","unstructured":"Ding,Z., Hui,T., Huang,J., Wei,X., Han,J., & Liu,S.(2022). Language-bridged spatial-temporal interaction for referring video object segmentation, in Proc. IEEE Conf. Comput. Vis. 
Pattern Recognit., pp. 4964\u20134973.","DOI":"10.1109\/CVPR52688.2022.00491"},{"key":"2729_CR112","doi-asserted-by":"crossref","unstructured":"Botach,A., Zheltonozhskii,E., & Baskin,C.(2022). End-to-end referring video object segmentation with multimodal transformers, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4985\u20134995.","DOI":"10.1109\/CVPR52688.2022.00493"},{"key":"2729_CR113","doi-asserted-by":"crossref","unstructured":"Wu,J., Jiang,Y., Sun,P., Yuan,Z., & Luo,P.(2022). Language as queries for referring video object segmentation, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 4974\u20134984.","DOI":"10.1109\/CVPR52688.2022.00492"},{"key":"2729_CR114","doi-asserted-by":"crossref","unstructured":"Wu,D., Wang,T., Zhang,Y., Zhang,X., & Shen,J.(2023). Onlinerefer: A simple online baseline for referring video object segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 2761\u20132770.","DOI":"10.1109\/ICCV51070.2023.00259"},{"key":"2729_CR115","doi-asserted-by":"crossref","unstructured":"Han,M., Wang,Y., Li,Z., Yao,L., Chang,X., & Qiao,Y.(2023). Html: Hybrid temporal-scale multimodal learning framework for referring video object segmentation, in Proc. IEEE Int. Conf. Comput. Vis., pp. 13\u00a0414\u201313\u00a0423.","DOI":"10.1109\/ICCV51070.2023.01234"},{"key":"2729_CR116","doi-asserted-by":"crossref","unstructured":"Ding,H., Liu,C., He,S., Jiang,X., & Loy, C. C.(2023). MeViS: A large-scale benchmark for video segmentation with motion expressions, in Proc. IEEE Int. Conf. Comput. Vis., pp. 2694\u20132703.","DOI":"10.1109\/ICCV51070.2023.00254"},{"key":"2729_CR117","doi-asserted-by":"crossref","unstructured":"Ding, H., Liu, C., He, S., Ying, K., Jiang, X., Loy, C. C., & Jiang,Y.-G.(2025). MeViS: A multi-modal dataset for referring motion expression video segmentation. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 47(12), 11400\u201311416.","DOI":"10.1109\/TPAMI.2025.3600507"},{"key":"2729_CR118","doi-asserted-by":"crossref","unstructured":"Liu,Z., Ning,J., Cao,Y., Wei,Y., Zhang,Z., Lin,S., & Hu,H.(2022). Video swin transformer, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 3202\u20133211.","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"2729_CR119","unstructured":"Touvron,H., Lavril,T., Izacard,G., Martinet,X., Lachaux,M.-A., Lacroix,T., Rozi\u00e8re,B., Goyal,N., Hambro,E., & Azhar,F. et al.(2023). Llama: Open and efficient foundation language models, arXiv preprint arXiv:2302.13971."},{"key":"2729_CR120","doi-asserted-by":"publisher","first-page":"681","DOI":"10.1007\/s11023-020-09548-1","volume":"30","author":"L Floridi","year":"2020","unstructured":"Floridi, L., & Chiriatti, M. (2020). Gpt-3: Its nature, scope, limits, and consequences. Minds and Machines, 30, 681\u2013694.","journal-title":"Minds and Machines"},{"key":"2729_CR121","unstructured":"Yang,J., Zhang,H., Li,F., Zou,X., Li,C., & Gao,J.(2023). 
Set-of-mark prompting unleashes extraordinary visual grounding in gpt-4v, arXiv preprint arXiv:2310.11441."}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-026-02729-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-026-02729-y","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-026-02729-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T15:20:43Z","timestamp":1771341643000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-026-02729-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,21]]},"references-count":121,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2729"],"URL":"https:\/\/doi.org\/10.1007\/s11263-026-02729-y","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,21]]},"assertion":[{"value":"5 August 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 January 2026","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 January 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"79"}}