{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,30]],"date-time":"2026-04-30T17:19:01Z","timestamp":1777569541287,"version":"3.51.4"},"publisher-location":"Cham","reference-count":55,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_32","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"546-562","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":18,"title":["SiRi: A Simple Selective Retraining Mechanism for\u00a0Transformer-Based Visual Grounding"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9432-0205","authenticated-orcid":false,"given":"Mengxue","family":"Qu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1680-8253","authenticated-orcid":false,"given":"Yu","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1633-7575","authenticated-orcid":false,"given":"Wu","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3070-8937","authenticated-orcid":false,"given":"Qiqi","family":"Gong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3213-3062","authenticated-orcid":false,"given":"Xiaodan","family":"Liang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5272-3241","authenticated-orcid":false,"given":"Olga","family":"Russakovsky","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8581-9554","authenticated-orcid":false,"given":"Yao","family":"Zhao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2812-8781","authenticated-orcid":false,"given":"Yunchao","family":"Wei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"32_CR1","doi-asserted-by":"crossref","unstructured":"Bolme, D.S., Beveridge, J.R., Draper, B.A., Lui, Y.M.: Visual object tracking using adaptive correlation filters. In: CVPR (2010)","DOI":"10.1109\/CVPR.2010.5539960"},{"key":"32_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"32_CR3","unstructured":"Chen, X., Ma, L., Chen, J., Jie, Z., Liu, W., Luo, J.: Real-time referring expression comprehension by single-stage grounding network. arXiv preprint arXiv:1812.03426 (2018)"},{"key":"32_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Y.-C., et al.: UNITER: learning universal image-text representations (2019)","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"32_CR5","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"32_CR6","doi-asserted-by":"crossref","unstructured":"Deng, J., Yang, Z., Chen, T., Zhou, W., Li, H.: TransVG: end-to-end visual grounding with transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00179"},{"key":"32_CR7","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"32_CR8","unstructured":"Gan, Z., Chen, Y.-C., Li, L., Zhu, C., Cheng, Y., Liu, J.: Large-scale adversarial training for vision-and-language representation learning. In: NeruIPS (2020)"},{"key":"32_CR9","doi-asserted-by":"crossref","unstructured":"Girshick, R.: Fast R-CNN. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.169"},{"key":"32_CR10","unstructured":"Glorot, X., Bengio, Y.: Understanding the difficulty of training deep feedforward neural networks. In: AISTATS (2010)"},{"key":"32_CR11","unstructured":"Han, S., et al.: DSD: dense-sparse-sense training for deep neural networks. In: ICLR (2017)"},{"key":"32_CR12","doi-asserted-by":"crossref","unstructured":"Hansen, L.K., Salamon, P.: Neural network ensembles. IEEE Trans. Pattern Anal. Mach. Intell. 12, 993\u20131001 (1990)","DOI":"10.1109\/34.58871"},{"key":"32_CR13","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"32_CR14","doi-asserted-by":"crossref","unstructured":"Henriques, J.F., Caseiro, R., Martins, P., Batista, J.: High-speed tracking with kernelized correlation filters. IEEE Trans. Pattern Anal. Mach. Intell. 37, 583\u2013596 (2014)","DOI":"10.1109\/TPAMI.2014.2345390"},{"key":"32_CR15","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"key":"32_CR16","unstructured":"Hong, R., Liu, D., Mo, X., He, X., Zhang, H.: Learning to compose and reason with language tree structures for visual grounding. IEEE Trans. Pattern Anal. Mach. Intell. (2019)"},{"key":"32_CR17","doi-asserted-by":"crossref","unstructured":"Hu, R., Rohrbach, M., Andreas, J., Darrell, T., Saenko, K.: Modeling relationships in referential expressions with compositional modular networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.470"},{"key":"32_CR18","unstructured":"Huang, G., Li, Y., Pleiss, G., Liu, Z., Hopcroft, J.E., Weinberger, K.Q.: Snapshot ensembles: train 1, get m for free. In: ICLR (2017)"},{"key":"32_CR19","doi-asserted-by":"crossref","unstructured":"Huang, J., Qu, L., Jia, R., Zhao, B.: O2U-Net: a simple noisy label detection approach for deep neural networks. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00342"},{"key":"32_CR20","unstructured":"Huang, X.S., et al.: Improving transformer optimization through better initialization. 
In: ICML (2020)"},{"key":"32_CR21","doi-asserted-by":"crossref","unstructured":"Kamath, A., Singh, M., LeCun, Y., Synnaeve, G., Misra, I., Carion, N.: MDETR-modulated detection for end-to-end multi-modal understanding. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"32_CR22","unstructured":"Kim, W., Son, B., Kim, I.: ViLT: vision-and-language transformer without convolution or region supervision. In: ICML (2021)"},{"key":"32_CR23","unstructured":"Krogh, A., Vedelsby, J., et al.: Neural network ensembles, cross validation, and active learning. In: NeruIPS (1995)"},{"key":"32_CR24","unstructured":"Liang, C., Wu, Y., Luo, Y., Yang, Y.: ClawCraneNet: leveraging object-level relation for text-based video segmentation. arXiv preprint arXiv:2103.10702 (2021)"},{"key":"32_CR25","unstructured":"Liang, C., et al.: Rethinking cross-modal interaction from a top-down perspective for referring video object segmentation. arXiv preprint arXiv:2106.01061 (2021)"},{"key":"32_CR26","doi-asserted-by":"crossref","unstructured":"Liao, Y., et al.: A real-time cross-modality correlation filtering method for referring expression comprehension. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01089"},{"key":"32_CR27","doi-asserted-by":"crossref","unstructured":"Liu, D., Zhang, H., Wu, F., Zha, Z.-J.: Learning to assemble neural module tree networks for visual grounding. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00477"},{"key":"32_CR28","unstructured":"Liu, Y., et al.: RoBERTa: a robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"32_CR29","unstructured":"Loshchilov, I., Hutter, F.: Fixing weight decay regularization in adam (2018)"},{"key":"32_CR30","unstructured":"Jiasen, L., Batra, D., Parikh, D., Lee, S.: ViLBERT: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In: NeruIPS (2019)"},{"key":"32_CR31","doi-asserted-by":"crossref","unstructured":"Luo, G., et al.: Multi-task collaborative network for joint referring expression comprehension and segmentation. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01005"},{"key":"32_CR32","doi-asserted-by":"crossref","unstructured":"Mao, J., Huang, J., Toshev, A., Camburu, O., Yuille, A.L., Murphy, K.: Generation and comprehension of unambiguous object descriptions. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.9"},{"key":"32_CR33","unstructured":"Redmon, J., Farhadi, A.: YOLOv3: an incremental improvement. arXiv preprint arXiv:1804.02767 (2018)"},{"key":"32_CR34","doi-asserted-by":"crossref","unstructured":"Sadhu, A., Chen, K., Nevatia, R.: Zero-shot grounding of objects from natural language queries. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00479"},{"key":"32_CR35","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: ICLR (2015)"},{"key":"32_CR36","doi-asserted-by":"crossref","unstructured":"Smith, L.N.: Cyclical learning rates for training neural networks. In: WACV, pp. 464\u2013472. IEEE Computer Society (2017)","DOI":"10.1109\/WACV.2017.58"},{"key":"32_CR37","unstructured":"Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. J. Mach. Learn. Res. 15, 1929\u20131958 (2014)"},{"key":"32_CR38","unstructured":"Weijie, S., et al.: VL-BERT: pre-training of generic visual-linguistic representations. 
In: ICLR (2020)"},{"key":"32_CR39","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: LXMERT: learning cross-modality encoder representations from transformers. In: EMNLP (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"32_CR40","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeuruIPS (2017)"},{"key":"32_CR41","doi-asserted-by":"crossref","unstructured":"Wang, L., Li, Y., Huang, J., Lazebnik, S.: Learning two-branch neural networks for image-text matching tasks. IEEE Trans. Pattern Anal. Mach. Intell. 41, 394\u2013407 (2018)","DOI":"10.1109\/TPAMI.2018.2797921"},{"key":"32_CR42","doi-asserted-by":"crossref","unstructured":"Wang, P., Qi, W., Cao, J., Shen, C., Gao, L., van den Hengel, A.: Neighbourhood watch: referring expression comprehension via language-guided graph attention networks. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00206"},{"key":"32_CR43","doi-asserted-by":"crossref","unstructured":"Wu, Y., Jiang, L., Yang, Y.: Switchable novel object captioner. IEEE Trans. Pattern Anal. Mach. Intell., 1 (2022)","DOI":"10.1109\/TPAMI.2022.3199784"},{"key":"32_CR44","doi-asserted-by":"crossref","unstructured":"Yang, S., Li, G., Yu, Y.: Dynamic graph attention for referring expression comprehension. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00474"},{"key":"32_CR45","doi-asserted-by":"crossref","unstructured":"Yang, S., Li, G., Yu, Y.: Graph-structured referring expression reasoning in the wild. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00997"},{"key":"32_CR46","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"387","DOI":"10.1007\/978-3-030-58568-6_23","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Yang","year":"2020","unstructured":"Yang, Z., Chen, T., Wang, L., Luo, J.: Improving one-stage visual grounding by recursive sub-query construction. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12359, pp. 387\u2013404. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58568-6_23"},{"key":"32_CR47","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: LAVT: language-aware vision transformer for referring image segmentation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01762"},{"key":"32_CR48","doi-asserted-by":"crossref","unstructured":"Yang, Z., Gong, B., Wang, L., Huang, W., Yu, D., Luo, J.: A fast and accurate one-stage approach to visual grounding. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00478"},{"key":"32_CR49","unstructured":"Yu, F., et al.: ERNRE-ViL knowledge enhanced vision-language representations through scene graph (2020)"},{"key":"32_CR50","doi-asserted-by":"crossref","unstructured":"Yu, L., et al.: MAttNet: modular attention network for referring expression comprehension. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00142"},{"key":"32_CR51","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1007\/978-3-319-46475-6_5","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Yu","year":"2016","unstructured":"Yu, L., Poirson, P., Yang, S., Berg, A.C., Berg, T.L.: Modeling context in referring expressions. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 69\u201385. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46475-6_5"},{"key":"32_CR52","doi-asserted-by":"crossref","unstructured":"Zhang, H., Niu, Y., Chang, S.-F.: Grounding referring expressions in images by variational context. 
In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00437"},{"key":"32_CR53","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Wang, C., Wang, X., Zeng, W., Liu, W.: FairMOT: on the fairness of detection and re-identification in multiple object tracking. Int. J. Comput. Vis. 129, 3069\u20133087 (2021)","DOI":"10.1007\/s11263-021-01513-4"},{"key":"32_CR54","doi-asserted-by":"crossref","unstructured":"Zheng, K., Liu, W., Liu, J., Zha, Z.-J., Mei, T.: Hierarchical Gumbel attention network for text-based person search. In: ACM Multimedia, pp. 3441\u20133449. ACM (2020)","DOI":"10.1145\/3394171.3413864"},{"key":"32_CR55","doi-asserted-by":"crossref","unstructured":"Zhuang, B., Wu, Q., Shen, C., Reid, I., Van Den Hengel, A.: Parallel attention: a unified framework for visual object discovery through dialogs and queries. In CVPR (2018)","DOI":"10.1109\/CVPR.2018.00447"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,7]],"date-time":"2024-10-07T06:26:09Z","timestamp":1728282369000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":55,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}