{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,7]],"date-time":"2026-02-07T11:11:35Z","timestamp":1770462695365,"version":"3.49.0"},"publisher-location":"Cham","reference-count":50,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198380","type":"print"},{"value":"9783031198397","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19839-7_15","type":"book-chapter","created":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T11:40:06Z","timestamp":1666438806000},"page":"251-267","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":23,"title":["A Sketch is Worth a\u00a0Thousand Words: Image Retrieval with\u00a0Text and\u00a0Sketch"],"prefix":"10.1007","author":[{"given":"Patsorn","family":"Sangkloy","sequence":"first","affiliation":[]},{"given":"Wittawat","family":"Jitkrittum","sequence":"additional","affiliation":[]},{"given":"Diyi","family":"Yang","sequence":"additional","affiliation":[]},{"given":"James","family":"Hays","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,23]]},"reference":[{"key":"15_CR1","doi-asserted-by":"crossref","unstructured":"Alberti, C., Ling, J., Collins, M., Reitter, D.: Fusion of detected objects in text for visual question answering. 
arXiv preprint arXiv:1908.05054 (2019)","DOI":"10.18653\/v1\/D19-1219"},{"key":"15_CR2","unstructured":"Ben-Baruch, E., et al.: Asymmetric loss for multi-label classification (2020)"},{"key":"15_CR3","doi-asserted-by":"crossref","unstructured":"Bhunia, A.K., Yang, Y., Hospedales, T.M., Xiang, T., Song, Y.Z.: Sketch less for more: on-the-fly fine-grained sketch-based image retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9779\u20139788 (2020)","DOI":"10.1109\/CVPR42600.2020.00980"},{"key":"15_CR4","doi-asserted-by":"crossref","unstructured":"Changpinyo, S., Pont-Tuset, J., Ferrari, V., Soricut, R.: Telling the what while pointing to the where: multimodal queries for image retrieval. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12136\u201312146 (2021)","DOI":"10.1109\/ICCV48922.2021.01192"},{"key":"15_CR5","doi-asserted-by":"publisher","unstructured":"Chen, T., Cheng, M.M., Tan, P., Shamir, A., Hu, S.M.: Sketch2Photo: internet image montage. ACM Trans. Graph. 28(5), 1\u201310 (2009). https:\/\/doi.org\/10.1145\/1618452.1618470","DOI":"10.1145\/1618452.1618470"},{"key":"15_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"136","DOI":"10.1007\/978-3-030-58542-6_9","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Chen","year":"2020","unstructured":"Chen, Y., Bazzani, L.: Learning joint visual semantic matching embeddings for language-guided retrieval. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12367, pp. 136\u2013152. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58542-6_9"},{"key":"15_CR7","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/978-3-030-58577-8_7","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y-C Chen","year":"2020","unstructured":"Chen, Y.-C., et al.: UNITER: UNiversal image-TExt representation learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 104\u2013120. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_7"},{"key":"15_CR8","doi-asserted-by":"crossref","unstructured":"Collomosse, J., Bui, T., Jin, H.: LiveSketch: query perturbations for guided sketch-based visual search. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2879\u20132887 (2019)","DOI":"10.1109\/CVPR.2019.00299"},{"key":"15_CR9","doi-asserted-by":"crossref","unstructured":"Dey, S., Dutta, A., Ghosh, S.K., Valveny, E., Llad\u00f3s, J., Pal, U.: Learning cross-modal deep embeddings for multi-object image retrieval using text and sketch. In: 2018 24th International Conference on Pattern Recognition (ICPR), pp. 916\u2013921. IEEE (2018)","DOI":"10.1109\/ICPR.2018.8545452"},{"key":"15_CR10","doi-asserted-by":"crossref","unstructured":"Dey, S., Riba, P., Dutta, A., Llados, J., Song, Y.Z.: Doodle to search: practical zero-shot sketch-based image retrieval. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019","DOI":"10.1109\/CVPR.2019.00228"},{"key":"15_CR11","doi-asserted-by":"crossref","unstructured":"Dong, H., Wang, Z., Qiu, Q., Sapiro, G.: Using text to teach image retrieval (2020)","DOI":"10.1109\/CVPRW53098.2021.00180"},{"key":"15_CR12","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. 
In: ICLR (2021)"},{"key":"15_CR13","doi-asserted-by":"crossref","unstructured":"Dutta, A., Akata, Z.: Semantically tied paired cycle consistency for zero-shot sketch-based image retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5089\u20135098 (2019)","DOI":"10.1109\/CVPR.2019.00523"},{"key":"15_CR14","doi-asserted-by":"crossref","unstructured":"Gao, C., Liu, Q., Xu, Q., Wang, L., Liu, J., Zou, C.: SketchyCOCO: image generation from freehand scene sketches. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020","DOI":"10.1109\/CVPR42600.2020.00522"},{"key":"15_CR15","unstructured":"Han, T., Schlangen, D.: Draw and tell: multimodal descriptions outperform verbal- or sketch-only descriptions in an image retrieval task. In: Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pp. 361\u2013365. Asian Federation of Natural Language Processing, Taipei, November 2017. https:\/\/aclanthology.org\/I17-2061"},{"key":"15_CR16","doi-asserted-by":"crossref","unstructured":"Han, X., et al.: Automatic spatially-aware fashion concept discovery. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.163"},{"key":"15_CR17","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: ClipScore: a reference-free evaluation metric for image captioning (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"15_CR18","doi-asserted-by":"publisher","unstructured":"Hu, R., Collomosse, J.: A performance evaluation of gradient field hog descriptor for sketch based image retrieval. Comput. Vis. Image Underst. 117(7), 790\u2013806 (2013). https:\/\/doi.org\/10.1016\/j.cviu.2013.02.005","DOI":"10.1016\/j.cviu.2013.02.005"},{"key":"15_CR19","doi-asserted-by":"publisher","unstructured":"Ilharco, G., et al.: Openclip (2021). 
https:\/\/doi.org\/10.5281\/zenodo.5143773","DOI":"10.5281\/zenodo.5143773"},{"key":"15_CR20","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. arXiv preprint arXiv:2102.05918 (2021)"},{"key":"15_CR21","doi-asserted-by":"publisher","unstructured":"Song, J., Song, Y.Z., Xiang, T., Hospedales, T.M.: Fine-grained image retrieval: the text\/sketch input dilemma. In: Kim, T.K., Zafeiriou, S., Brostow, G., Mikolajczyk, K. (eds.), pp. 45.1-45.12. BMVA Press, September 2017. https:\/\/doi.org\/10.5244\/C.31.45","DOI":"10.5244\/C.31.45"},{"key":"15_CR22","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions (2015)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"15_CR23","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"15_CR24","doi-asserted-by":"crossref","unstructured":"Lee, K.H., Chen, X., Hua, G., Hu, H., He, X.: Stacked cross attention for image-text matching. arXiv preprint arXiv:1803.08024 (2018)","DOI":"10.1007\/978-3-030-01225-0_13"},{"key":"15_CR25","doi-asserted-by":"crossref","unstructured":"Li, M., Lin, Z., Mech, R., Yumer, E., Ramanan, D.: Photo-sketching: inferring contour drawings from images. In: WACV (2019)","DOI":"10.1109\/WACV.2019.00154"},{"key":"15_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1007\/978-3-030-58577-8_8","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Li","year":"2020","unstructured":"Li, X., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 121\u2013137. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58577-8_8"},{"key":"15_CR27","unstructured":"Lin, T., et al.: Microsoft COCO: common objects in context. CoRR abs\/1405.0312 (2014). http:\/\/arxiv.org\/abs\/1405.0312"},{"key":"15_CR28","doi-asserted-by":"crossref","unstructured":"Liu, Q., Xie, L., Wang, H., Yuille, A.L.: Semantic-aware knowledge preservation for zero-shot sketch-based image retrieval. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3662\u20133671 (2019)","DOI":"10.1109\/ICCV.2019.00376"},{"key":"15_CR29","doi-asserted-by":"crossref","unstructured":"Nilsback, M.E., Zisserman, A.: Automated flower classification over a large number of classes. In: 2008 Sixth Indian Conference on Computer Vision, Graphics and Image Processing, pp. 722\u2013729. IEEE (2008)","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"15_CR30","unstructured":"van den Oord, A., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)"},{"key":"15_CR31","doi-asserted-by":"crossref","unstructured":"Pandey, A., Mishra, A., Verma, V.K., Mittal, A., Murthy, H.: Stacked adversarial network for zero-shot sketch based image retrieval. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2540\u20132549 (2020)","DOI":"10.1109\/WACV45572.2020.9093402"},{"key":"15_CR32","doi-asserted-by":"crossref","unstructured":"Pang, K., et al.: Generalising fine-grained sketch-based image retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2019","DOI":"10.1109\/CVPR.2019.00077"},{"key":"15_CR33","doi-asserted-by":"crossref","unstructured":"Pang, K., Song, Y.Z., Xiang, T., Hospedales, T.M.: Cross-domain generative learning for fine-grained sketch-based image retrieval. In: BMVC, pp. 
1\u201312 (2017)","DOI":"10.5244\/C.31.46"},{"key":"15_CR34","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"647","DOI":"10.1007\/978-3-030-58558-7_38","volume-title":"Computer Vision \u2013 ECCV 2020","author":"J Pont-Tuset","year":"2020","unstructured":"Pont-Tuset, J., Uijlings, J., Changpinyo, S., Soricut, R., Ferrari, V.: Connecting vision and language with localized narratives. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12350, pp. 647\u2013664. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58558-7_38"},{"key":"15_CR35","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: Meila, M., Zhang, T. (eds.) Proceedings of the 38th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 139, pp. 8748\u20138763. PMLR, 18\u201324 July 2021. https:\/\/proceedings.mlr.press\/v139\/radford21a.html"},{"key":"15_CR36","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners (2019)"},{"key":"15_CR37","doi-asserted-by":"crossref","unstructured":"Sain, A., Bhunia, A.K., Yang, Y., Xiang, T., Song, Y.Z.: StyleMeUp: towards style-agnostic sketch-based image retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8504\u20138513 (2021)","DOI":"10.1109\/CVPR46437.2021.00840"},{"key":"15_CR38","doi-asserted-by":"crossref","unstructured":"Sangkloy, P., Burnell, N., Ham, C., Hays, J.: The Sketchy database: learning to retrieve badly drawn bunnies. ACM Trans. Graph. (Proceedings of SIGGRAPH) (2016)","DOI":"10.1145\/2897824.2925954"},{"key":"15_CR39","doi-asserted-by":"crossref","unstructured":"Song, J., Yu, Q., Song, Y.Z., Xiang, T., Hospedales, T.M.: Deep spatial-semantic attention for fine-grained sketch-based image retrieval. 
In: Proceedings of the IEEE International Conference on Computer Vision, pp. 5551\u20135560 (2017)","DOI":"10.1109\/ICCV.2017.592"},{"key":"15_CR40","doi-asserted-by":"crossref","unstructured":"Tautkute, I., Trzcinski, T., Skorupa, A., Brocki, L., Marasek, K.: DeepStyle: multimodal search engine for fashion and interior design (2019)","DOI":"10.1109\/ACCESS.2019.2923552"},{"key":"15_CR41","unstructured":"Tiwary, S.: Turing Bletchley: A Universal Image Language Representation Model by Microsoft (2021). https:\/\/www.microsoft.com\/en-us\/research\/blog\/turing-bletchley-a-universal-image-language-representation-model-by-microsoft\/. Accessed 7 March 2021"},{"key":"15_CR42","doi-asserted-by":"crossref","unstructured":"Tursun, O., Denman, S., Sridharan, S., Goan, E., Fookes, C.: An efficient framework for zero-shot sketch-based image retrieval. arXiv preprint arXiv:2102.04016 (2021)","DOI":"10.1016\/j.patcog.2022.108528"},{"key":"15_CR43","doi-asserted-by":"crossref","unstructured":"Vo, N., et al.: Composing text and image for image retrieval - an empirical odyssey. In: CVPR (2019). https:\/\/arxiv.org\/abs\/1812.07119","DOI":"10.1109\/CVPR.2019.00660"},{"key":"15_CR44","doi-asserted-by":"crossref","unstructured":"Wang, B., Yang, Y., Xu, X., Hanjalic, A., Shen, H.T.: Adversarial cross-modal retrieval. In: Proceedings of the 25th ACM International Conference on Multimedia, pp. 154\u2013162 (2017)","DOI":"10.1145\/3123266.3123326"},{"key":"15_CR45","unstructured":"Wang, C., Sun, Z., Zhang, L., Zhang, L.: Sketch2Tag: automatic hand-drawn sketch recognition. In: ACM Conference on Multimedia, January 2012. https:\/\/www.microsoft.com\/en-us\/research\/publication\/sketch2tag-automatic-hand-drawn-sketch-recognition\/"},{"key":"15_CR46","doi-asserted-by":"crossref","unstructured":"Yang, Y., Jin, N., Lin, K., Guo, M., Cer, D.: Neural retrieval for question answering with cross-attention supervised data augmentation. 
arXiv preprint arXiv:2009.13815 (2020)","DOI":"10.18653\/v1\/2021.acl-short.35"},{"key":"15_CR47","doi-asserted-by":"crossref","unstructured":"Yu, Q., Liu, F., Song, Y.Z., Xiang, T., Hospedales, T.M., Loy, C.C.: Sketch me that shoe. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 799\u2013807 (2016)","DOI":"10.1109\/CVPR.2016.93"},{"key":"15_CR48","doi-asserted-by":"crossref","unstructured":"Zhang, H., Liu, S., Zhang, C., Ren, W., Wang, R., Cao, X.: SketchNet: sketch classification with web images. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1105\u20131113 (2016)","DOI":"10.1109\/CVPR.2016.125"},{"key":"15_CR49","doi-asserted-by":"crossref","unstructured":"Zhang, Q., Lei, Z., Zhang, Z., Li, S.Z.: Context-aware attention network for image-text retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3536\u20133545 (2020)","DOI":"10.1109\/CVPR42600.2020.00359"},{"key":"15_CR50","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Zhang, Y., Feng, R., Zhang, T., Fan, W.: Zero-shot sketch-based image retrieval via graph convolution network. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 
12943\u201312950 (2020)","DOI":"10.1609\/aaai.v34i07.6993"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19839-7_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T12:22:16Z","timestamp":1709814136000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19839-7_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198380","9783031198397"],"references-count":50,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19839-7_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"23 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole 
number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}