{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T13:41:45Z","timestamp":1743082905680,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":28,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819984312"},{"type":"electronic","value":"9789819984329"}],"license":[{"start":{"date-parts":[[2023,12,24]],"date-time":"2023-12-24T00:00:00Z","timestamp":1703376000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,12,24]],"date-time":"2023-12-24T00:00:00Z","timestamp":1703376000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-8432-9_35","type":"book-chapter","created":{"date-parts":[[2023,12,23]],"date-time":"2023-12-23T08:02:17Z","timestamp":1703318537000},"page":"440-452","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Target-Aware Bi-Transformer for\u00a0Few-Shot Segmentation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-7412-3524","authenticated-orcid":false,"given":"Xianglin","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2365-4950","authenticated-orcid":false,"given":"Xiaoliu","family":"Luo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9891-4203","authenticated-orcid":false,"given":"Taiping","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,24]]},"reference":[{"key":"35_CR1","unstructured":"Adam, P., et al.: PyTorch: an imperative style, high-performance deep learning library. arXiv:1912.01703 (2019)"},{"key":"35_CR2","unstructured":"Amirreza, S., Shray, B., Liu, Z., Irfan, E., Byron, B.: One-shot learning for semantic segmentation. arXiv:1709.03410 (2017)"},{"key":"35_CR3","unstructured":"Ashish, V., et al.: Attention is all you need. arXiv:1706.03762 (2017)"},{"key":"35_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"297","DOI":"10.1007\/978-3-319-10584-0_20","volume-title":"Computer Vision \u2013 ECCV 2014","author":"B Hariharan","year":"2014","unstructured":"Hariharan, B., Arbel\u00e1ez, P., Girshick, R., Malik, J.: Simultaneous detection and segmentation. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8695, pp. 297\u2013312. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10584-0_20"},{"key":"35_CR5","unstructured":"Dahyun, K., Minsu, C.: Integrative few-shot learning for classification and segmentation. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9969\u20139980 (2022)"},{"key":"35_CR6","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. 
arXiv:2010.11929 (2020)"},{"key":"35_CR7","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1007\/s11263-014-0733-5","volume":"111","author":"M Everingham","year":"2014","unstructured":"Everingham, M., Eslami, S., Gool, L., Williams, C.K.I., Winn, J., Andrew, Z.: The pascal visual object classes challenge: a retrospective. Int. J. Comput. Vision 111, 98\u2013136 (2014)","journal-title":"Int. J. Comput. Vision"},{"key":"35_CR8","unstructured":"Gengwei, Z., Guoliang, K., Yunchao, W., Yi, Y.: Few-shot segmentation via cycle-consistent transformer (2021)"},{"key":"35_CR9","unstructured":"Jake, S., Kevin, S., Zemel, R.: Prototypical networks for few-shot learning (2017)"},{"key":"35_CR10","unstructured":"Juhong, M., Dahyun, K., Minsu, C.: Hypercorrelation squeeze for few-shot segmentation. In: 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6921\u20136932 (2021)"},{"key":"35_CR11","unstructured":"Jungbeom, L., Joon, O.S., Sangdoo, Y., Junsuk, C., Eunji, K., Sung-Hoon, Y.: Weakly supervised semantic segmentation using out-of-distribution data. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 16876\u201316885 (2022)"},{"key":"35_CR12","unstructured":"Kaiming, H., Zhang, X., Shaoqing, R., Jian, S.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2015)"},{"key":"35_CR13","doi-asserted-by":"crossref","unstructured":"Kaixin, W., Liew, J., Yingtian, Z., Daquan, Z., Jiashi, F.: Panet: few-shot image semantic segmentation with prototype alignment. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 9196\u20139205 (2019)","DOI":"10.1109\/ICCV.2019.00929"},{"key":"35_CR14","unstructured":"Kingma, D.P., Jimmy, B.: Adam: a method for stochastic optimization. CoRR (2014)"},{"key":"35_CR15","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2012","unstructured":"Krizhevsky, A., Ilya, S., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Commun. ACM 60, 84\u201390 (2012)","journal-title":"Commun. ACM"},{"key":"35_CR16","doi-asserted-by":"crossref","unstructured":"Minh, N.K.D., Todorovic, S.: Feature weighting and boosting for few-shot segmentation. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 622\u2013631 (2019)","DOI":"10.1109\/ICCV.2019.00071"},{"key":"35_CR17","doi-asserted-by":"crossref","unstructured":"Okazawa, A.: Interclass prototype relation for few-shot segmentation. ArXiv (2022)","DOI":"10.1007\/978-3-031-19818-2_21"},{"key":"35_CR18","unstructured":"Qi, F., Wenjie, P., Yu-Wing, T., Chi-Keung, T.: Self-support few-shot semantic segmentation (2022)"},{"key":"35_CR19","unstructured":"Sunghwan, H., Seokju, C., Jisu, N., Stephen, L., Wook, K.S.: Cost aggregation with 4D convolutional swin transformer for few-shot segmentation. arXiv:2207.10866 (2022)"},{"key":"35_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"TY Lin","year":"2014","unstructured":"Lin, T.Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). 
https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"35_CR21","unstructured":"Weide, L., Chi, Z., Henghui, D., Tzu-Yi, H., Guosheng, L.: Few-shot segmentation with optimal transport matching and message flow. arXiv:2108.08518 (2021)"},{"key":"35_CR22","first-page":"3855","volume":"50","author":"Z Xiaolin","year":"2018","unstructured":"Xiaolin, Z., Yunchao, W., Yi, Y., Thomas, H.: SG-One: similarity guidance network for one-shot semantic segmentation. IEEE Trans. Cybern. 50, 3855\u20133865 (2018)","journal-title":"IEEE Trans. Cybern."},{"key":"35_CR23","volume":"203","author":"L Xiaoliu","year":"2022","unstructured":"Xiaoliu, L., Zhao, D., Taiping, Z.: Intermediate prototype network for few-shot segmentation. Signal Process. 203, 108811 (2022)","journal-title":"Signal Process."},{"key":"35_CR24","unstructured":"Xingjia, P., et al.: Unveiling the potential of structure preserving for weakly supervised object localization. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11637\u201311646 (2021)"},{"key":"35_CR25","unstructured":"Xinyu, S., et al.: Dense cross-query-and-support attention weighted mask aggregation for few-shot segmentation. arXiv:2207.08549 (2022)"},{"key":"35_CR26","unstructured":"Yuanwei, L., Nian, L., Xiwen, Y., Junwei, H.: Intermediate prototype mining transformer for few-shot semantic segmentation. arXiv:2210.06780 (2022)"},{"key":"35_CR27","doi-asserted-by":"crossref","unstructured":"Yuchao, W., et al.: Semi-supervised semantic segmentation using unreliable pseudo-labels. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4238\u20134247 (2022)","DOI":"10.1109\/CVPR52688.2022.00421"},{"key":"35_CR28","first-page":"1050","volume":"44","author":"T Zhuotao","year":"2020","unstructured":"Zhuotao, T., Hengshuang, Z., Michelle, S., Zhicheng, Y., Ruiyu, L., Jiaya, J.: Prior guided feature enrichment network for few-shot segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 44, 1050\u20131065 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-8432-9_35","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T19:34:28Z","timestamp":1730921668000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-8432-9_35"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,24]]},"ISBN":["9789819984312","9789819984329"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-8432-9_35","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023,12,24]]},"assertion":[{"value":"24 December 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Xiamen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/prcv2023.xmu.edu.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1420","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"532","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by 
the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"37% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3,78","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3,69","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
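
The record above is a standard Crossref works-API response for DOI 10.1007/978-981-99-8432-9_35, and the same JSON is served live at https://api.crossref.org/works/10.1007/978-981-99-8432-9_35. As a minimal sketch only (the local filename record.json is an assumption; the field paths message.title, message.author, and message.assertion are taken directly from the record), the following Python loads the record and recomputes the 37% acceptance rate stated in the peer-review assertions:

```python
import json
import urllib.request

DOI = "10.1007/978-981-99-8432-9_35"
try:
    # Assumed local copy of the record shown above.
    with open("record.json", encoding="utf-8") as f:
        work = json.load(f)["message"]
except FileNotFoundError:
    # Fall back to fetching the record from the public Crossref API.
    with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
        work = json.load(resp)["message"]

# Bibliographic basics, using the field paths present in the record.
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
print(f'{authors}: {work["title"][0]}. https://doi.org/{work["DOI"]}')

# Recompute the acceptance rate from the deposited counts:
# 532 full papers accepted out of 1420 submissions sent for review.
assertions = {a["name"]: a["value"] for a in work["assertion"]}
accepted = int(assertions["number_of_full_papers_accepted"])
reviewed = int(assertions["number_of_submissions_sent_for_review"])
print(f"Acceptance rate: {accepted / reviewed:.0%}")  # -> 37%, as deposited
```

Indexing the assertions into a dict by their "name" field avoids relying on their order, which is not guaranteed to be stable across Crossref records.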