{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T11:45:35Z","timestamp":1762429535555,"version":"3.37.3"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"21","license":[{"start":{"date-parts":[[2023,8,11]],"date-time":"2023-08-11T00:00:00Z","timestamp":1691712000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,8,11]],"date-time":"2023-08-11T00:00:00Z","timestamp":1691712000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["618771186;61771322"],"award-info":[{"award-number":["618771186;61771322"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,11]]},"DOI":"10.1007\/s10489-023-04922-9","type":"journal-article","created":{"date-parts":[[2023,8,11]],"date-time":"2023-08-11T14:02:18Z","timestamp":1691762538000},"page":"25771-25786","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["A novel inference paradigm based on multi-view prototypes for one-shot semantic segmentation"],"prefix":"10.1007","volume":"53","author":[{"given":"Hailing","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4059-4806","authenticated-orcid":false,"given":"Guitao","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Wenming","family":"Cao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,11]]},"reference":[{"key":"4922_CR1","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional networks for biomedical image segmentation. In: MICCAI, pp 234\u2013241","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"10","key":"4922_CR2","doi-asserted-by":"publisher","first-page":"2281","DOI":"10.1109\/TMI.2019.2903562","volume":"38","author":"Z Gu","year":"2019","unstructured":"Gu Z, Cheng J, Fu H et al (2019) Cenet: Context encoder network for 2d medical image segmentation. IEEE Trans Med Imaging 38(10):2281\u20132292","journal-title":"IEEE Trans Med Imaging"},{"key":"4922_CR3","doi-asserted-by":"publisher","first-page":"74","DOI":"10.1016\/j.neunet.2019.08.025","volume":"121","author":"N Ibtehaz","year":"2020","unstructured":"Ibtehaz N, Rahman MS (2020) Multiresunet: Rethinking the u-net architecture for multimodal biomedical image segmentation. Neural Netw 121:74\u201387","journal-title":"Neural Netw"},{"issue":"2","key":"4922_CR4","doi-asserted-by":"publisher","first-page":"77","DOI":"10.1023\/A:1019956318069","volume":"18","author":"R Vilalta","year":"2002","unstructured":"Vilalta R, Drissi Y (2002) A perspective view and survey of meta-learning. Artif Intell Rev 18(2):77\u201395","journal-title":"Artif Intell Rev"},{"issue":"9","key":"4922_CR5","doi-asserted-by":"publisher","first-page":"5149","DOI":"10.1109\/TPAMI.2021.3079209","volume":"44","author":"TM Hospedales","year":"2022","unstructured":"Hospedales TM, Antoniou A, Micaelli P et al (2022) Meta-learning in neural networks: A survey. IEEE Trans Pattern Anal Mach Intell 44(9):5149\u20135169. https:\/\/doi.org\/10.1109\/TPAMI.2021.3079209","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4922_CR6","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.108586","volume":"126","author":"S Luo","year":"2022","unstructured":"Luo S, Li Y, Gao P et al (2022) Meta-seg: A survey of meta-learning for image segmentation. Pattern Recognit 126:108586. https:\/\/doi.org\/10.1016\/j.patcog.2022.108586","journal-title":"Pattern Recognit"},{"key":"4922_CR7","doi-asserted-by":"crossref","unstructured":"Li W, Xu J, Huo J et al (2019) Distribution consistency based covariance metric networks for few-shot learning. In: AAAI, pp 8642\u20138649","DOI":"10.1609\/aaai.v33i01.33018642"},{"key":"4922_CR8","doi-asserted-by":"crossref","unstructured":"Liu J, Song L, Qin Y (2020) Prototype rectification for few-shot learning. In: ECCV, pp 741\u2013756","DOI":"10.1007\/978-3-030-58452-8_43"},{"key":"4922_CR9","doi-asserted-by":"crossref","unstructured":"Shen Z, Liu Z, Qin J et al (2021) Partial is better than all: Revisiting fine-tuning strategy for few-shot learning. In: AAAI, pp 9594\u20139602","DOI":"10.1609\/aaai.v35i11.17155"},{"key":"4922_CR10","doi-asserted-by":"crossref","unstructured":"Shaban A, Bansal S, Liu Z et al (2017) Oneshot learning for semantic segmentation. In: BMVC","DOI":"10.5244\/C.31.167"},{"key":"4922_CR11","doi-asserted-by":"crossref","unstructured":"Zhang C, Lin G, Liu F et al (2019) Canet: Class-agnostic segmentation networks with iterative refinement and attentive few\u2013shot learning. In: CVPR, pp 5217\u20135226","DOI":"10.1109\/CVPR.2019.00536"},{"key":"4922_CR12","doi-asserted-by":"crossref","unstructured":"Nguyen K, Todorovic S (2019) Feature weighting and boosting for few-shot segmentation. In: ICCV, pp 622\u2013631","DOI":"10.1109\/ICCV.2019.00071"},{"key":"4922_CR13","doi-asserted-by":"crossref","unstructured":"Yang L, Zhuo W, Qi L, et al (2021) Mining latent classes for few-shot segmentation. In: ICCV, pp 8701\u20138710","DOI":"10.1109\/ICCV48922.2021.00860"},{"key":"4922_CR14","doi-asserted-by":"crossref","unstructured":"Li G, Jampani V, Sevilla-Lara L et al (2021) Adaptive prototype learning and allocation for few-shot segmentation. In: CVPR, pp 8334\u20138343","DOI":"10.1109\/CVPR46437.2021.00823"},{"issue":"2","key":"4922_CR15","doi-asserted-by":"publisher","first-page":"1050","DOI":"10.1109\/TPAMI.2020.3013717","volume":"44","author":"Z Tian","year":"2022","unstructured":"Tian Z, Zhao H, Shu M et al (2022) Prior guided feature enrichment network for fewshot segmentation. IEEE Trans Pattern Anal Mach Intell 44(2):1050\u20131065","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"4","key":"4922_CR16","first-page":"4650","volume":"45","author":"G Cheng","year":"2023","unstructured":"Cheng G, Lang C, Han J (2023) Holistic prototype activation for few-shot segmentation. IEEE Trans Pattern Anal Mach Intell 45(4):4650\u20134666","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"11","key":"4922_CR17","doi-asserted-by":"publisher","first-page":"6484","DOI":"10.1109\/TNNLS.2021.3081693","volume":"33","author":"X Zhang","year":"2022","unstructured":"Zhang X, Wei Y, Li Z et al (2022) Rich embedding features for one-shot semantic segmentation. IEEE Trans Neural Netw Learn Syst 33(11):6484\u20136493","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"4922_CR18","doi-asserted-by":"crossref","unstructured":"Zhang C, Lin G, Liu F, et al (2019) Pyramid graph networks with connection attentions for region-based one-shot semantic segmentation. In: ICCV, pp 9586\u20139594","DOI":"10.1109\/ICCV.2019.00968"},{"key":"4922_CR19","doi-asserted-by":"crossref","unstructured":"Wang H, Zhang X, Hu Y, et al (2020) Fewshot semantic segmentation with democratic attention networks. In: ECCV (13), Lecture Notes in Computer Science, vol 12358.Springer, pp 730\u2013746","DOI":"10.1007\/978-3-030-58601-0_43"},{"key":"4922_CR20","doi-asserted-by":"crossref","unstructured":"Gairola S, Hemani M, Chopra A et al (2020) Simpropnet: Improved similarity propagation for few-shot image segmentation. In: IJCAI.ijcai.org, pp 573\u2013579","DOI":"10.24963\/ijcai.2020\/80"},{"key":"4922_CR21","doi-asserted-by":"crossref","unstructured":"Min J, Kang D, Cho M (2021) Hypercorrelation squeeze for few-shot segmenation. In: ICCV, pp 6921\u20136932","DOI":"10.1109\/ICCV48922.2021.00686"},{"key":"4922_CR22","doi-asserted-by":"publisher","first-page":"3142","DOI":"10.1109\/TIP.2021.3058512","volume":"30","author":"B Liu","year":"2021","unstructured":"Liu B, Jiao J, Ye Q (2021) Harmonic feature activation for few-shot semantic segmentation. IEEE Trans Image Process 30:3142-3153","journal-title":"IEEE Trans Image Process"},{"key":"4922_CR23","doi-asserted-by":"crossref","unstructured":"Fan Q, Pei W, Tai Y et al (2022) Self-support few-shot semantic segmentation. In: ECCV, pp 701\u2013719","DOI":"10.1007\/978-3-031-19800-7_41"},{"key":"4922_CR24","doi-asserted-by":"crossref","unstructured":"Lang C, Cheng G, Tu B et al (2022) Learning what not to segment: A new perspective on few-shot segmentation. In: CVPR, pp 8047\u20138057","DOI":"10.1109\/CVPR52688.2022.00789"},{"key":"4922_CR25","doi-asserted-by":"crossref","unstructured":"Long J, Shelhamer E, Darrell T (2015) Fully convolutional networks for semantic segmentation. In: CVPR, pp 3431\u20133440","DOI":"10.1109\/CVPR.2015.7298965"},{"issue":"12","key":"4922_CR26","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","volume":"39","author":"V Badrinarayanan","year":"2017","unstructured":"Badrinarayanan V, Kendall A, Cipolla R (2017) Segnet: A deep convolutional encoderdecoder architecture for image segmentation. IEEE Trans Pattern Anal Mach Intell 39(12):2481\u20132495","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4922_CR27","doi-asserted-by":"crossref","unstructured":"Zhao H, Shi J, Qi X et al (2017) Pyramid scene parsing network. In: CVPR, pp 6230\u20136239","DOI":"10.1109\/CVPR.2017.660"},{"key":"4922_CR28","doi-asserted-by":"crossref","unstructured":"Chen L, Zhu Y, Papandreou G et al (2018) Encoder-decoder with atrous separable convolution for semantic image segmentation. In: ECCV, pp 833\u2013851","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"4922_CR29","doi-asserted-by":"crossref","unstructured":"He J, Deng Z, Zhou L et al (2019) Adaptive pyramid context network for semantic segmentation. In: CVPR, pp 7519\u20137528","DOI":"10.1109\/CVPR.2019.00770"},{"issue":"4","key":"4922_CR30","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"L Chen","year":"2018","unstructured":"Chen L, Papandreou G, Kokkinos I et al (2018) Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE Trans Pattern Anal Mach Intell 40(4):834\u2013848","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4922_CR31","doi-asserted-by":"crossref","unstructured":"Fu J, Liu J, Tian H et al (2019) Dual attention network for scene segmentation. In: CVPR, pp 3146\u20133154","DOI":"10.1109\/CVPR.2019.00326"},{"key":"4922_CR32","doi-asserted-by":"crossref","unstructured":"Li X, Zhong Z, Wu J et al (2019) Expectation-maximization attention networks for semantic segmentation. In: ICCV, pp 9166\u20139175","DOI":"10.1109\/ICCV.2019.00926"},{"key":"4922_CR33","doi-asserted-by":"crossref","unstructured":"Choi S, Kim JT, Choo J (2020) Cars can\u2019t fly up in the sky: Improving urban-scene segmentation via height-driven attention networks. In: CVPR, pp 9370\u20139380","DOI":"10.1109\/CVPR42600.2020.00939"},{"key":"4922_CR34","doi-asserted-by":"crossref","unstructured":"Zhang F, Chen Y, Li Z et al (2019) Acfnet: Attentional class feature network for semantic segmentation. In: ICCV, pp 6797\u20136806","DOI":"10.1109\/ICCV.2019.00690"},{"key":"4922_CR35","doi-asserted-by":"crossref","unstructured":"Huang Z, Wang X, Huang L et al (2019) Ccnet: Criss-cross attention for semantic segmentation. In: ICCV, pp 603\u2013612","DOI":"10.1109\/ICCV.2019.00069"},{"key":"4922_CR36","unstructured":"Ravi S, Larochelle H (2017) Optimization as a model for few-shot learning. In: ICLR"},{"key":"4922_CR37","unstructured":"Finn C, Abbeel P, Levine S (2017) Model-agnostic meta-learning for fast adaptation of deep networks. In: ICML, pp 1126\u20131135"},{"key":"4922_CR38","doi-asserted-by":"crossref","unstructured":"Jamal MA, Qi G (2019) Task agnostic meta-learning for few-shot learning. In: CVPR, pp 11719\u201311727","DOI":"10.1109\/CVPR.2019.01199"},{"key":"4922_CR39","doi-asserted-by":"crossref","unstructured":"Chen Z, Fu Y, Chen K et al (2019) Image block augmentation for one-shot learning. In: AAAI, pp 3379\u20133386","DOI":"10.1609\/aaai.v33i01.33013379"},{"key":"4922_CR40","doi-asserted-by":"crossref","unstructured":"Chen Z, Fu Y, Wang Y et al (2019) Image deformation meta-networks for oneshot learning. In: CVPR, pp 8680\u20138689","DOI":"10.1109\/CVPR.2019.00888"},{"key":"4922_CR41","doi-asserted-by":"crossref","unstructured":"Sung F, Yang Y, Zhang L et al (2018) Learning to compare: Relation network for few\u2013shot learning. In: CVPR, pp 1199\u20131208","DOI":"10.1109\/CVPR.2018.00131"},{"key":"4922_CR42","doi-asserted-by":"crossref","unstructured":"Li H, Eigen D, Dodge S et al (2019) Finding task-relevant features for few-shot learning by category traversal. In: CVPR, pp 1\u201310","DOI":"10.1109\/CVPR.2019.00009"},{"key":"4922_CR43","unstructured":"Allen KR, Shelhamer E, Shin H et al (2019) Infinite mixture prototypes for fewshot learning. In: ICML, pp 232\u2013241"},{"key":"4922_CR44","unstructured":"Hou R, Chang H, Ma B et al (2019) Cross attention network for few-shot classification. In: NIPS, pp 4005\u20134016"},{"key":"4922_CR45","unstructured":"Doersch C, Gupta A, Zisserman A (2020) Crosstransformers: spatially-aware few-shot transfer. In: NIPS"},{"key":"4922_CR46","doi-asserted-by":"crossref","unstructured":"Liu J, Song L, Qin Y (2020) Prototype rectification for few-shot learning. In: ECCV, pp 741\u2013756","DOI":"10.1007\/978-3-030-58452-8_43"},{"key":"4922_CR47","unstructured":"Snell J, Swersky K, Zemel RS (2017) Prototypical networks for few-shot learning. In: NIPS, pp 4077\u20134087"},{"issue":"9","key":"4922_CR48","doi-asserted-by":"publisher","first-page":"3855","DOI":"10.1109\/TCYB.2020.2992433","volume":"50","author":"X Zhang","year":"2020","unstructured":"Zhang X, Wei Y, Yang Y et al (2020) Sgone: Similarity guidance network for one-shot semantic segmentation. IEEE Trans Cybern 50(9):3855\u20133865","journal-title":"IEEE Trans Cybern"},{"key":"4922_CR49","doi-asserted-by":"crossref","unstructured":"Zhang B, Xiao J, Qin T (2021) Self-guided and cross-guided learning for few-shot segmentation. In: CVPR, pp 8312\u20138321","DOI":"10.1109\/CVPR46437.2021.00821"},{"key":"4922_CR50","doi-asserted-by":"crossref","unstructured":"Mao B, Zhang X, Wang L et al (2022) Learning from the target: Dual prototype network for few shot semantic segmentation. In: AAAI, pp 1953\u20131961","DOI":"10.1609\/aaai.v36i2.20090"},{"key":"4922_CR51","doi-asserted-by":"crossref","unstructured":"Wang Y, Wang H, Shen Y et al (2022) Semi-supervised semantic segmentation using unreliable pseudo\u2013labels. In: CVPR, pp 4238\u20134247","DOI":"10.1109\/CVPR52688.2022.00421"},{"key":"4922_CR52","doi-asserted-by":"crossref","unstructured":"Yang L, Zhuo W, Qi L et al (2022) ST++: make self-trainingwork better for semi-supervised semantic segmentation. In: CVPR, pp 4258\u20134267","DOI":"10.1109\/CVPR52688.2022.00423"},{"key":"4922_CR53","doi-asserted-by":"crossref","unstructured":"Liu Y, Zhang X, Zhang S et al (2020) Part-aware prototype network for few-shot semantic segmentation. In: ECCV, pp 142\u2013158","DOI":"10.1007\/978-3-030-58545-7_9"},{"issue":"2","key":"4922_CR54","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham M, Gool LV, Williams CKI et al (2010) The pascal visual object classes (VOC) challenge. Int J Comput Vis 88(2):303\u2013338","journal-title":"Int J Comput Vis"},{"key":"4922_CR55","doi-asserted-by":"crossref","unstructured":"Hariharan B, Arbel\u00e1ez PA, Girshick RB et al (2014) Simultaneous detection and segmentation. In: ECCV, pp 297\u2013312","DOI":"10.1007\/978-3-319-10584-0_20"},{"key":"4922_CR56","doi-asserted-by":"crossref","unstructured":"Lin T, Maire M, Belongie SJ et al (2014) Microsoft COCO: common objects in context. In: ECCV, pp 740\u2013755","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"4922_CR57","doi-asserted-by":"crossref","unstructured":"Wang K, Liew JH, Zou Y et al (2019) Panet: Few-shot image semantic segmentation with prototype alignment. In: ICCV, pp 9196\u20139205","DOI":"10.1109\/ICCV.2019.00929"},{"key":"4922_CR58","doi-asserted-by":"crossref","unstructured":"Yang B, Liu C, Li B et al (2020) Prototype mixture models for few-shot semantic segmentation. In: ECCV, pp 763\u2013778","DOI":"10.1007\/978-3-030-58598-3_45"},{"key":"4922_CR59","doi-asserted-by":"crossref","unstructured":"Lu Z, He S, Zhu X et al (2021) Simpler is better: Few-shot semantic segmentation with classifier weight transformer. In: ICCV, pp 8721\u20138730","DOI":"10.1109\/ICCV48922.2021.00862"},{"key":"4922_CR60","doi-asserted-by":"crossref","unstructured":"Lang C, Tu B, Cheng G et al (2022) Beyond the prototype: Divide-and-conquer proxies for few-shot segmentation. In: IJCAI, pp 1024\u20131030","DOI":"10.24963\/ijcai.2022\/143"},{"issue":"12","key":"4922_CR61","doi-asserted-by":"publisher","first-page":"7141","DOI":"10.1109\/TNNLS.2021.3084252","volume":"33","author":"B Yang","year":"2022","unstructured":"Yang B, Wan F, Liu C et al (2022) Part-based semantic transform for few-shot semantic segmentation. IEEE Trans Neural Networks Learn Syst 33(12):7141\u20137152","journal-title":"IEEE Trans Neural Networks Learn Syst"},{"key":"4922_CR62","doi-asserted-by":"publisher","unstructured":"Liu H, Peng P, Chen T et al (2023) Fecanet: Boosting few-shot semantic segmentation with feature-enhanced context-aware network. IEEE Trans Multimed 1\u201313. https:\/\/doi.org\/10.1109\/TMM.2023.3238521","DOI":"10.1109\/TMM.2023.3238521"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-023-04922-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-023-04922-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-023-04922-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,23]],"date-time":"2023-10-23T14:23:22Z","timestamp":1698071002000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-023-04922-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,11]]},"references-count":62,"journal-issue":{"issue":"21","published-print":{"date-parts":[[2023,11]]}},"alternative-id":["4922"],"URL":"https:\/\/doi.org\/10.1007\/s10489-023-04922-9","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"type":"print","value":"0924-669X"},{"type":"electronic","value":"1573-7497"}],"subject":[],"published":{"date-parts":[[2023,8,11]]},"assertion":[{"value":"29 July 2023","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 August 2023","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}]}}