{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T19:23:27Z","timestamp":1767986607390,"version":"3.49.0"},"reference-count":43,"publisher":"Springer Science and Business Media LLC","issue":"18","license":[{"start":{"date-parts":[[2023,12,6]],"date-time":"2023-12-06T00:00:00Z","timestamp":1701820800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,12,6]],"date-time":"2023-12-06T00:00:00Z","timestamp":1701820800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"the 111 Project","award":["B16009"],"award-info":[{"award-number":["B16009"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-023-17708-5","type":"journal-article","created":{"date-parts":[[2023,12,6]],"date-time":"2023-12-06T08:02:31Z","timestamp":1701849751000},"page":"55999-56019","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Learning discriminative foreground-and-background features for few-shot segmentation"],"prefix":"10.1007","volume":"83","author":[{"given":"Cong","family":"Jiang","sequence":"first","affiliation":[]},{"given":"Yange","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Zhaoshuo","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5575-2328","authenticated-orcid":false,"given":"Chaolu","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Li","sequence":"additional","affiliation":[]},{"given":"Jinzhu","family":"Yang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,6]]},"reference":[{"key":"17708_CR1","unstructured":"Dong N, Xing EP (2018) Few-shot semantic segmentation with prototype learning. In: BMVC, vol 3"},{"issue":"9","key":"17708_CR2","doi-asserted-by":"publisher","first-page":"3855","DOI":"10.1109\/TCYB.2020.2992433","volume":"50","author":"X Zhang","year":"2020","unstructured":"Zhang X, Wei Y, Yang Y, Huang TS (2020) Sg-one: Similarity guidance network for one-shot semantic segmentation. IEEE Trans Cybernet 50(9):3855\u20133865","journal-title":"IEEE Trans Cybernet"},{"key":"17708_CR3","doi-asserted-by":"crossref","unstructured":"Wang K, Liew JH, Zou Y, Zhou D, Feng J (2019) Panet: Few-shot image semantic segmentation with prototype alignment. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 9197\u20139206","DOI":"10.1109\/ICCV.2019.00929"},{"key":"17708_CR4","doi-asserted-by":"crossref","unstructured":"Yang B, Liu C, Li B, Jiao J, Ye Q (2020) Prototype mixture models for few-shot semantic segmentation. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part VIII 16. Springer, pp 763\u2013778","DOI":"10.1007\/978-3-030-58598-3_45"},{"issue":"2","key":"17708_CR5","doi-asserted-by":"publisher","first-page":"1050","DOI":"10.1109\/TPAMI.2020.3013717","volume":"44","author":"Z Tian","year":"2020","unstructured":"Tian Z, Zhao H, Shu M, Yang Z, Li R, Jia J (2020) Prior guided feature enrichment network for few-shot segmentation. IEEE Trans Pattern Anal Mach Intell 44(2):1050\u20131065","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"17708_CR6","doi-asserted-by":"crossref","unstructured":"Xie G-S, Liu J, Xiong H, Shao L (2021) Scale-aware graph neural network for few-shot semantic segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp 5475\u20135484","DOI":"10.1109\/CVPR46437.2021.00543"},{"key":"17708_CR7","doi-asserted-by":"crossref","unstructured":"Li G, Jampani V, Sevilla-Lara L, Sun D, Kim J, Kim J (2021) Adaptive prototype learning and allocation for few-shot segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp 8334\u20138343","DOI":"10.1109\/CVPR46437.2021.00823"},{"issue":"10","key":"17708_CR8","doi-asserted-by":"publisher","first-page":"13275","DOI":"10.1007\/s11042-021-10536-5","volume":"81","author":"S Chan","year":"2022","unstructured":"Chan S, Huang C, Bai C, Ding W, Chen S (2022) Res2-unext: a novel deep learning framework for few-shot cell image segmentation. Multimedia Tools Appl 81(10):13275\u201313288","journal-title":"Multimedia Tools Appl"},{"issue":"13","key":"17708_CR9","doi-asserted-by":"publisher","first-page":"18305","DOI":"10.1007\/s11042-022-12096-8","volume":"81","author":"Y Liu","year":"2022","unstructured":"Liu Y, Guo Y, Zhu Y, Yu M (2022) Mining semantic information from intra-image and cross-image for few-shot segmentation. Multimedia Tools Appl 81(13):18305\u201318326","journal-title":"Multimedia Tools Appl"},{"key":"17708_CR10","doi-asserted-by":"crossref","unstructured":"Shi X, Wei D, Zhang Y, Lu D, Ning M, Chen J, Ma K, Zheng Y (2022) Dense cross-query-and-support attention weighted mask aggregation for few-shot segmentation. In: European Conference on Computer Vision. Springer, pp 151\u2013168","DOI":"10.1007\/978-3-031-20044-1_9"},{"key":"17708_CR11","doi-asserted-by":"crossref","unstructured":"Fan Q, Pei W, Tai Y-W, Tang C-K (2022) Self-support few-shot semantic segmentation. In: European Conference on Computer Vision. Springer, pp 701\u2013719","DOI":"10.1007\/978-3-031-19800-7_41"},{"key":"17708_CR12","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.109018","volume":"133","author":"H Ding","year":"2023","unstructured":"Ding H, Zhang H, Jiang X (2023) Self-regularized prototypical network for few-shot semantic segmentation. Pattern Recognit 133:109018","journal-title":"Pattern Recognit"},{"key":"17708_CR13","doi-asserted-by":"crossref","unstructured":"Min H, Zhang Y, Zhao Y, Jia W, Lei Y, Fan C (2023) Hybrid feature enhancement network for few-shot semantic segmentation. Pattern Recognit 109291","DOI":"10.1016\/j.patcog.2022.109291"},{"key":"17708_CR14","doi-asserted-by":"crossref","unstructured":"Liu J, Bao Y, Xie G-S, Xiong H, Sonke J-J, Gavves E (2022) Dynamic prototype convolution network for few-shot semantic segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp 11553\u201311562","DOI":"10.1109\/CVPR52688.2022.01126"},{"key":"17708_CR15","doi-asserted-by":"crossref","unstructured":"Long J, Shelhamer E, Darrell T (2015) Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp 3431\u20133440","DOI":"10.1109\/CVPR.2015.7298965"},{"issue":"4","key":"17708_CR16","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"L-C Chen","year":"2017","unstructured":"Chen L-C, Papandreou G, Kokkinos I, Murphy K, Yuille AL (2017) Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE Trans Pattern Anal Mach Intell 40(4):834\u2013848","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"17708_CR17","doi-asserted-by":"crossref","unstructured":"Zhao H, Shi J, Qi X, Wang X, Jia J (2017) Pyramid scene parsing network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp 2881\u20132890","DOI":"10.1109\/CVPR.2017.660"},{"key":"17708_CR18","doi-asserted-by":"crossref","unstructured":"Zheng S, Lu J, Zhao H, Zhu X, Luo Z, Wang Y, Fu Y, Feng J, Xiang T, Torr PH et al. (2021) Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp 6881\u20136890","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"17708_CR19","first-page":"12077","volume":"34","author":"E Xie","year":"2021","unstructured":"Xie E, Wang W, Yu Z, Anandkumar A, Alvarez JM, Luo P (2021) Segformer: Simple and efficient design for semantic segmentation with transformers. Adv Neural Inf Process Syst 34:12077\u201312090","journal-title":"Adv Neural Inf Process Syst"},{"key":"17708_CR20","unstructured":"Santoro A, Bartunov S, Botvinick M, Wierstra D, Lillicrap T (2016) Meta-learning with memory-augmented neural networks. In: International Conference on Machine Learning. pp 1842\u20131850, PMLR"},{"key":"17708_CR21","unstructured":"Finn C, Abbeel P, Levine S (2017) Model-agnostic meta-learning for fast adaptation of deep networks. In: International Conference on Machine Learning. pp 1126\u20131135, PMLR"},{"key":"17708_CR22","unstructured":"Vinyals O, Blundell C, Lillicrap T, Wierstra D et al. (2016) Matching networks for one shot learning. Adv Neural Inf Process Syst 29"},{"key":"17708_CR23","unstructured":"Snell J, Swersky K, Zemel R (2017) Prototypical networks for few-shot learning. Adv Neural Inf Process Syst 30"},{"key":"17708_CR24","doi-asserted-by":"crossref","unstructured":"Shaban A, Bansal S, Liu Z, Essa I, Boots B (2017) One-shot learning for semantic segmentation. arXiv:1709.03410","DOI":"10.5244\/C.31.167"},{"key":"17708_CR25","doi-asserted-by":"crossref","unstructured":"Min J, Kang D, Cho M (2021) Hypercorrelation squeeze for few-shot segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision. pp 6941\u20136952","DOI":"10.1109\/ICCV48922.2021.00686"},{"key":"17708_CR26","first-page":"21984","volume":"34","author":"G Zhang","year":"2021","unstructured":"Zhang G, Kang G, Yang Y, Wei Y (2021) Few-shot segmentation via cycle-consistent transformer. Adv Neural Inf Process Syst 34:21984\u201321996","journal-title":"Adv Neural Inf Process Syst"},{"key":"17708_CR27","doi-asserted-by":"crossref","unstructured":"Hong S, Cho S, Nam J, Lin S, Kim S (2022) Cost aggregation with 4d convolutional swin transformer for few-shot segmentation. In: European Conference on Computer Vision. pp 108\u2013126, Springer","DOI":"10.1007\/978-3-031-19818-2_7"},{"key":"17708_CR28","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision. pp 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"17708_CR29","doi-asserted-by":"crossref","unstructured":"Liu Y, Zhang X, Zhang S He X (2020) Part-aware prototype network for few-shot semantic segmentation. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part IX 16. pp 142\u2013158, Springer","DOI":"10.1007\/978-3-030-58545-7_9"},{"key":"17708_CR30","doi-asserted-by":"crossref","unstructured":"Lu Z, He S, Zhu X, Zhang L, Song Y-Z, Xiang T (2021) Simpler is better: Few-shot semantic segmentation with classifier weight transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision. pp 8741\u20138750","DOI":"10.1109\/ICCV48922.2021.00862"},{"key":"17708_CR31","first-page":"38020","volume":"35","author":"Y Liu","year":"2022","unstructured":"Liu Y, Liu N, Yao X, Han J (2022) Intermediate prototype mining transformer for few-shot semantic segmentation. Adv Neural Inf Process Syst 35:38020\u201338031","journal-title":"Adv Neural Inf Process Syst"},{"key":"17708_CR32","doi-asserted-by":"crossref","unstructured":"Nguyen K, Todorovic S (2019) Feature weighting and boosting for few-shot segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision. pp 622\u2013631","DOI":"10.1109\/ICCV.2019.00071"},{"key":"17708_CR33","unstructured":"Rakelly K, Shelhamer E, Darrell T, Efros AA, Levine S (2018) Few-shot segmentation propagation with guided networks. arXiv:1806.07373"},{"key":"17708_CR34","doi-asserted-by":"crossref","unstructured":"Li X, Wei T, Chen YP, Tai Y-W, Tang C-K (2020) Fss-1000: A 1000-class dataset for few-shot segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp 2869\u20132878","DOI":"10.1109\/CVPR42600.2020.00294"},{"key":"17708_CR35","doi-asserted-by":"crossref","unstructured":"Lin T-Y, Maire M, Belongie S, Hays J, Perona P, Ramanan D, Doll\u00e1r P, Zitnick CL (2014) Microsoft coco: Common objects in context. In: Computer Vision\u2013ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13. Springer, pp 740\u2013755","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"17708_CR36","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham M, Van Gool L, Williams CK, Winn J, Zisserman A (2010) The pascal visual object classes (voc) challenge. Inter J Comput Vision 88:303\u2013338","journal-title":"Inter J Comput Vision"},{"key":"17708_CR37","doi-asserted-by":"crossref","unstructured":"Hariharan B, Arbel\u00e1ez P, Bourdev L, Maji S, Malik J (2011) Semantic contours from inverse detectors. In: 2011 International Conference on Computer Vision. pp 991\u2013998, IEEE","DOI":"10.1109\/ICCV.2011.6126343"},{"key":"17708_CR38","unstructured":"Aggarwal AK, Jaidka P (2022) Segmentation of crop images for crop yield prediction. Inter J Biol Biomed 7"},{"issue":"2","key":"17708_CR39","doi-asserted-by":"publisher","first-page":"1327","DOI":"10.1109\/TCBB.2022.3167090","volume":"20","author":"A Kaur","year":"2022","unstructured":"Kaur A, Chauhan APS, Aggarwal AK (2022) Prediction of enhancers in dna sequence data using a hybrid cnn-dlstm model. IEEE\/ACM Trans Comput Biol Bioinfo 20(2):1327\u20131336","journal-title":"IEEE\/ACM Trans Comput Biol Bioinfo"},{"key":"17708_CR40","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"17708_CR41","doi-asserted-by":"crossref","unstructured":"Deng J, Dong W, Socher R, Li L-J, Li K, Fei-Fei L (2009) Imagenet: A large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition. pp 248\u2013255, Ieee","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"17708_CR42","doi-asserted-by":"publisher","first-page":"60","DOI":"10.37394\/232014.2022.18.8","volume":"18","author":"AK Aggarwal","year":"2022","unstructured":"Aggarwal AK (2022) Learning texture features from glcm for classification of brain tumor mri images using random forest classifier. Trans Signal Process 18:60\u201363","journal-title":"Trans Signal Process"},{"key":"17708_CR43","first-page":"199","volume":"10","author":"D Maini","year":"2018","unstructured":"Maini D, Aggarwal AK (2018) Camera position estimation using 2d image dataset. Int J Innov Eng Technol 10:199\u2013203","journal-title":"Int J Innov Eng Technol"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-17708-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-023-17708-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-17708-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,5,15]],"date-time":"2024-05-15T10:44:04Z","timestamp":1715769844000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-023-17708-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,6]]},"references-count":43,"journal-issue":{"issue":"18","published-online":{"date-parts":[[2024,5]]}},"alternative-id":["17708"],"URL":"https:\/\/doi.org\/10.1007\/s11042-023-17708-5","relation":{},"ISSN":["1573-7721"],"issn-type":[{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,12,6]]},"assertion":[{"value":"22 July 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 November 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 November 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 December 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no known conflict of interests including funding and\/or competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interests"}}]}}