{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T07:27:03Z","timestamp":1740122823983,"version":"3.37.3"},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2024,4,10]],"date-time":"2024-04-10T00:00:00Z","timestamp":1712707200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,4,10]],"date-time":"2024-04-10T00:00:00Z","timestamp":1712707200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022YFE0196000"],"award-info":[{"award-number":["2022YFE0196000"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61502429"],"award-info":[{"award-number":["61502429"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-024-19018-w","type":"journal-article","created":{"date-parts":[[2024,4,10]],"date-time":"2024-04-10T03:29:12Z","timestamp":1712719752000},"page":"1013-1030","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Few-shot semantic segmentation in complex industrial 
components"],"prefix":"10.1007","volume":"84","author":[{"given":"Caie","family":"Xu","sequence":"first","affiliation":[]},{"given":"Bingyan","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jin","family":"Gan","sequence":"additional","affiliation":[]},{"given":"Jin","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Yu","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Minglei","family":"Tu","sequence":"additional","affiliation":[]},{"given":"WuJie","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,10]]},"reference":[{"key":"19018_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s10916-018-1088-1","volume":"42","author":"SM Anwar","year":"2018","unstructured":"Anwar SM, Majid M, Qayyum A, Awais M, Alnowami M, Khan MK (2018) Medical image analysis using convolutional neural networks: a review. J Med Syst 42:1\u201313","journal-title":"J Med Syst"},{"key":"19018_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2020.113816","volume":"165","author":"C Badue","year":"2021","unstructured":"Badue C, Guidolini R, Carneiro RV, Azevedo P, Cardoso VB, Forechi A, Jesus L, Berriel R, Paixao TM, Mutz F et al (2021) Self-driving cars: A survey. Expert Syst Appl 165:113816","journal-title":"Expert Syst Appl"},{"key":"19018_CR3","doi-asserted-by":"crossref","unstructured":"Astrid M, Zaheer MZ, Lee S-I (2021) Synthetic temporal anomaly guided end-to-end video anomaly detection. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 207\u2013214","DOI":"10.1109\/ICCVW54120.2021.00028"},{"key":"19018_CR4","doi-asserted-by":"crossref","unstructured":"Chen YC, Lai CF (2023) An intuitive pre-processing method based on human-robot interactions: zero-shot learning semantic segmentation based on synthetic semantic template. 
Journal of supercomputing","DOI":"10.1007\/s11227-023-05068-8"},{"key":"19018_CR5","doi-asserted-by":"crossref","unstructured":"Wang X, Huang W, Yang W, Liao Q (2023) Spatial correlation fusion network for few-shot segmentation","DOI":"10.1109\/ICASSP49357.2023.10094973"},{"key":"19018_CR6","unstructured":"Pavithra LK, Paramanandham N, Sharan T, Sarkar RK, Gupta S (2023) Brain tumor segmentation using unet-few shot schematic segmentation"},{"issue":"2","key":"19018_CR7","doi-asserted-by":"publisher","first-page":"282","DOI":"10.1117\/1.1637364","volume":"43","author":"Y Du","year":"2004","unstructured":"Du Y, Chang C-I, Thouin PD (2004) Unsupervised approach to color video thresholding. Optical Eng 43(2):282\u2013289","journal-title":"Optical Eng"},{"key":"19018_CR8","unstructured":"Senthilkumaran N, Rajesh R (2008) Edge detection techniques for image segmentation-a survey. In: Proceedings of the international conference on managing next generation software applications (MNGSA-08), pp 749\u2013760"},{"issue":"5","key":"19018_CR9","doi-asserted-by":"publisher","first-page":"3027","DOI":"10.1109\/TFUZZ.2018.2796074","volume":"26","author":"T Lei","year":"2018","unstructured":"Lei T, Jia X, Zhang Y, He L, Meng H, Nandi AK (2018) Significantly fast and robust fuzzy c-means clustering algorithm based on morphological reconstruction and membership filtering. IEEE Trans Fuzzy Syst 26(5):3027\u20133041","journal-title":"IEEE Trans Fuzzy Syst"},{"key":"19018_CR10","doi-asserted-by":"publisher","first-page":"167","DOI":"10.1023\/B:VISI.0000022288.19776.77","volume":"59","author":"PF Felzenszwalb","year":"2004","unstructured":"Felzenszwalb PF, Huttenlocher DP (2004) Efficient graph-based image segmentation. Int J Comput Vis 59:167\u2013181","journal-title":"Int J Comput Vis"},{"key":"19018_CR11","doi-asserted-by":"crossref","unstructured":"Long J, Shelhamer E, Darrell T (2015) Fully convolutional networks for semantic segmentation. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 3431\u20133440","DOI":"10.1109\/CVPR.2015.7298965"},{"issue":"12","key":"19018_CR12","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","volume":"39","author":"V Badrinarayanan","year":"2017","unstructured":"Badrinarayanan V, Kendall A, Cipolla R (2017) Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE Trans Pattern Anal Mach Intell 39(12):2481\u20132495","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"19018_CR13","doi-asserted-by":"crossref","unstructured":"Zhao H, Shi J, Qi X, Wang X, Jia J (2017) Pyramid scene parsing network. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 2881\u20132890","DOI":"10.1109\/CVPR.2017.660"},{"key":"19018_CR14","doi-asserted-by":"crossref","unstructured":"Chen L-C, Zhu Y, Papandreou G, Schroff F, Adam H (2018) Encoder-decoder with atrous separable convolution for semantic image segmentation. In: Proceedings of the european conference on computer vision (ECCV), pp 801\u2013818","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"19018_CR15","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional networks for biomedical image segmentation. In: Medical image computing and computer-assisted intervention\u2013MICCAI 2015: 18th international conference, Munich, Germany, October 5\u20139, 2015, Proceedings, Part III 18, pp 234\u2013241. Springer","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"19018_CR16","doi-asserted-by":"crossref","unstructured":"Chollet F (2017) Xception: Deep learning with depthwise separable convolutions. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1251\u20131258","DOI":"10.1109\/CVPR.2017.195"},{"issue":"7","key":"19018_CR17","doi-asserted-by":"publisher","first-page":"1431","DOI":"10.1109\/TPAMI.2016.2592916","volume":"39","author":"T Hassner","year":"2016","unstructured":"Hassner T, Filosof S, Mayzels V, Zelnik-Manor L (2016) Sifting through scales. IEEE Trans Pattern Anal Mach Intell 39(7):1431\u20131443","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"19018_CR18","doi-asserted-by":"crossref","unstructured":"Hassner T, Mayzels V, Zelnik-Manor L (2012) On sifts and their scales. In: 2012 IEEE Conference on computer vision and pattern recognition, pp 1522\u20131528. IEEE","DOI":"10.1109\/CVPR.2012.6247842"},{"issue":"5","key":"19018_CR19","doi-asserted-by":"publisher","first-page":"875","DOI":"10.1109\/TPAMI.2015.2474356","volume":"38","author":"M Tau","year":"2015","unstructured":"Tau M, Hassner T (2015) Dense correspondences across scenes and scales. IEEE Trans Pattern Anal Mach Intell 38(5):875\u2013888","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"19018_CR20","doi-asserted-by":"crossref","unstructured":"Hassner T, Liu C (2016) Dense Image Correspondences for Computer Vision. Springer, ???","DOI":"10.1007\/978-3-319-23048-1"},{"issue":"4","key":"19018_CR21","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"L-C Chen","year":"2017","unstructured":"Chen L-C, Papandreou G, Kokkinos I, Murphy K, Yuille AL (2017) Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. 
IEEE Trans Pattern Anal Mach Intell 40(4):834\u2013848","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"19018_CR22","doi-asserted-by":"crossref","unstructured":"He L, Zhou Q, Li X, Niu L, Zhang L (2021) End-to-end video object detection with spatial-temporal transformers","DOI":"10.1145\/3474085.3475285"},{"key":"19018_CR23","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S (2021) An image is worth 16x16 words: Transformers for image recognition at scale. In: International conference on learning representations"},{"key":"19018_CR24","unstructured":"Rai MCE, Darweesh M, Far AB (2023) Msflood: A multi-sources segmentation for remote sensing flood images. In: 2023 IEEE Intl Conf on dependable, autonomic and secure computing, intl conf on pervasive intelligence and computing, intl conf on cloud and big data computing, intl conf on cyber science and technology congress (DASC\/PiCom\/CBDCom\/CyberSciTech)"},{"key":"19018_CR25","unstructured":"Xie E, Wang W, Yu Z, Anandkumar A, Alvarez JM, Luo P (2021) Segformer: Simple and efficient design for semantic segmentation with transformers"},{"key":"19018_CR26","doi-asserted-by":"crossref","unstructured":"Zheng S, Lu J, Zhao H, Zhu X, Luo Z, Wang Y, Fu Y, Feng J, Xiang T, Torr PHS (2021) Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: Computer vision and pattern recognition","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"19018_CR27","doi-asserted-by":"crossref","unstructured":"Zhang B, Liu L, Phan MH, Tian Z, Shen C, Liu Y (2023) Segvitv2: Exploring efficient and continual semantic segmentation with plain vision transformers. IJCV","DOI":"10.1007\/s11263-023-01894-8"},{"key":"19018_CR28","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"19018_CR29","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee J, Kweon IS (2018) Cbam: convolutional block attention module. In proceedings of the European conference on computer vision (ECCV) pp 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"19018_CR30","unstructured":"Oktay O, Schlemper J, Folgoc LL, Lee M, Heinrich M, Misawa K, Mori K, McDonagh S, Hammerla NY, Kainz B et al (2018) Attention u-net: Learning where to look for the pancreas. arXiv preprint arXiv:1804.03999"},{"key":"19018_CR31","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"19018_CR32","unstructured":"Liang-Chieh C, Papandreou G, Kokkinos I, Murphy K, Yuille A (2015) Semantic image segmentation with deep convolutional nets and fully connected crfs. In: International conference on learning representations"},{"key":"19018_CR33","unstructured":"Chen L-C, Papandreou G, Schroff F, Adam H (2017) Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587"},{"issue":"6","key":"19018_CR34","doi-asserted-by":"publisher","first-page":"1856","DOI":"10.1109\/TMI.2019.2959609","volume":"39","author":"Z Zhou","year":"2019","unstructured":"Zhou Z, Siddiquee MMR, Tajbakhsh N, Liang J (2019) Unet++: Redesigning skip connections to exploit multiscale features in image segmentation. IEEE Trans Med Imaging 39(6):1856\u20131867","journal-title":"IEEE Trans Med Imaging"},{"key":"19018_CR35","unstructured":"Lucio DR, Zanlorensi LA, Menotti D et al (2022) Orcnet: A context-based network to simultaneously segment the ocular region components. 
arXiv preprint arXiv:2204.07456"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-19018-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-024-19018-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-19018-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,27]],"date-time":"2025-01-27T13:55:54Z","timestamp":1737986154000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-024-19018-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,10]]},"references-count":35,"journal-issue":{"issue":"2","published-online":{"date-parts":[[2025,1]]}},"alternative-id":["19018"],"URL":"https:\/\/doi.org\/10.1007\/s11042-024-19018-w","relation":{},"ISSN":["1573-7721"],"issn-type":[{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2024,4,10]]},"assertion":[{"value":"22 August 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 February 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 March 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 April 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We confirm that we have read, understand, and agreed to the submission 
guidelines, policies, and submission declaration of the journal, and all authors of the manuscript have no conflict of interest to declare.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}},{"value":"The manuscript is the author's original work, and the manuscript has not received prior publication and is not under consideration for publication elsewhere. On behalf of all Co-Authors, I bear full responsibility for the submission.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Informed consent"}}]}}