{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T09:35:04Z","timestamp":1742981704127,"version":"3.40.3"},"publisher-location":"Cham","reference-count":33,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031189098"},{"type":"electronic","value":"9783031189104"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-18910-4_14","type":"book-chapter","created":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T23:03:53Z","timestamp":1666825433000},"page":"164-177","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Fluorescence Microscopy Images Segmentation Based on\u00a0Prototypical Networks with\u00a0a\u00a0Few Annotations"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4797-1616","authenticated-orcid":false,"given":"Yuanhao","family":"Guo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6547-1634","authenticated-orcid":false,"given":"Yaoru","family":"Luo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1154-2895","authenticated-orcid":false,"given":"Wenjing","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6176-3130","authenticated-orcid":false,"given":"Ge","family":"Yang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,27]]},"reference":[{"issue":"12","key":"14_CR1","doi-asserted-by":"publisher","first-page":"1233","DOI":"10.1038\/s41592-019-0403-1","volume":"16","author":"E Moen","year":"2019","unstructured":"Moen, E., Bannon, D., Kudo, T., et al.: Deep learning for cellular image analysis. Nat. Methods 16(12), 1233\u20131246 (2019)","journal-title":"Nat. Methods"},{"key":"14_CR2","doi-asserted-by":"crossref","unstructured":"Nixon-Abell, j., Obara, C.J., Weigel, A.V., et al.: Increased spatiotemporal resolution reveals highly dynamic dense tubular matrices in the peripheral ER. Science 354(6311), aaf3928 (2016)","DOI":"10.1126\/science.aaf3928"},{"issue":"12","key":"14_CR3","doi-asserted-by":"publisher","first-page":"1247","DOI":"10.1038\/s41592-019-0612-7","volume":"16","author":"JC Caicedo","year":"2019","unstructured":"Caicedo, J.C., Goodman, A., Karhohs, K.W., et al.: Nucleus segmentation across imaging experiments: the 2018 Data Science Bowl. Nat. Methods 16(12), 1247\u20131253 (2019)","journal-title":"Nat. Methods"},{"issue":"4","key":"14_CR4","doi-asserted-by":"publisher","first-page":"555","DOI":"10.1038\/s41587-021-01094-0","volume":"40","author":"NF Greenwald","year":"2022","unstructured":"Greenwald, N.F., Miller, G., Moen, E., et al.: Whole-cell segmentation of tissue images with human-level performance using large-scale data annotation and deep learning. Nat. Biotechnol. 40(4), 555\u2013565 (2022)","journal-title":"Nat. 
Biotechnol."},{"issue":"3","key":"14_CR5","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3386252","volume":"53","author":"Y Wang","year":"2020","unstructured":"Wang, Y., Yao, Q., Kwok, J.T., Ni, L.M.: Generalizing from a few examples: a survey on few-shot learning. ACM Comput. Surv. 53(3), 1\u201334 (2020)","journal-title":"ACM Comput. Surv."},{"issue":"2","key":"14_CR6","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s11432-020-3156-7","volume":"64","author":"G Cheng","year":"2021","unstructured":"Cheng, G., Li, R., Lang, C., Han, J.: Task-wise attention guided part complementary learning for few-shot image classification. Sci. China Inf. Sci. 64(2), 1\u201314 (2021). https:\/\/doi.org\/10.1007\/s11432-020-3156-7","journal-title":"Sci. China Inf. Sci."},{"key":"14_CR7","unstructured":"Snell, J., Swersky, K., Zemel, R.: Prototypical networks for few-shot learning. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"14_CR8","unstructured":"Dong , N., Xing, E.P.: Few-shot semantic segmentation with prototype learnings. In: British Machine Vision Conference, vol. 3 (2018)"},{"key":"14_CR9","doi-asserted-by":"crossref","unstructured":"Nguyen, K., Todorovic, S.: Feature weighting and boosting for few-shot segmentation. In: IEEE International Conference on Computer Vision, pp. 622\u2013631 (2019)","DOI":"10.1109\/ICCV.2019.00071"},{"key":"14_CR10","unstructured":"Tian, Z., Zhao, H., Shu, M., et al.: Prior guided feature enrichment network for few-shot segmentation. IEEE Trans. Pattern. Anal. Mach. Intell. (2020)"},{"key":"14_CR11","doi-asserted-by":"crossref","unstructured":"Hospedales, T., Antoniou, A., Micaelli, P., Storkey, A.: Meta-learning in neural networks: a survey. arXiv preprint arXiv:2004.05439 (2020)","DOI":"10.1109\/TPAMI.2021.3079209"},{"issue":"1","key":"14_CR12","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s40537-016-0043-6","volume":"3","author":"K Weiss","year":"2016","unstructured":"Weiss, K., Khoshgoftaar, T.M., Wang, D.D.: A survey of transfer learning. J. Big Data 3(1), 1\u201340 (2016). https:\/\/doi.org\/10.1186\/s40537-016-0043-6","journal-title":"J. Big Data"},{"issue":"9","key":"14_CR13","doi-asserted-by":"publisher","first-page":"3855","DOI":"10.1109\/TCYB.2020.2992433","volume":"50","author":"X Zhang","year":"2020","unstructured":"Zhang, X., Wei, Y., Yang, Y., Huang, T.S.: SG-One: similarity guidance network for one-shot semantic segmentation. IEEE Trans. Cybern. 50(9), 3855\u20133865 (2020)","journal-title":"IEEE Trans. Cybern."},{"issue":"7","key":"14_CR14","doi-asserted-by":"publisher","first-page":"1837","DOI":"10.1109\/TMI.2022.3150682","volume":"41","author":"C Ouyang","year":"2022","unstructured":"Ouyang, C., Biffi, C., Chen, C., et al.: Self-supervised learning for few-shot medical image segmentation. IEEE Trans. Med. Imaging 41(7), 1837\u20131848 (2022)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"14_CR15","doi-asserted-by":"publisher","first-page":"386","DOI":"10.1007\/978-3-030-88010-1_32","volume-title":"Chinese Conference on Pattern Recognition and Computer Vision","author":"Y Guo","year":"2021","unstructured":"Guo, Y., Huang, J., Zhou, Y., Luo, Y., Li, W., Yang, G.: Segmentation of intracellular structures in fluorescence microscopy images by fusing low-level features. In: Chinese Conference on Pattern Recognition and Computer Vision, vol. 13021, pp. 386\u2013397. Springer, Cham (2021). 
https:\/\/doi.org\/10.1007\/978-3-030-88010-1_32"},{"key":"14_CR16","unstructured":"Finn, C., Abbeel, P., Levine, S.: Model-agnostic meta-learning for fast adaptation of deep networks. In: International Conference on Machine Learning, pp. 1126\u20131135. PMLR (2017)"},{"key":"14_CR17","doi-asserted-by":"crossref","unstructured":"Jamal, M.A., Qi, G.J.: Task agnostic meta-learning for few-shot learning. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 11719\u201311727 (2019)","DOI":"10.1109\/CVPR.2019.01199"},{"key":"14_CR18","doi-asserted-by":"crossref","unstructured":"Sun, Q., Liu, Y., Chua, T., Schiele, B.: Meta-transfer learning for few-shot learning. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 403\u2013412 (2019)","DOI":"10.1109\/CVPR.2019.00049"},{"key":"14_CR19","doi-asserted-by":"crossref","unstructured":"Siam, M., Oreshkin, B.N., Jagersand, M.: AMP: adaptive masked proxies for few-shot segmentation. In: IEEE International Conference on Computer Vision, pp. 5249\u20135258 (2019)","DOI":"10.1109\/ICCV.2019.00535"},{"key":"14_CR20","doi-asserted-by":"crossref","unstructured":"Zhang, C., Lin, G., Liu, F., et al.: Pyramid graph networks with connection attentions for region-based one-shot semantic segmentation. In: IEEE International Conference on Computer Vision, pp. 9587\u20139595 (2019)","DOI":"10.1109\/ICCV.2019.00968"},{"key":"14_CR21","doi-asserted-by":"crossref","unstructured":"Liu, W., Zhang, C., Lin, G., Liu, F.: CRNet: cross-reference networks for few-shot segmentation. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 4165\u20134173 (2020)","DOI":"10.1109\/CVPR42600.2020.00422"},{"key":"14_CR22","doi-asserted-by":"crossref","unstructured":"Zhang, B., Xiao, J., Qin, T.: Self-guided and cross-guided learning for few-shot segmentation. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 8312\u20138321 (2021)","DOI":"10.1109\/CVPR46437.2021.00821"},{"key":"14_CR23","doi-asserted-by":"publisher","first-page":"142","DOI":"10.1007\/978-3-030-58545-7_9","volume-title":"Europe Conference on Computer Vision","author":"Y Liu","year":"2020","unstructured":"Liu, Y., Zhang, X., Zhang, S., He, X.: Part-Aware prototype network for few-shot semantic segmentation. In: Europe Conference on Computer Vision, vol. 12354, pp. 142\u2013158. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58545-7_9"},{"key":"14_CR24","doi-asserted-by":"publisher","first-page":"763","DOI":"10.1007\/978-3-030-58598-3_45","volume-title":"Europe Conference on Computer Vision","author":"B Yang","year":"2020","unstructured":"Yang, B., Liu, C., Li, B., Jiao, J., Ye, Q.: Prototype mixture models for few-shot semantic segmentation. In: Europe Conference on Computer Vision, vol. 12353, pp. 763\u2013778. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58598-3_45"},{"key":"14_CR25","doi-asserted-by":"publisher","first-page":"76","DOI":"10.1007\/978-3-030-37734-2_7","volume-title":"International Conference on Multimedia Modeling","author":"Y Yang","year":"2020","unstructured":"Yang, Y., Meng, F., Li, H., Wu, Q., Xu, X., Chen, S.: A new local transformation module for few-shot segmentation. In: International Conference on Multimedia Modeling, vol. 11962, pp. 76\u201387. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-37734-2_7"},{"key":"14_CR26","doi-asserted-by":"crossref","unstructured":"Lang, C., Cheng, G., Tu, B., Han, J.: Learning what not to segment: a new perspective on few-shot segmentation. 
In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 8057\u20138067 (2022)","DOI":"10.1109\/CVPR52688.2022.00789"},{"key":"14_CR27","doi-asserted-by":"crossref","unstructured":"Shen, Z., Liu, Z., Qin, J., et al.: Partial is better than all: revisiting fine-tuning strategy for few-shot learning. In: AAAI Conference on Artificial Intelligence, vol. 35, pp. 9594\u20139602 (2021)","DOI":"10.1609\/aaai.v35i11.17155"},{"key":"14_CR28","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"International Conference on Medical Image Computing and Computer-Assisted Intervention","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"key":"14_CR29","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/978-3-030-00889-5_1","volume-title":"Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support","author":"Z Zhou","year":"2018","unstructured":"Zhou, Z., Rahman Siddiquee, M.M., Tajbakhsh, N., Liang, J.: UNet++: a nested U-Net architecture for medical image segmentation. In: Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, vol. 11045, pp. 3\u201311. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00889-5_1"},{"issue":"10","key":"14_CR30","doi-asserted-by":"publisher","first-page":"3349","DOI":"10.1109\/TPAMI.2020.2983686","volume":"43","author":"J Wang","year":"2020","unstructured":"Wang, J., Sun, K., Cheng, T., et al.: Deep high-resolution representation learning for visual recognition. IEEE Trans. Pattern. Anal. Mach. Intell. 43(10), 3349\u20133364 (2020)","journal-title":"IEEE Trans. Pattern. Anal. Mach. Intell."},{"key":"14_CR31","unstructured":"Paszke, A., Gross, S., Massa, F., et al.: Pytorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"14_CR32","doi-asserted-by":"crossref","unstructured":"Wang, Z., Xu, J., Liu, L., et al.: RANet: ranking attention network for fast video object segmentation. In: IEEE International Conference on Computer Vision, pp. 3978\u20133987 (2019)","DOI":"10.1109\/ICCV.2019.00408"},{"issue":"6","key":"14_CR33","doi-asserted-by":"publisher","first-page":"1121","DOI":"10.1109\/TMI.2013.2255309","volume":"32","author":"S Arslan","year":"2013","unstructured":"Arslan, S., Ersahin, T., Cetin-Atalay, R., Gunduz-Demir, C.: Attributed relational graphs for cell nucleus segmentation in fluorescence microscopy images. IEEE Trans. Med. Imaging 32(6), 1121\u20131131 (2013)","journal-title":"IEEE Trans. Med. 
Imaging"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-18910-4_14","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T23:33:20Z","timestamp":1666827200000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-18910-4_14"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031189098","9783031189104"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-18910-4_14","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"27 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shenzhen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/en.prcv.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"564","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"233","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"41% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.03","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.35","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
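The record above is a standard Crossref "work" payload: the chapter metadata (DOI, title, authors, references) sits under the top-level "message" key. As an illustrative sketch only (it assumes network access and the third-party `requests` package; the field names are taken directly from the record above), the same record can be fetched from the public Crossref REST API and its key fields read like this:

```python
# Minimal sketch: retrieve a Crossref work record and print a few fields.
# Assumptions: network access and the `requests` package are available.
import requests

DOI = "10.1007/978-3-031-18910-4_14"  # DOI taken from the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the chapter metadata sits under "message"

# Title and authors as they appear in the record.
print(work["title"][0])
print(", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", [])))

# Reference count and container (series / proceedings) title.
print("references:", work.get("references-count"))
print("container:", " / ".join(work.get("container-title", [])))
```

Run as-is, this would print the chapter title, the four authors, the reference count (33), and the Lecture Notes in Computer Science / PRCV container titles shown in the record.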