{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,13]],"date-time":"2025-12-13T09:46:43Z","timestamp":1765619203550,"version":"3.48.0"},"reference-count":46,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T00:00:00Z","timestamp":1750809600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T00:00:00Z","timestamp":1750809600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62273155"],"award-info":[{"award-number":["62273155"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"the Key-Area Research and Development Program of Guangdong","award":["2020B1111010002 and  2018B010109001"],"award-info":[{"award-number":["2020B1111010002 and  2018B010109001"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. Mach. Learn. &amp; Cyber."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s13042-025-02708-8","type":"journal-article","created":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T01:56:50Z","timestamp":1750816610000},"page":"10349-10363","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["RPGT: a retrospective prompt-guided parameter tuning method for knowledge transfer of contrastive self-supervised vision model"],"prefix":"10.1007","volume":"16","author":[{"given":"Huangyuan","family":"Wu","sequence":"first","affiliation":[]},{"given":"Bin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Lianfang","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Wenzhi","family":"Liao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,25]]},"reference":[{"issue":"11","key":"2708_CR1","doi-asserted-by":"publisher","first-page":"4037","DOI":"10.1109\/TPAMI.2020.2992393","volume":"43","author":"L Jing","year":"2021","unstructured":"Jing L, Tian Y (2021) Self-supervised visual feature learning with deep neural networks: A survey. IEEE Trans Pattern Anal Mach Intell 43(11):4037\u20134058. https:\/\/doi.org\/10.1109\/TPAMI.2020.2992393","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"2708_CR2","first-page":"4146","volume":"36","author":"L Sun","year":"2022","unstructured":"Sun L, Zhang Z, Ye J, Peng H, Zhang J, Su S, Philip SY (2022) A self-supervised mixed-curvature graph neural network. In: Proceedings of the AAAI Conference on Artificial Intelligence 36:4146\u20134155","journal-title":"In: Proceedings of the AAAI Conference on Artificial Intelligence"},{"issue":"5","key":"2708_CR3","doi-asserted-by":"publisher","first-page":"2039","DOI":"10.1007\/s13042-023-02014-1","volume":"15","author":"QB Sang","year":"2024","unstructured":"Sang QB, Hou YJ, Qian PJ, Wu Q (2024) Self-supervised learning-leveraged boosting ultrasound image segmentation via mask reconstruction. Int J Mach Learn Cybern 15(5):2039\u20132048. 
https:\/\/doi.org\/10.1007\/s13042-023-02014-1","journal-title":"Int J Mach Learn Cybern"},{"key":"2708_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2023.3291776","volume":"72","author":"H Wu","year":"2023","unstructured":"Wu H, Li B, Tian L, Sun Z, Dong C, Liao W (2023) Core: Contrastive and restorative self-supervised learning for surface defect inspection. IEEE Trans Instrum Meas 72:1\u201312. https:\/\/doi.org\/10.1109\/TIM.2023.3291776","journal-title":"IEEE Trans Instrum Meas"},{"issue":"2","key":"2708_CR5","doi-asserted-by":"publisher","first-page":"2412","DOI":"10.1109\/TPAMI.2022.3170559","volume":"45","author":"Y Xie","year":"2023","unstructured":"Xie Y, Xu Z, Zhang J, Wang Z, Ji S (2023) Self-supervised learning of graph neural networks: A unified review. IEEE Trans Pattern Anal Mach Intell 45(2):2412\u20132429. https:\/\/doi.org\/10.1109\/TPAMI.2022.3170559","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"2708_CR6","doi-asserted-by":"crossref","unstructured":"Chen X, He K (2021) Exploring simple siamese representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 15750\u201315758","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"2708_CR7","doi-asserted-by":"crossref","unstructured":"Zhang JO, Sax A, Zamir A, Guibas L, Malik J (2020) Side-tuning: a baseline for network adaptation via additive side networks. In: 16th European Conference on Computer Vision (ECCV), pp. 698\u2013714","DOI":"10.1007\/978-3-030-58580-8_41"},{"key":"2708_CR8","doi-asserted-by":"publisher","unstructured":"Liu PF, Yuan WZ, Fu JL, Jiang ZB, Hayashi H, Neubig G (2023) Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. ACM Computing Surveys 55(9) https:\/\/doi.org\/10.1145\/3560815","DOI":"10.1145\/3560815"},{"key":"2708_CR9","doi-asserted-by":"crossref","unstructured":"Yu BX, Chang J, Wang H, Liu L, Wang S, Wang Z, Lin J, Xie L, Li H, Lin Z, et al (2023) Visual tuning. ACM Computing Surveys","DOI":"10.1145\/3657632"},{"key":"2708_CR10","doi-asserted-by":"crossref","unstructured":"Huang Q, Dong X, Chen D, Zhang W, Wang F, Hua G, Yu N (2023) Diversity-aware meta visual prompting. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10878\u201310887","DOI":"10.1109\/CVPR52729.2023.01047"},{"key":"2708_CR11","doi-asserted-by":"publisher","unstructured":"Bahng H, Jahanian A, Sankaranarayanan S, Isola P (2022) Exploring Visual Prompts for Adapting Large-Scale Models, 2203\u201317274 https:\/\/doi.org\/10.48550\/arXiv.2203.17274arXiv:2203.17274","DOI":"10.48550\/arXiv.2203.17274"},{"key":"2708_CR12","doi-asserted-by":"publisher","unstructured":"Jia ML, Tang LM, Chen BC, Cardie C, Belongie S, Hariharan B, Lim SN (2022) Visual prompt tuning. In: Proceedings of European Conference on Computer Vision (ECCV). Lecture Notes in Computer Science, vol. 13693, pp. 709\u2013727 . 
https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"2708_CR13","doi-asserted-by":"publisher","unstructured":"Yoo S, Kim E, Jung D, Lee J, Yoon S (2023) Improving Visual Prompt Tuning for Self-supervised Vision Transformers, 2306\u201305067 https:\/\/doi.org\/10.48550\/arXiv.2306.05067arXiv:2306.05067","DOI":"10.48550\/arXiv.2306.05067"},{"key":"2708_CR14","doi-asserted-by":"crossref","unstructured":"He K, Fan H, Wu Y, Xie S, Girshick R (2020) Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9729\u20139738","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"2708_CR15","unstructured":"Wang Y, Cheng L, Fang C, Zhang D, Duan M, Wang M (2024) Revisiting the power of prompt for visual tuning. arXiv, 2402"},{"issue":"7","key":"2708_CR16","doi-asserted-by":"publisher","first-page":"7470","DOI":"10.1109\/tkde.2022.3193569","volume":"35","author":"J Xu","year":"2023","unstructured":"Xu J, Ren YZ, Tang HY, Yang ZM, Pan LL, Yang Y, Pu XR, Yu PS, He LF (2023) Self-supervised discriminative feature learning for deep multi-view clustering. IEEE Trans Knowl Data Eng 35(7):7470\u20137482. https:\/\/doi.org\/10.1109\/tkde.2022.3193569","journal-title":"IEEE Trans Knowl Data Eng"},{"key":"2708_CR17","doi-asserted-by":"publisher","unstructured":"Liu X, Zhang F, Hou Z, Mian L, Wang Z, Zhang J, Tang J (2021) Self-supervised learning: Generative or contrastive. IEEE Transactions on Knowledge and Data Engineering, 1\u20131 https:\/\/doi.org\/10.1109\/TKDE.2021.3090866","DOI":"10.1109\/TKDE.2021.3090866"},{"key":"2708_CR18","unstructured":"Chen T, Kornblith S, Norouzi M, Hinton G (2021) A simple framework for contrastive learning of visual representations. In: Proceedings of International Conference on Machine Learning, pp. 1597\u20131607"},{"key":"2708_CR19","doi-asserted-by":"publisher","unstructured":"Chen X, Xie S, He K (2021) An empirical study of training self-supervised vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 9620\u20139629 . https:\/\/doi.org\/10.1109\/ICCV48922.2021.00950","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"2708_CR20","doi-asserted-by":"publisher","unstructured":"Chen X, Fan H, Girshick R, He K (2020) Improved Baselines with Momentum Contrastive Learning, 2003\u201304297 https:\/\/doi.org\/10.48550\/arXiv.2003.04297arXiv:2003.04297","DOI":"10.48550\/arXiv.2003.04297"},{"key":"2708_CR21","doi-asserted-by":"publisher","unstructured":"Yang C, Wu Z, Zhou B, Lin S (2021) Instance localization for self-supervised detection pretraining. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3986\u20133995. https:\/\/doi.org\/10.1109\/CVPR46437.2021.00398","DOI":"10.1109\/CVPR46437.2021.00398"},{"key":"2708_CR22","doi-asserted-by":"crossref","unstructured":"Wang X, Zhang R, Shen C, Kong T, Li L (2021) Dense contrastive learning for self-supervised visual pre-training. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3024\u20133033","DOI":"10.1109\/CVPR46437.2021.00304"},{"issue":"10","key":"2708_CR23","doi-asserted-by":"publisher","first-page":"1345","DOI":"10.1109\/TKDE.2009.191","volume":"22","author":"SJ Pan","year":"2010","unstructured":"Pan SJ, Yang Q (2010) A survey on transfer learning. IEEE Trans Knowl Data Eng 22(10):1345\u20131359. 
https:\/\/doi.org\/10.1109\/TKDE.2009.191","journal-title":"IEEE Trans Knowl Data Eng"},{"key":"2708_CR24","doi-asserted-by":"crossref","unstructured":"Pfeiffer J, Rckl A, Poth C, Kamath A, Vulic I, Ruder S, Cho K, Gurevych I (2020) Adapterhub: A framework for adapting transformers. In: Proceedings of Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 46\u201354","DOI":"10.18653\/v1\/2020.emnlp-demos.7"},{"key":"2708_CR25","unstructured":"Cai H, Gan C, Zhu L, Han S (2020) Tinytl: Reduce memory, not parameters for efficient on-device learning. In: Annual Conference on Neural Information Processing Systems"},{"key":"2708_CR26","unstructured":"Zhang T, Wu F, Katiyar A, Weinberger KQ, Artzi Y (2020) Revisiting few-sample bert fine-tuning. In: International Conference on Learning Representations"},{"issue":"1","key":"2708_CR27","doi-asserted-by":"publisher","first-page":"41","DOI":"10.1023\/a:1007379606734","volume":"28","author":"R Caruana","year":"1997","unstructured":"Caruana R (1997) Multitask learning. Mach Learn 28(1):41\u201375. https:\/\/doi.org\/10.1023\/a:1007379606734","journal-title":"Mach Learn"},{"key":"2708_CR28","doi-asserted-by":"crossref","unstructured":"Jha A, Kumar A, Banerjee B, Chaudhuri S (2020) Adamt-net: An adaptive weight learning based multi-task learning model for scene understanding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 706\u2013707","DOI":"10.1109\/CVPRW50498.2020.00361"},{"key":"2708_CR29","doi-asserted-by":"publisher","unstructured":"Vandenhende S, Georgoulis S, Gansbeke WV, Proesmans M, Dai D, Gool LV (2021) Multi-task learning for dense prediction tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 1\u20131 https:\/\/doi.org\/10.1109\/TPAMI.2021.3054719","DOI":"10.1109\/TPAMI.2021.3054719"},{"key":"2708_CR30","doi-asserted-by":"crossref","unstructured":"Kokkinos I (2017) Ubernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6129\u20136138","DOI":"10.1109\/CVPR.2017.579"},{"key":"2708_CR31","doi-asserted-by":"publisher","unstructured":"Gao Y, Ma J, Zhao M, Liu W, Yuille AL (2019) Nddr-cnn: Layerwise feature fusing in multi-task cnns by neural discriminative dimensionality reduction. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3200\u20133209 . https:\/\/doi.org\/10.1109\/CVPR.2019.00332","DOI":"10.1109\/CVPR.2019.00332"},{"key":"2708_CR32","doi-asserted-by":"publisher","unstructured":"Kirillov A, Mintun E, Ravi N, Mao H, Rolland C, Gustafson L, Xiao T, Whitehead S, Berg AC, Lo W-Y, Doll\u00e1r P, Girshick R (2023) Segment Anything, 2304\u201302643 https:\/\/doi.org\/10.48550\/arXiv.2304.02643arXiv:2304.02643","DOI":"10.48550\/arXiv.2304.02643"},{"key":"2708_CR33","unstructured":"Krizhevsky A, Hinton G (2009) Learning multiple layers of features from tiny images"},{"key":"2708_CR34","doi-asserted-by":"crossref","unstructured":"Bossard L, Guillaumin M, Van\u00a0Gool L (2014) Food-101mining discriminative components with random forests. In: 13th European Conference on Computer Vision (ECCV), pp. 446\u2013461","DOI":"10.1007\/978-3-319-10599-4_29"},{"key":"2708_CR35","doi-asserted-by":"crossref","unstructured":"Cimpoi M, Maji S, Kokkinos I, Mohamed S, Vedaldi A (2014) Describing textures in the wild. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3606\u20133613","DOI":"10.1109\/CVPR.2014.461"},{"key":"2708_CR36","unstructured":"Wah C, Branson S, Welinder P, Perona P, Belongie S (2011) The caltech-ucsd birds-200-2011 dataset"},{"key":"2708_CR37","doi-asserted-by":"crossref","unstructured":"Van\u00a0Horn G, Branson S, Farrell R, Haber S, Barry J, Ipeirotis P, Perona P, Belongie S (2015) Building a bird recognition app and large scale dataset with citizen scientists: The fine print in fine-grained dataset collection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 595\u2013604","DOI":"10.1109\/CVPR.2015.7298658"},{"key":"2708_CR38","unstructured":"Khosla A, Jayadevaprakash N, Yao B, Li F-F (2011) Novel dataset for fine-grained image categorization: Stanford dogs. In: Proceedings of CVPR Workshop on Fine-grained Visual Categorization (FGVC), vol. 2"},{"key":"2708_CR39","doi-asserted-by":"crossref","unstructured":"Nilsback M-E, Zisserman A (2018) Automated flower classification over a large number of classes. In: Proceedings of Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722\u2013729","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"2708_CR40","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham M, Van Gool L, Williams CK, Winn J, Zisserman A (2010) The pascal visual object classes (voc) challenge. Int J Comput Vision 88:303\u2013338","journal-title":"Int J Comput Vision"},{"issue":"1","key":"2708_CR41","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1007\/s00371-018-1588-5","volume":"36","author":"YB Huang","year":"2020","unstructured":"Huang YB, Qiu CY, Yuan K (2020) Surface defect saliency of magnetic tile. Visual Computer 36(1):85\u201396. https:\/\/doi.org\/10.1007\/s00371-018-1588-5","journal-title":"Visual Computer"},{"issue":"23","key":"2708_CR42","doi-asserted-by":"publisher","first-page":"7935","DOI":"10.1109\/JSEN.2017.2761858","volume":"17","author":"J Gan","year":"2017","unstructured":"Gan J, Li Q, Wang J, Yu H (2017) A hierarchical extractor-based visual rail surface inspection system. IEEE Sens J 17(23):7935\u20137944. https:\/\/doi.org\/10.1109\/JSEN.2017.2761858","journal-title":"IEEE Sens J"},{"key":"2708_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.compind.2021.103459","author":"J Bozic","year":"2021","unstructured":"Bozic J, Tabernik D, Skocaj D (2021) Mixed supervision for surface-defect detection: From weakly to fully supervised learning. Comput Ind. https:\/\/doi.org\/10.1016\/j.compind.2021.103459","journal-title":"Comput Ind"},{"key":"2708_CR44","doi-asserted-by":"publisher","unstructured":"Xie Z, Zhang Z, Cao Y, Lin Y, Bao J, Yao Z, Dai Q, Hu H (2022) Simmim: a simple framework for masked image modeling. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9643\u20139653. https:\/\/doi.org\/10.1109\/CVPR52688.2022.00943","DOI":"10.1109\/CVPR52688.2022.00943"},{"key":"2708_CR45","doi-asserted-by":"publisher","unstructured":"Xiao TT, Liu YC, Zhou BL, Jiang YN, Sun J (2018) Unified perceptual parsing for scene understanding. In: Proceedings of European Conference on Computer Vision (ECCV), pp. 432\u2013448. 
https:\/\/doi.org\/10.1007\/978-3-030-01228-1_26","DOI":"10.1007\/978-3-030-01228-1_26"},{"key":"2708_CR46","doi-asserted-by":"publisher","unstructured":"Zhang RR, Zhang W, Fang RY, Gao P, Li KC, Dai JF, Qiao Y, Li HS (2022) Tip-adapter: Training-free adaption of clip for few-shot classification. In: 17th European Conference on Computer Vision (ECCV), pp. 493\u2013510 . https:\/\/doi.org\/10.1007\/978-3-031-19833-5_29","DOI":"10.1007\/978-3-031-19833-5_29"}],"container-title":["International Journal of Machine Learning and Cybernetics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02708-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13042-025-02708-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02708-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,13]],"date-time":"2025-12-13T09:41:49Z","timestamp":1765618909000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13042-025-02708-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,25]]},"references-count":46,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["2708"],"URL":"https:\/\/doi.org\/10.1007\/s13042-025-02708-8","relation":{},"ISSN":["1868-8071","1868-808X"],"issn-type":[{"type":"print","value":"1868-8071"},{"type":"electronic","value":"1868-808X"}],"subject":[],"published":{"date-parts":[[2025,6,25]]},"assertion":[{"value":"22 May 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 May 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 June 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
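The object above is a standard Crossref work message: a {"status", "message-type", "message"} envelope whose "message" member carries the bibliographic payload. As a minimal sketch of how such a record can be retrieved and read, the snippet below queries the public Crossref REST API's /works/{doi} route (a real endpoint that returns this same envelope); the field accesses mirror keys visible in the record, while the variable names are illustrative and error handling is omitted.

```python
import json
import urllib.request

# DOI taken from the record above.
DOI = "10.1007/s13042-025-02708-8"

# The Crossref REST API serves a work record at /works/{doi},
# wrapped in the {"status": ..., "message": {...}} envelope shown above.
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    envelope = json.load(resp)

work = envelope["message"]  # the bibliographic payload

# "title" and "container-title" are lists; "author" holds given/family pairs.
print(work["title"][0])
print(", ".join(f"{a['given']} {a['family']}" for a in work.get("author", [])))
print("journal:", work["container-title"][0])
print("references:", work["reference-count"])
```

Run against the live API, this would print the article title, the four authors, the journal name, and the reference count of 46 recorded above; the same accesses work on the record parsed from a local file.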