{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T16:08:45Z","timestamp":1774541325132,"version":"3.50.1"},"reference-count":59,"publisher":"Springer Science and Business Media LLC","issue":"22","license":[{"start":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T00:00:00Z","timestamp":1693872000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T00:00:00Z","timestamp":1693872000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,11]]},"DOI":"10.1007\/s10489-023-04890-0","type":"journal-article","created":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T06:01:51Z","timestamp":1693893711000},"page":"27191-27206","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["Domain consensual contrastive learning for few-shot universal domain adaptation"],"prefix":"10.1007","volume":"53","author":[{"given":"Haojin","family":"Liao","sequence":"first","affiliation":[]},{"given":"Qiang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Sicheng","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Tengfei","family":"Xing","sequence":"additional","affiliation":[]},{"given":"Runbo","family":"Hu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,5]]},"reference":[{"key":"4890_CR1","doi-asserted-by":"crossref","unstructured":"Alipour N, Tahmoresnezhad J (2022) Heterogeneous domain adaptation with statistical distribution alignment and progressive pseudo label selection. Appl Intell 52:1\u201318","DOI":"10.1007\/s10489-021-02756-x"},{"issue":"2","key":"4890_CR2","doi-asserted-by":"publisher","first-page":"539","DOI":"10.1109\/TNNLS.2020.3028078","volume":"33","author":"J Chen","year":"2020","unstructured":"Chen J, Wu X, Duan L, Gao S (2020) Domain adversarial reinforcement learning for partial domain adaptation. IEEE Trans Neural Netw Learn Syst 33(2):539\u2013553","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"4890_CR3","unstructured":"Chen T, Kornblith S, Norouzi M, Hinton G (2020) A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning. pp 1597\u20131607"},{"key":"4890_CR4","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1109\/TIP.2019.2928630","volume":"29","author":"Y Chen","year":"2019","unstructured":"Chen Y, Song S, Li S, Wu C (2019) A graph embedding framework for maximum mean discrepancy-based domain adaptation algorithms. IEEE Trans Image Process 29:199\u2013213","journal-title":"IEEE Trans Image Process"},{"key":"4890_CR5","doi-asserted-by":"publisher","first-page":"6891","DOI":"10.1007\/s00521-020-05465-7","volume":"33","author":"Z Cheng","year":"2021","unstructured":"Cheng Z, Chen C, Chen Z, Fang K, Jin X (2021) Robust and high-order correlation alignment for unsupervised domain adaptation. Neural Comput Appl 33:6891\u20136903","journal-title":"Neural Comput Appl"},{"key":"4890_CR6","doi-asserted-by":"crossref","unstructured":"Deng J, Dong W, Socher R, Li L-J, Li K, Fei-Fei L (2009) Imagenet: A large-scale hierarchical image database. In: IEEE Conference on Computer Vision and Pattern Recognition. pp\u00a0248\u2013255","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"4890_CR7","doi-asserted-by":"crossref","unstructured":"Ebrahimi M, Chai Y, Zhang H\u00a0H, Chen H (2022) Heterogeneous domain adaptation with adversarial neural representation learning: Experiments on e-commerce and cybersecurity. IEEE Trans Pattern Anal Mach Intell 45:1862\u20131875","DOI":"10.1109\/TPAMI.2022.3163338"},{"issue":"10","key":"4890_CR8","doi-asserted-by":"publisher","first-page":"4309","DOI":"10.1109\/TNNLS.2020.3017213","volume":"32","author":"Z Fang","year":"2021","unstructured":"Fang Z, Lu J, Liu F, Xuan J, Zhang G (2021) Open set domain adaptation: Theoretical bound and algorithm. IEEE Trans Neural Netw Learn Syst 32(10):4309\u20134322","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"4890_CR9","doi-asserted-by":"publisher","first-page":"2898","DOI":"10.1109\/TIP.2021.3056212","volume":"30","author":"H Feng","year":"2021","unstructured":"Feng H, Chen M, Hu J, Shen D, Liu H, Cai D (2021) Complementary pseudo labels for unsupervised domain adaptation on person re-identification. IEEE Trans Image Process 30:2898\u20132907","journal-title":"IEEE Trans Image Process"},{"key":"4890_CR10","doi-asserted-by":"crossref","unstructured":"Fu B, Cao Z, Long M, Wang J (2020) Learning to detect open classes for universal domain adaptation. In: European Conference on Computer Vision. pp 567\u2013583","DOI":"10.1007\/978-3-030-58555-6_34"},{"key":"4890_CR11","doi-asserted-by":"crossref","unstructured":"He K, Chen X, Xie S, Li Y, Doll\u00e1r P, Girshick R (2022) Masked autoencoders are scalable vision learners. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp\u00a016000\u201316009","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"4890_CR12","doi-asserted-by":"crossref","unstructured":"He K, Fan H, Wu Y, Xie S, Girshick R (2020) Momentum contrast for unsupervised visual representation learning. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp\u00a09729\u20139738","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"4890_CR13","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: IEEE Conference on Computer Vision and Pattern Recognition. pp\u00a0770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"4890_CR14","doi-asserted-by":"crossref","unstructured":"He Q-Q, Siu SWI, Si Y-W (2022) Attentive recurrent adversarial domain adaptation with top-k pseudo-labeling for time series classification. Appl Intell 53:1\u201320","DOI":"10.1007\/s10489-022-04176-x"},{"key":"4890_CR15","doi-asserted-by":"publisher","first-page":"11255","DOI":"10.1007\/s11042-020-10193-0","volume":"80","author":"J Huang","year":"2021","unstructured":"Huang J, Zhang P, Zhou Z, Fan K (2021) Domain compensatory adversarial networks for partial domain adaptation. Multimed Tools Appl 80:11255\u201311272","journal-title":"Multimed Tools Appl"},{"issue":"3","key":"4890_CR16","doi-asserted-by":"publisher","first-page":"766","DOI":"10.1109\/TPAMI.2019.2945942","volume":"43","author":"WM Kouw","year":"2021","unstructured":"Kouw WM, Loog M (2021) A review of domain adaptation without target labels. IEEE Trans Pattern Anal Mach Intell 43(3):766\u2013785","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"7","key":"4890_CR17","first-page":"3909","volume":"44","author":"M Kutbi","year":"2021","unstructured":"Kutbi M, Peng K-C, Wu Z (2021) Zero-shot deep domain adaptation with common representation learning. IEEE Trans Pattern Anal Mach Intell 44(7):3909\u20133924","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4890_CR18","doi-asserted-by":"crossref","unstructured":"Li G, Kang G, Zhu Y, Wei Y, Yang Y (2021) Domain consensus clustering for universal domain adaptation. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp\u00a09757\u20139766","DOI":"10.1109\/CVPR46437.2021.00963"},{"key":"4890_CR19","doi-asserted-by":"publisher","first-page":"267","DOI":"10.1007\/s11263-020-01364-5","volume":"129","author":"H Li","year":"2021","unstructured":"Li H, Wan R, Wang S, Kot AC (2021) Unsupervised domain adaptation in the wild via disentangling representation learning. Int J Comput Vis 129:267\u2013283","journal-title":"Int J Comput Vis"},{"issue":"7","key":"4890_CR20","doi-asserted-by":"publisher","first-page":"2329","DOI":"10.1109\/TPAMI.2020.2964173","volume":"43","author":"S Li","year":"2020","unstructured":"Li S, Liu CH, Lin Q, Wen Q, Su L, Huang G, Ding Z (2020) Deep residual correction network for partial domain adaptation. IEEE Trans Pattern Anal Mach Intell 43(7):2329\u20132344","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4890_CR21","unstructured":"Paszke A, Gross S, Massa F, Lerer A, Bradbury J, Chanan G, Killeen T, Lin Z, Gimelshein N, Antiga L, et\u00a0al (2019) Pytorch: An imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems. pp 8024\u20138035"},{"key":"4890_CR22","doi-asserted-by":"crossref","unstructured":"Peng X, Bai Q, Xia X, Huang Z, Saenko K, Wang B (2019) Moment matching for multi-source domain adaptation. In: IEEE International Conference on Computer Vision. pp\u00a01406\u20131415","DOI":"10.1109\/ICCV.2019.00149"},{"key":"4890_CR23","doi-asserted-by":"crossref","unstructured":"Peng X, Usman B, Kaushik N, Wang D, Hoffman J, Saenko K (2018) Visda: A synthetic-to-real benchmark for visual domain adaptation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops. pp\u00a02021\u20132026","DOI":"10.1109\/CVPRW.2018.00271"},{"key":"4890_CR24","doi-asserted-by":"crossref","unstructured":"Qin Z, Yang L, Gao F, Hu Q, Shen C (2022) Uncertainty-aware aggregation for federated open set domain adaptation. IEEE Trans Neural Netw Learn Syst","DOI":"10.1109\/TNNLS.2022.3214930"},{"key":"4890_CR25","doi-asserted-by":"publisher","first-page":"107124","DOI":"10.1016\/j.patcog.2019.107124","volume":"100","author":"MM Rahman","year":"2020","unstructured":"Rahman MM, Fookes C, Baktashmotlagh M, Sridharan S (2020) Correlation-aware adversarial domain adaptation and generalization. Pattern Recognit 100:107124","journal-title":"Pattern Recognit"},{"issue":"5","key":"4890_CR26","doi-asserted-by":"publisher","first-page":"1989","DOI":"10.1109\/TNNLS.2020.2995648","volume":"32","author":"C-X Ren","year":"2020","unstructured":"Ren C-X, Ge P, Yang P, Yan S (2020) Learning target-domain-specific classifier for partial domain adaptation. IEEE Trans Neural Netw Learn Syst 32(5):1989\u20132001","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"4890_CR27","doi-asserted-by":"crossref","unstructured":"Ren Y, Cong Y, Dong J, Sun G (2022) Uni3da: Universal 3d domain adaptation for object recognition. IEEE Trans Circ Syst Video Technol 33:379\u2013392","DOI":"10.1109\/TCSVT.2022.3202213"},{"key":"4890_CR28","doi-asserted-by":"crossref","unstructured":"Saenko K, Kulis B, Fritz M, Darrell T (2010) Adapting visual category models to new domains. In: European Conference on Computer Vision. pp\u00a0213\u2013226","DOI":"10.1007\/978-3-642-15561-1_16"},{"key":"4890_CR29","unstructured":"Saito K, Kim D, Sclaroff S, Saenko K (2020) Universal domain adaptation through self supervision. In: Advances in Neural Information Processing Systems. pp\u00a016282\u201316292"},{"key":"4890_CR30","doi-asserted-by":"crossref","unstructured":"Saito K, Saenko K (2021) Ovanet: One-vs-all network for universal domain adaptation. In: IEEE International Conference on Computer Vision. pp\u00a09000\u20139009","DOI":"10.1109\/ICCV48922.2021.00887"},{"key":"4890_CR31","doi-asserted-by":"publisher","first-page":"2732","DOI":"10.1109\/TMM.2020.3016126","volume":"23","author":"T Shermin","year":"2020","unstructured":"Shermin T, Lu G, Teng SW, Murshed M, Sohel F (2020) Adversarial network with multiple classifiers for open set domain adaptation. IEEE Trans Multimedia 23:2732\u20132744","journal-title":"IEEE Trans Multimedia"},{"issue":"6","key":"4890_CR32","doi-asserted-by":"publisher","first-page":"3798","DOI":"10.1109\/TCSVT.2021.3116210","volume":"32","author":"Y Tian","year":"2021","unstructured":"Tian Y, Zhu S (2021) Partial domain adaptation on semantic segmentation. IEEE Trans Circ Syst Video Technol 32(6):3798\u20133809","journal-title":"IEEE Trans Circ Syst Video Technol"},{"issue":"11","key":"4890_CR33","first-page":"2579","volume":"9","author":"L Van der Maaten","year":"2008","unstructured":"Van der Maaten L, Hinton G (2008) Visualizing data using t-sne. J Mach Learn Res 9(11):2579\u20132605","journal-title":"J Mach Learn Res"},{"key":"4890_CR34","doi-asserted-by":"crossref","unstructured":"Venkateswara H, Eusebio J, Chakraborty S, Panchanathan S (2017) Deep hashing network for unsupervised domain adaptation. In: IEEE Conference on Computer Vision and Pattern Recognition. pp\u00a05018\u20135027","DOI":"10.1109\/CVPR.2017.572"},{"key":"4890_CR35","doi-asserted-by":"crossref","unstructured":"Wang W, Li H, Ding Z, Nie F, Chen J, Dong X, Wang Z (2021) Rethinking maximum mean discrepancy for visual domain adaptation. IEEE Trans Neural Netw Learn Syst 34:264\u2013277","DOI":"10.1109\/TNNLS.2021.3093468"},{"key":"4890_CR36","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1109\/TIP.2022.3226405","volume":"32","author":"W Wang","year":"2022","unstructured":"Wang W, Shen Z, Li D, Zhong P, Chen Y (2022) Probability-based graph embedding cross-domain and class discriminative feature learning for domain adaptation. IEEE Trans Image Process 32:72\u201387","journal-title":"IEEE Trans Image Process"},{"issue":"73","key":"4890_CR37","first-page":"1","volume":"23","author":"G Wynne","year":"2022","unstructured":"Wynne G, Duncan AB (2022) A kernel two-sample test for functional data. J Mach Learn Res 23(73):1\u201351","journal-title":"J Mach Learn Res"},{"key":"4890_CR38","first-page":"1","volume":"61","author":"Q Xu","year":"2023","unstructured":"Xu Q, Shi Y, Yuan X, Zhu XX (2023) Universal domain adaptation for remote sensing image scene classification. IEEE Trans Geosci Remote Sens 61:1\u201315","journal-title":"IEEE Trans Geosci Remote Sens"},{"key":"4890_CR39","doi-asserted-by":"crossref","unstructured":"Xu Y, Cao H, Mao K, Chen Z, Xie L, Yang J (2022) Aligning correlation information for domain adaptation in action recognition. IEEE Trans Neural Netw Learn Syst","DOI":"10.1109\/TNNLS.2022.3212909"},{"issue":"9","key":"4890_CR40","doi-asserted-by":"publisher","first-page":"2420","DOI":"10.1109\/TMM.2019.2953375","volume":"22","author":"H Yan","year":"2019","unstructured":"Yan H, Li Z, Wang Q, Li P, Xu Y, Zuo W (2019) Weighted and class-specific maximum mean discrepancy for unsupervised domain adaptation. IEEE Trans Multimedia 22(9):2420\u20132433","journal-title":"IEEE Trans Multimedia"},{"key":"4890_CR41","doi-asserted-by":"crossref","unstructured":"Ye Y, Fu S, Chen J (2023) Learning cross-domain representations by vision transformer for unsupervised domain adaptation. Neural Comput Appl 35:1\u201314","DOI":"10.1007\/s00521-023-08269-7"},{"key":"4890_CR42","doi-asserted-by":"publisher","first-page":"108238","DOI":"10.1016\/j.patcog.2021.108238","volume":"121","author":"Y Yin","year":"2022","unstructured":"Yin Y, Yang Z, Hu H, Wu X (2022) Universal multi-source domain adaptation for image classification. Pattern Recogn 121:108238","journal-title":"Pattern Recogn"},{"key":"4890_CR43","doi-asserted-by":"crossref","unstructured":"You K, Long M, Cao Z, Wang J, Jordan MI (2019) Universal domain adaptation. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp\u00a02720\u20132729","DOI":"10.1109\/CVPR.2019.00283"},{"key":"4890_CR44","doi-asserted-by":"crossref","unstructured":"Yue X, Zheng Z, Zhang S, Gao Y, Darrell T, Keutzer K, Vincentelli A\u00a0S (2021) Prototypical cross-domain self-supervised learning for few-shot unsupervised domain adaptation. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp\u00a013834\u201313844","DOI":"10.1109\/CVPR46437.2021.01362"},{"key":"4890_CR45","first-page":"1","volume":"19","author":"S Zhang","year":"2022","unstructured":"Zhang S, Chen Z, Wang D, Wang ZJ (2022) Cross-domain few-shot contrastive learning for hyperspectral images classification. IEEE Geosci Remote Sens Lett 19:1\u20135","journal-title":"IEEE Geosci Remote Sens Lett"},{"issue":"11","key":"4890_CR46","doi-asserted-by":"publisher","first-page":"7445","DOI":"10.1109\/TII.2021.3054651","volume":"17","author":"W Zhang","year":"2021","unstructured":"Zhang W, Li X, Ma H, Luo Z, Li X (2021) Open-set domain adaptation in machinery fault diagnostics using instance-level weighted adversarial learning. IEEE Trans Ind Inform 17(11):7445\u20137455","journal-title":"IEEE Trans Ind Inform"},{"issue":"8","key":"4890_CR47","doi-asserted-by":"publisher","first-page":"2399","DOI":"10.1007\/s11263-021-01479-3","volume":"129","author":"S Zhao","year":"2021","unstructured":"Zhao S, Li B, Xu P, Yue X, Ding G, Keutzer K (2021) Madan: multi-source adversarial domain aggregation network for domain adaptation. Int J Comput Vis 129(8):2399\u20132424","journal-title":"Int J Comput Vis"},{"issue":"2","key":"4890_CR48","doi-asserted-by":"publisher","first-page":"473","DOI":"10.1109\/TNNLS.2020.3028503","volume":"33","author":"S Zhao","year":"2022","unstructured":"Zhao S, Yue X, Zhang S, Li B, Zhao H, Wu B, Krishna R, Gonzalez JE, Sangiovanni-Vincentelli AL, Seshia SA et al (2022) A review of single-source deep unsupervised visual domain adaptation. IEEE Trans Neural Netw Learn Syst 33(2):473\u2013493","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"7","key":"4890_CR49","doi-asserted-by":"publisher","first-page":"7862","DOI":"10.1007\/s10489-022-03805-9","volume":"53","author":"X Zhao","year":"2023","unstructured":"Zhao X, Wang S, Sun Q (2023) Open-set domain adaptation by deconfounding domain gaps. Appl Intell 53(7):7862\u20137875","journal-title":"Appl Intell"},{"issue":"5","key":"4890_CR50","doi-asserted-by":"publisher","first-page":"2605","DOI":"10.1109\/TCBB.2021.3066331","volume":"19","author":"J Zhou","year":"2021","unstructured":"Zhou J, Jing B, Wang Z, Xin H, Tong H (2021) Soda: Detecting covid-19 in chest x-rays with semi-supervised open set domain adaptation. IEEE\/ACM Trans Comput Biol Bioinforma 19(5):2605\u20132612","journal-title":"IEEE\/ACM Trans Comput Biol Bioinforma"},{"key":"4890_CR51","doi-asserted-by":"publisher","first-page":"5689","DOI":"10.1109\/JSTARS.2022.3190699","volume":"15","author":"Y Zhu","year":"2022","unstructured":"Zhu Y, Sun X, Diao W, Li H, Fu K (2022) Rfa-net: Reconstructed feature alignment network for domain adaptation object detection in remote sensing imagery. IEEE J Sel Top Appl Earth Obs Remote Sens 15:5689\u20135703","journal-title":"IEEE J Sel Top Appl Earth Obs Remote Sens"},{"issue":"5","key":"4890_CR52","doi-asserted-by":"publisher","first-page":"175334","DOI":"10.1007\/s11704-022-1349-5","volume":"17","author":"Y Zhu","year":"2023","unstructured":"Zhu Y, Wu X, Qiang J, Yuan Y, Li Y (2023) Representation learning via an integrated autoencoder for unsupervised domain adaptation. Front Comput Sci 17(5):175334","journal-title":"Front Comput Sci"},{"key":"4890_CR53","doi-asserted-by":"crossref","unstructured":"Caputo B, M\u00fcller H, Martinez-Gomez J, Villegas M, Acar B, Patricia N, Marvasti N, \u00dcsk\u00fcdarl\u0131 S, Paredes R, Cazorla M, et\u00a0al (2014) Imageclef 2014: Overview and analysis of the results. In: Information Access Evaluation. Multilinguality, Multimodality, and Interaction. pp\u00a0192\u2013211","DOI":"10.1007\/978-3-319-11382-1_18"},{"key":"4890_CR54","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S, Uszkoreit J, Houlsby N (2021) An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations"},{"key":"4890_CR55","unstructured":"Howard AG, Zhu M, Chen B, Kalenichenko D, Wang W, Weyand T, Andreetto M, Adam H (2017) Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861"},{"key":"4890_CR56","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin transformer: Hierarchical vision transformer using shifted windows. In: IEEE International Conference on Computer Vision. pp\u00a010012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"4890_CR57","doi-asserted-by":"crossref","unstructured":"Liu Z, Mao H, Wu C-Y, Feichtenhofer C, Darrell T, Xie S (2022) A convnet for the 2020s. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp\u00a011976\u201311986","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"4890_CR58","unstructured":"Tan M, Le Q (2019) Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning. pp.\u00a06105\u20136114"},{"key":"4890_CR59","doi-asserted-by":"crossref","unstructured":"Xie S, Girshick R, Doll\u00e1r P, Tu Z, He K (2017) Aggregated residual transformations for deep neural networks. In: IEEE Conference on Computer Vision and Pattern Recognition, pp\u00a01492\u20131500","DOI":"10.1109\/CVPR.2017.634"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-023-04890-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-023-04890-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-023-04890-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,25]],"date-time":"2023-10-25T23:14:29Z","timestamp":1698275669000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-023-04890-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,5]]},"references-count":59,"journal-issue":{"issue":"22","published-print":{"date-parts":[[2023,11]]}},"alternative-id":["4890"],"URL":"https:\/\/doi.org\/10.1007\/s10489-023-04890-0","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,9,5]]},"assertion":[{"value":"14 July 2023","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 September 2023","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no competing interests that are relevant to the content of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}]}}