{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T04:56:21Z","timestamp":1775278581964,"version":"3.50.1"},"reference-count":54,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,11,15]],"date-time":"2024-11-15T00:00:00Z","timestamp":1731628800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2024,11,15]],"date-time":"2024-11-15T00:00:00Z","timestamp":1731628800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/100022963","name":"Key Research and Development Program of Zhejiang Province","doi-asserted-by":"publisher","award":["No. 2021C03145"],"award-info":[{"award-number":["No. 2021C03145"]}],"id":[{"id":"10.13039\/100022963","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. Syst."],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s40747-024-01642-6","type":"journal-article","created":{"date-parts":[[2024,11,15]],"date-time":"2024-11-15T05:28:11Z","timestamp":1731648491000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Enhancing zero-shot relation extraction with a dual contrastive learning framework and a cross-attention module"],"prefix":"10.1007","volume":"11","author":[{"given":"Diyou","family":"Li","sequence":"first","affiliation":[]},{"given":"Lijuan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jie","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Neal","family":"Xiong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6400-1884","authenticated-orcid":false,"given":"Lei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jian","family":"Wan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,15]]},"reference":[{"key":"1642_CR1","doi-asserted-by":"publisher","unstructured":"Carlson A, Betteridge J, Kisiel B, Settles B, Hruschka E, Mitchell T (2010) Toward an architecture for never-ending language learning 24:1306\u20131313. https:\/\/doi.org\/10.1609\/aaai.v24i1.7519","DOI":"10.1609\/aaai.v24i1.7519"},{"key":"1642_CR2","doi-asserted-by":"publisher","first-page":"134","DOI":"10.1007\/978-3-642-39844-5_16","volume-title":"Smart Health","author":"X Liu","year":"2013","unstructured":"Liu X, Chen H (2013) Azdrugminer: an information extraction system for mining patient-reported adverse drug events in online patient forums. In: Zeng D, Yang CC, Tseng VS, Xing C, Chen H, Wang F-Y, Zheng X (eds) Smart Health. Springer, Berlin, pp 134\u2013150"},{"key":"1642_CR3","doi-asserted-by":"publisher","first-page":"123","DOI":"10.1016\/j.isatra.2023.07.043","volume":"142","author":"R Wang","year":"2023","unstructured":"Wang R, Zhuang Z, Tao H, Paszke W, Stojanovic V (2023) Q-learning based fault estimation and fault tolerant iterative learning control for mimo systems. ISA Trans 142:123\u2013135. https:\/\/doi.org\/10.1016\/j.isatra.2023.07.043","journal-title":"ISA Trans"},{"key":"1642_CR4","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3264735","author":"D Sui","year":"2023","unstructured":"Sui D, Zeng X, Chen Y, Liu K, Zhao J (2023) Joint entity and relation extraction with set prediction networks. IEEE Trans Neural Netw Learn Syst. https:\/\/doi.org\/10.1109\/TNNLS.2023.3264735","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"1642_CR5","doi-asserted-by":"publisher","first-page":"14257","DOI":"10.1609\/aaai.v35i16.17677","volume":"35","author":"H Ye","year":"2021","unstructured":"Ye H, Zhang N, Deng S, Chen M, Tan C, Huang F, Chen H (2021) Contrastive triple extraction with generative transformer. Proc AAAI Conf Artif Intell 35:14257\u201314265. https:\/\/doi.org\/10.1609\/aaai.v35i16.17677","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"1642_CR6","doi-asserted-by":"publisher","unstructured":"Baldini\u00a0Soares L, FitzGerald N, Ling J, Kwiatkowski T (2019) Matching the blanks: Distributional similarity for relation learning. In: Korhonen A, Traum D, M\u00e0rquez L (eds) Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics, Florence, pp 2895\u20132905. https:\/\/doi.org\/10.18653\/v1\/P19-1279","DOI":"10.18653\/v1\/P19-1279"},{"key":"1642_CR7","doi-asserted-by":"publisher","unstructured":"Sun H, Grishman R (2022) Lexicalized dependency paths based supervised learning for relation extraction. Comput Syst Sci Eng 43(3). https:\/\/doi.org\/10.32604\/csse.2022.030759","DOI":"10.32604\/csse.2022.030759"},{"key":"1642_CR8","doi-asserted-by":"crossref","unstructured":"Mintz M, Bills S, Snow R, Jurafsky D (2009) Distant supervision for relation extraction without labeled data. In: Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pp 1003\u20131011. https:\/\/aclanthology.org\/P09-1113","DOI":"10.3115\/1690219.1690287"},{"key":"1642_CR9","unstructured":"Wang Z, Wen R, Chen X, Huang S.-L, Zhang N, Zheng Y (2022) Finding influential instances for distantly supervised relation extraction. In: Calzolari N, Huang C-R, Kim H, Pustejovsky J, Wanner L, Choi K-S, Ryu P-M, Chen H-H, Donatelli L, Ji H, Kurohashi S, Paggio P, Xue N, Kim S, Hahm Y, He Z, Lee TK, Santus E, Bond F, Na S-H (eds) Proceedings of the 29th International Conference on Computational Linguistics. International Committee on Computational Linguistics, Gyeongju, pp 2639\u20132650. https:\/\/aclanthology.org\/2022.coling-1.233"},{"key":"1642_CR10","doi-asserted-by":"publisher","unstructured":"Ye Z-X, Ling Z-H (2019) Distant supervision relation extraction with intra-bag and inter-bag attentions. In: Burstein J, Doran C, Solorio T (eds) Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). Association for Computational Linguistics, Minneapolis, pp 2810\u20132819. https:\/\/doi.org\/10.18653\/v1\/N19-1288","DOI":"10.18653\/v1\/N19-1288"},{"key":"1642_CR11","doi-asserted-by":"publisher","first-page":"62574","DOI":"10.1109\/ACCESS.2021.3073428","volume":"9","author":"D Christou","year":"2021","unstructured":"Christou D, Tsoumakas G (2021) Improving distantly-supervised relation extraction through bert-based label and instance embeddings. IEEE Access 9:62574\u201362582. https:\/\/doi.org\/10.1109\/ACCESS.2021.3073428","journal-title":"IEEE Access"},{"issue":"11","key":"1642_CR12","doi-asserted-by":"publisher","DOI":"10.1088\/1361-6501\/ac8368","volume":"33","author":"H Tao","year":"2022","unstructured":"Tao H, Cheng L, Qiu J, Stojanovic V (2022) Few shot cross equipment fault diagnosis method based on parameter optimization and feature mertic. Meas Sci Technol 33(11):115005. https:\/\/doi.org\/10.1088\/1361-6501\/ac8368","journal-title":"Meas Sci Technol"},{"issue":"3","key":"1642_CR13","doi-asserted-by":"publisher","first-page":"3179","DOI":"10.1609\/aaai.v36i3.20226","volume":"36","author":"T Yu","year":"2022","unstructured":"Yu T, He S, Song Y-Z, Xiang T (2022) Hybrid graph neural networks for few-shot learning. Proc AAAI Conf Artif Intell 36(3):3179\u20133187. https:\/\/doi.org\/10.1609\/aaai.v36i3.20226","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"1642_CR14","doi-asserted-by":"publisher","unstructured":"Levy O, Seo M, Choi E, Zettlemoyer L (2017) Zero-shot relation extraction via reading comprehension. In: Levy R, Specia L (eds) Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017). Association for Computational Linguistics, Vancouver, pp 333\u2013342. https:\/\/doi.org\/10.18653\/v1\/K17-1034","DOI":"10.18653\/v1\/K17-1034"},{"key":"1642_CR15","doi-asserted-by":"publisher","unstructured":"Obamuyide A, Vlachos A (2018) Zero-shot relation classification as textual entailment. In: Thorne J, Vlachos A, Cocarascu O, Christodoulopoulos C, Mittal A (eds) Proceedings of the First Workshop on Fact Extraction and VERification (FEVER). Association for Computational Linguistics, Brussels, pp 72\u201378. https:\/\/doi.org\/10.18653\/v1\/W18-5511","DOI":"10.18653\/v1\/W18-5511"},{"key":"1642_CR16","doi-asserted-by":"publisher","unstructured":"Chen C-Y, Li C-T (2021) Zs-bert: Towards zero-shot relation extraction with attribute representation learning. In: Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp 3470\u20133479. https:\/\/doi.org\/10.18653\/v1\/2021.naacl-main.272","DOI":"10.18653\/v1\/2021.naacl-main.272"},{"key":"1642_CR17","doi-asserted-by":"publisher","unstructured":"Wang S, Zhang B, Xu Y, Wu Y, Xiao B (2022) Rcl: Relation contrastive learning for zero-shot relation extraction. In: Findings of the Association for Computational Linguistics: NAACL 2022, pp 2456\u20132468. https:\/\/doi.org\/10.18653\/v1\/2022.findings-naacl.188","DOI":"10.18653\/v1\/2022.findings-naacl.188"},{"key":"1642_CR18","doi-asserted-by":"publisher","first-page":"193907","DOI":"10.1109\/ACCESS.2020.3031549","volume":"8","author":"PH Le-Khac","year":"2020","unstructured":"Le-Khac PH, Healy G, Smeaton AF (2020) Contrastive representation learning: A framework and review. IEEE Access 8:193907\u2013193934. https:\/\/doi.org\/10.1109\/ACCESS.2020.3031549","journal-title":"IEEE Access"},{"key":"1642_CR19","doi-asserted-by":"publisher","unstructured":"Yan Y, Li R, Wang S, Zhang F, Wu W, Xu W (2021) ConSERT: A contrastive framework for self-supervised sentence representation transfer. In: Zong C, Xia F, Li W, Navigli R (eds) Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). Association for Computational Linguistics, pp 5065\u20135075. https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.393","DOI":"10.18653\/v1\/2021.acl-long.393"},{"key":"1642_CR20","doi-asserted-by":"publisher","unstructured":"Gao T, Yao X, Chen D (2021) SimCSE: Simple contrastive learning of sentence embeddings. In: Moens M-F, Huang X, Specia L, Yih SW-T (eds) Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, Online and Punta Cana, pp 6894\u20136910. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.552","DOI":"10.18653\/v1\/2021.emnlp-main.552"},{"key":"1642_CR21","doi-asserted-by":"publisher","unstructured":"Zhang D, Nan F, Wei X, Li S-W, Zhu H, McKeown K, Nallapati R, Arnold AO, Xiang B (2021) Supporting clustering with contrastive learning. In: Toutanova K, Rumshisky A, Zettlemoyer L, Hakkani-Tur D, Beltagy I, Bethard S, Cotterell R, Chakraborty T, Zhou Y (eds) Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics, pp 5419\u20135430. https:\/\/doi.org\/10.18653\/v1\/2021.naacl-main.427","DOI":"10.18653\/v1\/2021.naacl-main.427"},{"key":"1642_CR22","unstructured":"Bahdanau D, Cho K, Bengio Y (2014) Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473"},{"key":"1642_CR23","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser \u0141, Polosukhin I (2017) Attention is all you need. Adv Neural Inf Process Syst. 30"},{"key":"1642_CR24","doi-asserted-by":"publisher","first-page":"325","DOI":"10.1016\/j.neucom.2019.01.078","volume":"337","author":"G Liu","year":"2019","unstructured":"Liu G, Guo J (2019) Bidirectional lstm with attention mechanism and convolutional layer for text classification. Neurocomputing 337:325\u2013338. https:\/\/doi.org\/10.1016\/j.neucom.2019.01.078","journal-title":"Neurocomputing"},{"key":"1642_CR25","unstructured":"Hou R, Chang H, Ma B, Shan S, Chen X (2019) Cross attention network for few-shot classification. Adv Neural Inf Process Syst, 32"},{"key":"1642_CR26","unstructured":"Liu Y, Ott M, Goyal N, Du J, Joshi M, Chen D, Levy O, Lewis M, Zettlemoyer L, Stoyanov V (2019) Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692"},{"key":"1642_CR27","doi-asserted-by":"publisher","unstructured":"Ma W, Cui Y, Si C, Liu T, Wang S, Hu G (2020) CharBERT: Character-aware pre-trained language model. In: Scott D, Bel N, Zong C (eds) Proceedings of the 28th International Conference on Computational Linguistics. International Committee on Computational Linguistics, Barcelona, pp 39\u201350 (Online). https:\/\/doi.org\/10.18653\/v1\/2020.coling-main.4","DOI":"10.18653\/v1\/2020.coling-main.4"},{"key":"1642_CR28","doi-asserted-by":"publisher","unstructured":"Wu S, He Y (2019) Enriching pre-trained language model with entity information for relation classification. In: Proceedings of the 28th ACM International Conference on Information and Knowledge Management, pp 2361\u20132364. https:\/\/doi.org\/10.1145\/3357384.3358119","DOI":"10.1145\/3357384.3358119"},{"key":"1642_CR29","doi-asserted-by":"publisher","unstructured":"Liang X, Wu S, Li M, Li Z (2022) Modeling multi-granularity hierarchical features for relation extraction. In: Carpuat M, Marneffe M.-C, Meza\u00a0Ruiz IV (eds) Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics, Seattle, pp 5088\u20135098. https:\/\/doi.org\/10.18653\/v1\/2022.naacl-main.375","DOI":"10.18653\/v1\/2022.naacl-main.375"},{"key":"1642_CR30","doi-asserted-by":"publisher","first-page":"269","DOI":"10.1016\/j.ins.2021.10.047","volume":"584","author":"Y-M Shang","year":"2022","unstructured":"Shang Y-M, Huang H, Sun X, Wei W, Mao X-L (2022) A pattern-aware self-attention network for distant supervised relation extraction. Inf Sci 584:269\u2013279. https:\/\/doi.org\/10.1016\/j.ins.2021.10.047","journal-title":"Inf Sci"},{"key":"1642_CR31","doi-asserted-by":"publisher","unstructured":"Cetoli A (2020) Exploring the zero-shot limit of fewrel. In: Proceedings of the 28th International Conference on Computational Linguistics, pp 1447\u20131451. https:\/\/doi.org\/10.18653\/v1\/2020.coling-main.124","DOI":"10.18653\/v1\/2020.coling-main.124"},{"key":"1642_CR32","unstructured":"Bragg J, Cohan A, Lo K, Beltagy I (2021) Flex: Unifying evaluation for few-shot nlp. Adv Neural Inf Process Syst 34:15787\u201315800"},{"key":"1642_CR33","doi-asserted-by":"publisher","unstructured":"Najafi S, Fyshe A (2023) Weakly-supervised questions for zero-shot relation extraction. In: Vlachos A, Augenstein I (eds) Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics. Association for Computational Linguistics, Dubrovnik, pp 3075\u20133087. https:\/\/doi.org\/10.18653\/v1\/2023.eacl-main.224","DOI":"10.18653\/v1\/2023.eacl-main.224"},{"key":"1642_CR34","unstructured":"Socher R, Ganjoo M, Manning CD, Ng A (2013) Zero-shot learning through cross-modal transfer. Adv Neural Inf Process Syst, 26"},{"key":"1642_CR35","doi-asserted-by":"publisher","unstructured":"Sainz O, Lacalle O, Labaka G, Barrena A, Agirre E (2021) Label verbalization and entailment for effective zero and few-shot relation extraction. In: Moens M-F, Huang X, Specia L, Yih SW-T (eds) Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, Online and Punta Cana, pp 1199\u20131212. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.92","DOI":"10.18653\/v1\/2021.emnlp-main.92"},{"key":"1642_CR36","doi-asserted-by":"publisher","unstructured":"Liu F, Lin H, Han X, Cao B, Sun L (2022) Pre-training to match for unified low-shot relation extraction. In: Muresan S, Nakov P, Villavicencio A (eds) Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, Dublin, pp 5785\u20135795. https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.397","DOI":"10.18653\/v1\/2022.acl-long.397"},{"key":"1642_CR37","doi-asserted-by":"publisher","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2019) Bert: Pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp 4171\u20134186. https:\/\/doi.org\/10.18653\/v1\/N19-1423","DOI":"10.18653\/v1\/N19-1423"},{"issue":"8","key":"1642_CR38","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford A, Wu J, Child R, Luan D, Amodei D, Sutskever I et al (2019) Language models are unsupervised multitask learners. OpenAI blog 1(8):9","journal-title":"OpenAI blog"},{"key":"1642_CR39","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown T, Mann B, Ryder N, Subbiah M, Kaplan JD, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A et al (2020) Language models are few-shot learners. Adv Neural Inf Process Syst 33:1877\u20131901","journal-title":"Adv Neural Inf Process Syst"},{"key":"1642_CR40","doi-asserted-by":"publisher","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 7132\u20137141. https:\/\/doi.org\/10.1109\/CVPR.2018.00745","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1642_CR41","doi-asserted-by":"publisher","unstructured":"Woo S, Park J, Lee J-Y, Kweon IS (2018) Cbam: Convolutional block attention module. In: Proceedings of the European Conference on Computer Vision (ECCV), pp 3\u201319. https:\/\/doi.org\/10.1007\/978-3-030-01234-2_1","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"1642_CR42","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S, et al (2020) An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929"},{"key":"1642_CR43","unstructured":"Liu Y, Shao Z, Hoffmann N (2021) Global attention mechanism: Retain information to enhance channel-spatial interactions. arXiv preprint arXiv:2112.05561"},{"key":"1642_CR44","unstructured":"Chen Q, Zhang R, Zheng Y, Mao Y (2022) Dual contrastive learning: text classification via label-aware data augmentation. arXiv preprint arXiv:2201.08702"},{"key":"1642_CR45","doi-asserted-by":"publisher","unstructured":"Reimers N, Gurevych I (2019) Sentence-bert: Sentence embeddings using siamese bert-networks. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp 3982\u20133992. https:\/\/doi.org\/10.18653\/v1\/D19-1410","DOI":"10.18653\/v1\/D19-1410"},{"key":"1642_CR46","doi-asserted-by":"publisher","unstructured":"Sennrich R, Haddow B, Birch A (2016) Neural machine translation of rare words with subword units. In: Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp 1715\u20131725. https:\/\/doi.org\/10.18653\/v1\/P16-1162","DOI":"10.18653\/v1\/P16-1162"},{"key":"1642_CR47","doi-asserted-by":"publisher","unstructured":"Han X, Zhu H, Yu P, Wang Z, Yao Y, Liu Z, Sun M (2018) Fewrel: A large-scale supervised few-shot relation classification dataset with state-of-the-art evaluation. In: Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp 4803\u20134809. https:\/\/doi.org\/10.18653\/v1\/D18-1514","DOI":"10.18653\/v1\/D18-1514"},{"key":"1642_CR48","doi-asserted-by":"publisher","unstructured":"Sorokin D, Gurevych I (2017) Context-aware representations for knowledge base relation extraction. In: Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp 1784\u20131789. https:\/\/doi.org\/10.18653\/v1\/D17-1188","DOI":"10.18653\/v1\/D17-1188"},{"key":"1642_CR49","unstructured":"Kingma DP, Ba J (2014) Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980"},{"key":"1642_CR50","doi-asserted-by":"publisher","unstructured":"Chen Q, Zhu X, Ling Z.-H, Wei S, Jiang H, Inkpen D (2017) Enhanced LSTM for natural language inference. In: Barzilay R, Kan M-Y (eds) Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, Vancouver, pp 1657\u20131668. https:\/\/doi.org\/10.18653\/v1\/P17-1152","DOI":"10.18653\/v1\/P17-1152"},{"key":"1642_CR51","unstructured":"Rockt\u00e4schel T, Grefenstette E, Hermann KM, Ko\u010disk\u1ef3 T, Blunsom P (2015) Reasoning about entailment with neural attention. arXiv preprint arXiv:1509.06664"},{"issue":"8","key":"1642_CR52","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter S, Schmidhuber J (1997) Long short-term memory. Neural Comput 9(8):1735\u20131780. https:\/\/doi.org\/10.1162\/neco.1997.9.8.1735","journal-title":"Neural Comput"},{"issue":"5\u20136","key":"1642_CR53","doi-asserted-by":"publisher","first-page":"602","DOI":"10.1016\/j.neunet.2005.06.042","volume":"18","author":"A Graves","year":"2005","unstructured":"Graves A, Schmidhuber J (2005) Framewise phoneme classification with bidirectional lstm and other neural network architectures. Neural Netw 18(5\u20136):602\u2013610. https:\/\/doi.org\/10.1016\/j.neunet.2005.06.042","journal-title":"Neural Netw"},{"key":"1642_CR54","unstructured":"Maaten L, Hinton G (2008) Visualizing data using t-sne. J Mach Learn Res 9(11)"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01642-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-024-01642-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01642-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,30]],"date-time":"2025-01-30T20:18:32Z","timestamp":1738268312000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-024-01642-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,15]]},"references-count":54,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["1642"],"URL":"https:\/\/doi.org\/10.1007\/s40747-024-01642-6","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,15]]},"assertion":[{"value":"25 October 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 August 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 November 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors disclosed no relevant relationships","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"Not applicable","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}}],"article-number":"42"}}