{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T14:21:32Z","timestamp":1762957292213,"version":"3.40.3"},"publisher-location":"Cham","reference-count":40,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031466700"},{"type":"electronic","value":"9783031466717"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-46671-7_2","type":"book-chapter","created":{"date-parts":[[2023,11,4]],"date-time":"2023-11-04T11:02:26Z","timestamp":1699095746000},"page":"18-32","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Soft Prompt Transfer for\u00a0Zero-Shot and\u00a0Few-Shot Learning in\u00a0EHR Understanding"],"prefix":"10.1007","author":[{"given":"Yang","family":"Wang","sequence":"first","affiliation":[]},{"given":"Xueping","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Allison","family":"Clarke","sequence":"additional","affiliation":[]},{"given":"Clement","family":"Schlegel","sequence":"additional","affiliation":[]},{"given":"Paul","family":"Martin","sequence":"additional","affiliation":[]},{"given":"Guodong","family":"Long","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,5]]},"reference":[{"key":"2_CR1","unstructured":"Aribandi, V., et al.: Ext5: towards extreme multi-task scaling for transfer learning. arXiv preprint arXiv:2111.10952 (2021)"},{"key":"2_CR2","doi-asserted-by":"crossref","unstructured":"Beltagy, I., Lo, K., Cohan, A.: SciBERT: a pretrained language model for scientific text. arXiv preprint arXiv:1903.10676 (2019)","DOI":"10.18653\/v1\/D19-1371"},{"key":"2_CR3","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T., et al.: Language models are few-shot learners. Adv. Neural. Inf. Process. Syst. 33, 1877\u20131901 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"2_CR4","first-page":"22243","volume":"33","author":"T Chen","year":"2020","unstructured":"Chen, T., Kornblith, S., Swersky, K., Norouzi, M., Hinton, G.E.: Big self-supervised models are strong semi-supervised learners. NeurIPS 33, 22243\u201322255 (2020)","journal-title":"NeurIPS"},{"key":"2_CR5","doi-asserted-by":"crossref","unstructured":"Choi, E., Xu, Z., Li, Y., Dusenberry, M., Flores, G., Xue, E., Dai, A.: Learning the graphical structure of electronic health records with graph convolutional transformer. In: Proceedings of the AAAI. vol. 34, pp. 606\u2013613 (2020)","DOI":"10.1609\/aaai.v34i01.5400"},{"key":"2_CR6","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. 
arXiv preprint arXiv:1810.04805 (2018)"},{"key":"2_CR7","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT, pp. 4171\u20134186. ACL (2019)"},{"key":"2_CR8","doi-asserted-by":"crossref","unstructured":"Gu, Y., Han, X., Liu, Z., Huang, M.: Ppt: Pre-trained prompt tuning for few-shot learning. arXiv preprint arXiv:2109.04332 (2021)","DOI":"10.18653\/v1\/2022.acl-long.576"},{"key":"2_CR9","doi-asserted-by":"crossref","unstructured":"Gururangan, S., et al.: Don\u2019t stop pretraining: adapt language models to domains and tasks. arXiv preprint arXiv:2004.10964 (2020)","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"2_CR10","doi-asserted-by":"publisher","first-page":"225","DOI":"10.1016\/j.aiopen.2021.08.002","volume":"2","author":"X Han","year":"2021","unstructured":"Han, X., et al.: Pre-trained models: past, present and future. AI Open 2, 225\u2013250 (2021)","journal-title":"AI Open"},{"key":"2_CR11","doi-asserted-by":"publisher","first-page":"423","DOI":"10.1162\/tacl_a_00324","volume":"8","author":"Z Jiang","year":"2020","unstructured":"Jiang, Z., Xu, F.F., Araki, J., Neubig, G.: How can we know what language models know? Trans. Assoc. Comput. Linguist. 8, 423\u2013438 (2020)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"2_CR12","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2016.35","volume":"3","author":"AE Johnson","year":"2016","unstructured":"Johnson, A.E., et al.: Mimic-iii, a freely accessible critical care database. Sci. Data 3, 160035 (2016)","journal-title":"Sci. Data"},{"key":"2_CR13","unstructured":"Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P., Soricut, R.: Albert: a lite BERT for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942 (2019)"},{"key":"2_CR14","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"2_CR15","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"2_CR16","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. In: Proceedings of ACL\/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1\u20136, 2021. pp. 4582\u20134597. ACL (2021)"},{"issue":"1","key":"2_CR17","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1038\/s41598-019-56847-4","volume":"10","author":"Y Li","year":"2020","unstructured":"Li, Y., et al.: BEHRT: transformer for electronic health records. Sci. Rep. 10(1), 1\u201312 (2020)","journal-title":"Sci. Rep."},{"issue":"9","key":"2_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3560815","volume":"55","author":"P Liu","year":"2023","unstructured":"Liu, P., Yuan, W., Fu, J., Jiang, Z., Hayashi, H., Neubig, G.: Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing. ACM Comput. Surv. 55(9), 1\u201335 (2023)","journal-title":"ACM Comput. Surv."},{"key":"2_CR19","doi-asserted-by":"crossref","unstructured":"Liu, X., Ji, K., Fu, Y., Du, Z., Yang, Z., Tang, J.: P-tuning v2: prompt tuning can be comparable to fine-tuning universally across scales and tasks. 
CoRR abs\/2110.07602 (2021)","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"2_CR20","doi-asserted-by":"crossref","unstructured":"Liu, X., et al.: P-tuning v2: prompt tuning can be comparable to fine-tuning universally across scales and tasks. arXiv preprint arXiv:2110.07602 (2021)","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"2_CR21","unstructured":"OpenAI: Gpt-4 technical report (2023)"},{"issue":"10","key":"2_CR22","doi-asserted-by":"publisher","first-page":"1345","DOI":"10.1109\/TKDE.2009.191","volume":"22","author":"SJ Pan","year":"2010","unstructured":"Pan, S.J., Yang, Q.: A survey on transfer learning. IEEE Trans. Knowl. Data Eng. 22(10), 1345\u20131359 (2010)","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"2_CR23","doi-asserted-by":"crossref","unstructured":"Peng, X., Long, G., Shen, T., Wang, S., Jiang, J.: Sequential diagnosis prediction with transformer and ontological representation. In: 2021 IEEE International Conference on Data Mining (ICDM), pp. 489\u2013498. IEEE (2021)","DOI":"10.1109\/ICDM51629.2021.00060"},{"key":"2_CR24","doi-asserted-by":"crossref","unstructured":"Peng, X., Long, G., Shen, T., Wang, S., Jiang, J., Zhang, C.: Bitenet: bidirectional temporal encoder network to predict medical outcomes. In: 2020 IEEE International Conference on Data Mining (ICDM), pp. 412\u2013421. IEEE (2020)","DOI":"10.1109\/ICDM50108.2020.00050"},{"key":"2_CR25","unstructured":"Peng, X., et al.: MIPO: mutual integration of patient journey and medical ontology for healthcare representation learning. arXiv preprint arXiv:2107.09288 (2021)"},{"key":"2_CR26","doi-asserted-by":"crossref","unstructured":"Qin, G., Eisner, J.: Learning how to ask: querying LMS with mixtures of soft prompts. arXiv preprint arXiv:2104.06599 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.410"},{"key":"2_CR27","unstructured":"Radford, A., Narasimhan, K., Salimans, T., Sutskever, I., et al.: Improving language understanding by generative pre-training (2018)"},{"key":"2_CR28","doi-asserted-by":"crossref","unstructured":"Ren, H., Wang, J., Zhao, W.X., Wu, N.: Rapt: Pre-training of time-aware transformer for learning robust healthcare representation. In: Proceedings of the 27th ACM SIGKDD, pp. 3503\u20133511 (2021)","DOI":"10.1145\/3447548.3467069"},{"key":"2_CR29","doi-asserted-by":"crossref","unstructured":"Schick, T., Sch\u00fctze, H.: It\u2019s not just size that matters: Small language models are also few-shot learners. In: Proceedings of the 2021 Conference of the NAACL: Human Language Technologies, pp. 2339\u20132352 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.185"},{"key":"2_CR30","doi-asserted-by":"crossref","unstructured":"Shin, T., Razeghi, Y., Logan IV, R.L., Wallace, E., Singh, S.: AutoPrompt: eliciting knowledge from language models with automatically generated prompts. arXiv preprint arXiv:2010.15980 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.346"},{"key":"2_CR31","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2021.103726","volume":"116","author":"Y Si","year":"2021","unstructured":"Si, Y., Bernstam, E.V., Roberts, K.: Generalized and transferable patient language representation for phenotyping with limited data. J. Biomed. Inform. 116, 103726 (2021)","journal-title":"J. Biomed. 
Inform."},{"key":"2_CR32","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2020.103637","volume":"113","author":"E Steinberg","year":"2021","unstructured":"Steinberg, E., Jung, K., Fries, J.A., Corbin, C.K., Pfohl, S.R., Shah, N.H.: Language models are an effective representation learning technique for electronic health record data. J. Biomed. Inform. 113, 103637 (2021)","journal-title":"J. Biomed. Inform."},{"key":"2_CR33","doi-asserted-by":"crossref","unstructured":"Taylor, N., Zhang, Y., Joyce, D., Nevado-Holgado, A., Kormilitzin, A.: Clinical prompt learning with frozen language models. arXiv preprint arXiv:2205.05535 (2022)","DOI":"10.1109\/TNNLS.2023.3294633"},{"key":"2_CR34","doi-asserted-by":"crossref","unstructured":"Thrun, S., Pratt, L.: Learning to learn: Introduction and overview. learning to learn, pp. 3\u201317 (1998)","DOI":"10.1007\/978-1-4615-5529-2_1"},{"key":"2_CR35","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS 2017, December 4\u20139, 2017, Long Beach, CA, USA, pp. 5998\u20136008 (2017)"},{"key":"2_CR36","doi-asserted-by":"crossref","unstructured":"Vu, T., Lester, B., Constant, N., Al-Rfou, R., Cer, D.: Spot: better frozen model adaptation through soft prompt transfer. arXiv preprint arXiv:2110.07904 (2021)","DOI":"10.18653\/v1\/2022.acl-long.346"},{"key":"2_CR37","doi-asserted-by":"crossref","unstructured":"Vu, T., Lester, B., Constant, N., Al-Rfou\u2019, R., Cer, D.: Spot: better frozen model adaptation through soft prompt transfer. In: Proceedings of ACL, pp. 5039\u20135059. Association for Computational Linguistics (2022)","DOI":"10.18653\/v1\/2022.acl-long.346"},{"key":"2_CR38","unstructured":"Wang, W., et al.: Structbert: Incorporating language structures into pre-training for deep language understanding. arXiv preprint arXiv:1908.04577 (2019)"},{"key":"2_CR39","doi-asserted-by":"crossref","unstructured":"Xu, H., Chen, Y., Du, Y., Shao, N., Wang, Y., Li, H., Yang, Z.: ZeroPrompt: scaling prompt-based pretraining to 1, 000 tasks improves zero-shot generalization. In: Findings of the Association for Computational Linguistics: EMNLP, pp. 4235\u20134252 (2022)","DOI":"10.18653\/v1\/2022.findings-emnlp.312"},{"key":"2_CR40","unstructured":"Zhao, Z., Wallace, E., Feng, S., Klein, D., Singh, S.: Calibrate before use: improving few-shot performance of language models. In: International Conference on Machine Learning, pp. 12697\u201312706. 
PMLR (2021)"}],"container-title":["Lecture Notes in Computer Science","Advanced Data Mining and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-46671-7_2","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,4]],"date-time":"2023-11-04T11:02:44Z","timestamp":1699095764000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-46671-7_2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031466700","9783031466717"],"references-count":40,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-46671-7_2","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"5 November 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ADMA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Advanced Data Mining and Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shenyang","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 August 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"adma2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/adma2023.uqcloud.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes. 
Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"503","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"216","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"43% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.97","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.77","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}