{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,8]],"date-time":"2025-09-08T05:41:28Z","timestamp":1757310088130,"version":"3.40.3"},"publisher-location":"Cham","reference-count":39,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030858957"},{"type":"electronic","value":"9783030858964"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-85896-4_20","type":"book-chapter","created":{"date-parts":[[2021,8,18]],"date-time":"2021-08-18T10:06:38Z","timestamp":1629281198000},"page":"242-257","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["EMBERT: A Pre-trained Language Model for Chinese Medical Text Mining"],"prefix":"10.1007","author":[{"given":"Zerui","family":"Cai","sequence":"first","affiliation":[]},{"given":"Taolin","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Chengyu","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xiaofeng","family":"He","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,8,19]]},"reference":[{"key":"20_CR1","unstructured":"Alyafeai, Z., AlShaibani, M.S., Ahmad, I.: A survey on transfer learning in natural language processing. CoRR arXiv:2007.04239 (2020)"},{"issue":"142","key":"20_CR2","doi-asserted-by":"publisher","first-page":"5","DOI":"10.1515\/ling.1974.12.142.5","volume":"12","author":"JD Apresjan","year":"1974","unstructured":"Apresjan, J.D.: Regular polysemy. Linguistics 12(142), 5\u201332 (1974)","journal-title":"Linguistics"},{"key":"20_CR3","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv:1607.06450 (2016)"},{"key":"20_CR4","doi-asserted-by":"crossref","unstructured":"Baumann, A.: Multilingual language models for named entity recognition in German and English. In: RANLP, pp. 21\u201327 (2019)","DOI":"10.26615\/issn.2603-2821.2019_004"},{"key":"20_CR5","doi-asserted-by":"crossref","unstructured":"Chronopoulou, A., Baziotis, C., Potamianos, A.: An embarrassingly simple approach for transfer learning from pretrained language models. In: NAACL, pp. 2089\u20132095 (2019)","DOI":"10.18653\/v1\/N19-1213"},{"key":"20_CR6","unstructured":"Cui, Y., et al.: Pre-training with whole word masking for chinese BERT. CoRR arXiv:1906.08101 (2019)"},{"key":"20_CR7","unstructured":"Dai, A.M., Le, Q.V.: Semi-supervised sequence learning. In: NIPS, pp. 3079\u20133087 (2015)"},{"issue":"4","key":"20_CR8","doi-asserted-by":"publisher","first-page":"325","DOI":"10.1016\/0024-3841(88)90009-5","volume":"75","author":"PD Deane","year":"1988","unstructured":"Deane, P.D.: Polysemy and cognition. Lingua 75(4), 325\u2013361 (1988)","journal-title":"Lingua"},{"key":"20_CR9","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT, pp. 4171\u20134186 (2019)"},{"key":"20_CR10","unstructured":"Gu, Y., et al.: Domain-specific language model pretraining for biomedical natural language processing. CoRR arXiv:2007.15779 (2020)"},{"key":"20_CR11","doi-asserted-by":"crossref","unstructured":"He, J., Fu, M., Tu, M.: Applying deep matching networks to Chinese medical question answering: A study and a dataset. BMC Med. Inf. Decis. Making 19(2), 91\u2013100 (2019)","DOI":"10.1186\/s12911-019-0761-8"},{"key":"20_CR12","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (gelus). arXiv:1606.08415 (2016)"},{"issue":"8","key":"20_CR13","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"20_CR14","doi-asserted-by":"crossref","unstructured":"Hosseini, P., Hosseini, P., Broniatowski, D.A.: Content analysis of Persian\/Farsi tweets during COVID-19 pandemic in Iran using NLP. CoRR arXiv:2005.08400 (2020)","DOI":"10.18653\/v1\/2020.nlpcovid19-2.26"},{"key":"20_CR15","unstructured":"Huang, K., Altosaar, J., Ranganath, R.: Clinicalbert: modeling clinical notes and predicting hospital readmission. arXiv preprint arXiv:1904.05342 (2019)"},{"key":"20_CR16","unstructured":"Jiang, Z., El-Jaroudi, A., Hartmann, W., Karakos, D.G., Zhao, L.: Cross-lingual information retrieval with BERT. In: CLSSTS@LREC, pp. 26\u201331 (2020)"},{"key":"20_CR17","doi-asserted-by":"crossref","unstructured":"Shang, J., Liu, J., Jiang, M., Ren, X., Voss, C.R., Han, J.: Automated phrase mining from massive text corpora. IEEE Trans. Knowl. Data Eng. 30(10), 1825\u20131837 (2018)","DOI":"10.1109\/TKDE.2018.2812203"},{"key":"20_CR18","first-page":"64","volume":"8","author":"M Joshi","year":"2020","unstructured":"Joshi, M., Chen, D., Liu, Y., Weld, D.S., Zettlemoyer, L., Levy, O.: Spanbert: improving pre-training by representing and predicting spans. Comput. Linguist. 8, 64\u201377 (2020)","journal-title":"Comput. Linguist."},{"key":"20_CR19","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv:1412.6980 (2014)"},{"issue":"4","key":"20_CR20","doi-asserted-by":"crossref","first-page":"1234","DOI":"10.1093\/bioinformatics\/btz682","volume":"36","author":"J Lee","year":"2020","unstructured":"Lee, J., et al.: Biobert: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics 36(4), 1234\u20131240 (2020)","journal-title":"Bioinformatics"},{"issue":"8","key":"20_CR21","doi-asserted-by":"publisher","first-page":"1254","DOI":"10.1016\/j.knosys.2011.06.001","volume":"24","author":"Q Liu","year":"2011","unstructured":"Liu, Q., Wu, L., Yang, Z., Liu, Y.: Domain phrase identification using atomic word formation in Chinese text. Knowl. Based Syst. 24(8), 1254\u20131260 (2011)","journal-title":"Knowl. Based Syst."},{"key":"20_CR22","doi-asserted-by":"crossref","unstructured":"Liu, X., He, P., Chen, W., Gao, J.: Multi-task deep neural networks for natural language understanding. In: ACL, pp. 4487\u20134496 (2019)","DOI":"10.18653\/v1\/P19-1441"},{"key":"20_CR23","unstructured":"Liu, Y., et al.: Roberta: a robustly optimized BERT pretraining approach. CoRR arXiv:1907.11692 (2019)"},{"key":"20_CR24","doi-asserted-by":"crossref","unstructured":"Liu, Z., Huang, D., Huang, K., Li, Z., Zhao, J.: Finbert: a pre-trained financial language representation model for financial text mining. In: IJCAI, pp. 4513\u20134519 (2020)","DOI":"10.24963\/ijcai.2020\/622"},{"key":"20_CR25","doi-asserted-by":"crossref","unstructured":"Peng, Y., Yan, S., Lu, Z.: Transfer learning in biomedical natural language processing: an evaluation of BERT and ELMo on ten benchmarking datasets. arXiv:1906.05474 (2019)","DOI":"10.18653\/v1\/W19-5006"},{"key":"20_CR26","doi-asserted-by":"crossref","unstructured":"Peters, M.E., et al.: Deep contextualized word representations. In: NAACL, pp. 2227\u20132237 (2018)","DOI":"10.18653\/v1\/N18-1202"},{"issue":"8","key":"20_CR27","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI Blog 1(8), 9 (2019)","journal-title":"OpenAI Blog"},{"key":"20_CR28","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. CoRR arXiv:1910.10683 (2019)"},{"key":"20_CR29","doi-asserted-by":"crossref","unstructured":"Ravin, Y., Leacock, C.: Polysemy: An Overview. Polysemy: Theoretical and Computational Approaches, pp. 1\u201329 (2000)","DOI":"10.1093\/oso\/9780198238423.003.0001"},{"issue":"8","key":"20_CR30","doi-asserted-by":"publisher","first-page":"1310","DOI":"10.1093\/jamia\/ocaa116","volume":"27","author":"A Sarker","year":"2020","unstructured":"Sarker, A., Lakamana, S., Hogg-Bremer, W., Xie, A., Al-Garadi, M.A., Yang, Y.: Self-reported COVID-19 symptoms on Twitter: an analysis and a research resource. J. Am. Med. Inf. Assoc. 27(8), 1310\u20131315 (2020)","journal-title":"J. Am. Med. Inf. Assoc."},{"key":"20_CR31","unstructured":"Sun, Y., et al.: ERNIE: enhanced representation through knowledge integration. CoRR arXiv:1904.09223 (2019)"},{"key":"20_CR32","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NIPS, pp. 5998\u20136008 (2017)"},{"key":"20_CR33","unstructured":"Xu, H., Liu, B., Shu, L., Yu, P.S.: BERT post-training for review reading comprehension and aspect-based sentiment analysis. In: NAACL, pp. 2324\u20132335 (2019)"},{"issue":"12","key":"20_CR34","doi-asserted-by":"publisher","first-page":"4144","DOI":"10.3390\/app10124144","volume":"10","author":"S Xu","year":"2020","unstructured":"Xu, S., Shen, X., Fukumoto, F., Li, J., Suzuki, Y., Nishizaki, H.: Paraphrase identification with lexical, syntactic and sentential encodings. Appl. Sci. 10(12), 4144 (2020)","journal-title":"Appl. Sci."},{"key":"20_CR35","unstructured":"Yang, Z., Dai, Z., Yang, Y., Carbonell, J.G., Salakhutdinov, R., Le, Q.V.: Xlnet: generalized autoregressive pretraining for language understanding. In: NeurIPS, pp. 5754\u20135764 (2019)"},{"key":"20_CR36","unstructured":"Zhang, N., Jia, Q., Yin, K., Dong, L., Gao, F., Hua, N.: Conceptualized representation learning for Chinese biomedical text mining. In: WSDM 2020 HealthDay (2020)"},{"key":"20_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, S., Zhang, X., Wang, H., Cheng, J., Li, P., Ding, Z.: Chinese medical question answer matching using end-to-end character-level multi-scale CNNs. Appl. Sci. 7(8), 767 (2017)","DOI":"10.3390\/app7080767"},{"key":"20_CR38","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Han, X., Liu, Z., Jiang, X., Sun, M., Liu, Q.: ERNIE: enhanced language representation with informative entities. In: ACL, pp. 1441\u20131451 (2019)","DOI":"10.18653\/v1\/P19-1139"},{"key":"20_CR39","doi-asserted-by":"crossref","unstructured":"Zhang, Z., et al.: Semantics-aware BERT for language understanding. In: AAAI, pp. 9628\u20139635 (2020)","DOI":"10.1609\/aaai.v34i05.6510"}],"container-title":["Lecture Notes in Computer Science","Web and Big Data"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-85896-4_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,7]],"date-time":"2023-11-07T14:17:23Z","timestamp":1699366643000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-85896-4_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030858957","9783030858964"],"references-count":39,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-85896-4_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"19 August 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"APWeb-WAIM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Asia-Pacific Web (APWeb) and Web-Age Information Management (WAIM) Joint International Conference on Web and Big Data","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Guangzhou","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 August 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"apwebwaim2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"184","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"44","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"24","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"24% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.6","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"6.38","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}