{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,7]],"date-time":"2025-05-07T05:03:54Z","timestamp":1746594234091,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":31,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819916474"},{"type":"electronic","value":"9789819916481"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-981-99-1648-1_23","type":"book-chapter","created":{"date-parts":[[2023,4,14]],"date-time":"2023-04-14T07:02:39Z","timestamp":1681455759000},"page":"272-283","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Diverse and\u00a0High-Quality Data Augmentation Using GPT for\u00a0Named Entity Recognition"],"prefix":"10.1007","author":[{"given":"Huanlei","family":"Chen","sequence":"first","affiliation":[]},{"given":"Weiwen","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Lianglun","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Haiming","family":"Ye","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,4,15]]},"reference":[{"key":"23_CR1","unstructured":"Brown, T., Mann, B., Ryder, N., et al.: Language models are few-shot learners. In: NeurIPS (2020)"},{"key":"23_CR2","unstructured":"Chen, X., et al.: Lightner: A lightweight generative framework with prompt-guided attention for low-resource NER. arXiv preprint arXiv:2109.00720 (2021)"},{"key":"23_CR3","doi-asserted-by":"crossref","unstructured":"Ciregan, D., Meier, U., Schmidhuber, J.: Multi-column deep neural networks for image classification. In: 2012 IEEE Conference on Computer Vision and Pattern Recognition (2012)","DOI":"10.1109\/CVPR.2012.6248110"},{"key":"23_CR4","doi-asserted-by":"crossref","unstructured":"Cui, L., Wu, Y., Liu, J., Yang, S., Zhang, Y.: Template-based named entity recognition using BART. In: Findings of the Association for Computational Linguistics: ACL-IJCNLP (2021)","DOI":"10.18653\/v1\/2021.findings-acl.161"},{"key":"23_CR5","doi-asserted-by":"crossref","unstructured":"Dai, X., Adel, H.: An analysis of simple data augmentation for named entity recognition. In: COLING (2020)","DOI":"10.18653\/v1\/2020.coling-main.343"},{"key":"23_CR6","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: Pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT (2019)"},{"key":"23_CR7","doi-asserted-by":"crossref","unstructured":"Ding, B., et al.: DAGA: Data augmentation with a generation approach for low-resource tagging tasks. In: EMNLP (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.488"},{"key":"23_CR8","doi-asserted-by":"crossref","unstructured":"Ding, N., Chen, Y., Han, X., et al.: Prompt-learning for fine-grained entity typing. arXiv preprint arXiv:2108.10604 (2021)","DOI":"10.18653\/v1\/2022.findings-emnlp.512"},{"key":"23_CR9","doi-asserted-by":"crossref","unstructured":"Fabbri, A., Han, S., et al.: Improving zero and few-shot abstractive summarization with intermediate fine-tuning and data augmentation. In: NAACL-HLT (2021)","DOI":"10.18653\/v1\/2021.naacl-main.57"},{"key":"23_CR10","doi-asserted-by":"crossref","unstructured":"Feng, S.Y., Gangal, V., Wei, J., et al.: A survey of data augmentation approaches for NLP. In: ACL\/IJCNLP (Findings) (2021)","DOI":"10.18653\/v1\/2021.findings-acl.84"},{"key":"23_CR11","doi-asserted-by":"crossref","unstructured":"Gao, T., Fisch, A., Chen, D.: Making pre-trained language models better few-shot learners. In: ACL\/IJCNLP (2021)","DOI":"10.18653\/v1\/2021.acl-long.295"},{"key":"23_CR12","doi-asserted-by":"crossref","unstructured":"Iyyer, M., Wieting, J., Gimpel, K., Zettlemoyer, L.: Adversarial example generation with syntactically controlled paraphrase networks. In: NAACL-HLT (2018)","DOI":"10.18653\/v1\/N18-1170"},{"key":"23_CR13","doi-asserted-by":"crossref","unstructured":"Ko, T., Peddinti, V., Povey, D., Khudanpur, S.: Audio augmentation for speech recognition. In: INTERSPEECH (2015)","DOI":"10.21437\/Interspeech.2015-711"},{"key":"23_CR14","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: NIPS (2012)"},{"key":"23_CR15","doi-asserted-by":"crossref","unstructured":"Lai, T., Cheng, L., Wang, D., Ye, H., Zhang, W.: RMAN: relational multi-head attention neural network for joint extraction of entities and relations. Appl. Intell. (2022)","DOI":"10.1007\/s10489-021-02600-2"},{"key":"23_CR16","doi-asserted-by":"crossref","unstructured":"Lewis, M., Liu, Y., et al.: BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: ACL (2020)","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"23_CR17","doi-asserted-by":"crossref","unstructured":"Liu, J., Pasupat, P., Cyphers, S., Glass, J.: Asgard: A portable architecture for multilingual dialogue systems. In: ICASSP (2013)","DOI":"10.1109\/ICASSP.2013.6639301"},{"key":"23_CR18","doi-asserted-by":"crossref","unstructured":"Ma, R., Zhou, X., Gui, T., Tan, Y., Zhang, Q., Huang, X.: Template-free prompt tuning for few-shot NER. arXiv preprint arXiv:2109.13532 (2021)","DOI":"10.18653\/v1\/2022.naacl-main.420"},{"key":"23_CR19","unstructured":"Radford, A., Narasimhan, K.: Improving language understanding by generative pre-training (2018). https:\/\/cdn.openai.com\/research-covers\/language-unsupervised\/language_understanding_paper.pdf"},{"issue":"8","key":"23_CR20","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI Blog 1(8), 9 (2019)","journal-title":"OpenAI Blog"},{"key":"23_CR21","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. (2020)"},{"key":"23_CR22","doi-asserted-by":"crossref","unstructured":"Ragni, A., Knill, K.M., Rath, S.P., Gales, M.J.F.: Data augmentation for low resource languages. In: INTERSPEECH (2014)","DOI":"10.21437\/Interspeech.2014-207"},{"key":"23_CR23","doi-asserted-by":"crossref","unstructured":"\u015eahin, G.G., Steedman, M.: Data augmentation via dependency tree morphing for low-resource languages. In: EMNLP (2018)","DOI":"10.18653\/v1\/D18-1545"},{"key":"23_CR24","doi-asserted-by":"crossref","unstructured":"Schick, T., Sch\u00fctze, H.: Exploiting cloze-questions for few-shot text classification and natural language inference. In: EACL (2021)","DOI":"10.18653\/v1\/2021.eacl-main.20"},{"key":"23_CR25","doi-asserted-by":"crossref","unstructured":"Shorten, C., Khoshgoftaar, T.M., Furht, B.: Text data augmentation for deep learning. J. Big Data (2021)","DOI":"10.21203\/rs.3.rs-650804\/v1"},{"key":"23_CR26","doi-asserted-by":"crossref","unstructured":"Tjong Kim Sang, E.F., De Meulder, F.: Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In: Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL (2003)","DOI":"10.3115\/1119176.1119195"},{"key":"23_CR27","unstructured":"Vaswani, A., Shazeer, N., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems (2017)"},{"key":"23_CR28","doi-asserted-by":"crossref","unstructured":"Wei, J., Zou, K.: EDA: Easy data augmentation techniques for boosting performance on text classification tasks. In: EMNLP-IJCNLP (2019)","DOI":"10.18653\/v1\/D19-1670"},{"key":"23_CR29","unstructured":"Yu, A.W., Dohan, D., Le, Q., Luong, T., Zhao, R., Chen, K.: Fast and accurate reading comprehension by combining self-attention and convolution. In: International Conference on Learning Representations (2018)"},{"key":"23_CR30","doi-asserted-by":"crossref","unstructured":"Zhang, H., Chen, Q., Zhang, W.: Improving entity linking with two adaptive features. In: Frontiers of Information Technology & Electronic Engineering (2022)","DOI":"10.1631\/FITEE.2100495"},{"key":"23_CR31","unstructured":"Zhang, X., Zhao, J., LeCun, Y.: Character-level convolutional networks for text classification. In: Advances in Neural Information Processing Systems (2015)"}],"container-title":["Communications in Computer and Information Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-1648-1_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,10]],"date-time":"2023-12-10T17:53:26Z","timestamp":1702230806000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-1648-1_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9789819916474","9789819916481"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-1648-1_23","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"15 April 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"New Delhi","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 November 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 November 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iconip2022.apnns.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easy Chair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"810","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"359","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"44% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.65","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ICONIP 2022 consists of a two-volume set, LNCS & CCIS, which includes 146 and 213 papers","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}