{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,16]],"date-time":"2025-06-16T12:11:13Z","timestamp":1750075873683,"version":"3.37.3"},"reference-count":38,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2022,7,27]],"date-time":"2022-07-27T00:00:00Z","timestamp":1658880000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,7,27]],"date-time":"2022-07-27T00:00:00Z","timestamp":1658880000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"SeoulTech"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Process Lett"],"published-print":{"date-parts":[[2023,4]]},"DOI":"10.1007\/s11063-022-10961-z","type":"journal-article","created":{"date-parts":[[2022,7,29]],"date-time":"2022-07-29T14:45:42Z","timestamp":1659105942000},"page":"1741-1757","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Mixing Approach for Text Data Augmentation Based on an Ensemble of Explainable Artificial Intelligence Methods"],"prefix":"10.1007","volume":"55","author":[{"given":"Jinyi","family":"Yu","sequence":"first","affiliation":[]},{"given":"Jinhae","family":"Choi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4199-936X","authenticated-orcid":false,"given":"Younghoon","family":"Lee","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,7,27]]},"reference":[{"key":"10961_CR1","doi-asserted-by":"publisher","first-page":"997","DOI":"10.1016\/j.apenergy.2017.12.054","volume":"212","author":"MH Alobaidi","year":"2018","unstructured":"Alobaidi MH, Chebana F, Meguid MA (2018) 
Robust ensemble learning framework for day-ahead forecasting of household based energy consumption. Appl Energy 212:997\u20131012","journal-title":"Appl Energy"},{"issue":"17","key":"10961_CR2","doi-asserted-by":"publisher","first-page":"5978","DOI":"10.3390\/app10175978","volume":"10","author":"V Atliha","year":"2020","unstructured":"Atliha V, \u0160e\u0161ok D (2020) Text augmentation using BERT for image captioning. Appl Sci 10(17):5978","journal-title":"Appl Sci"},{"issue":"7","key":"10961_CR3","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0130140","volume":"10","author":"S Bach","year":"2015","unstructured":"Bach S, Binder A, Montavon G, Klauschen F, M\u00fcller KR, Samek W (2015) On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation. PloS One 10(7):e0130,140","journal-title":"PloS One"},{"unstructured":"Bayer M, Kaufhold MA, Reuter C (2021) A survey on data augmentation for text classification. arXiv:2107.03158","key":"10961_CR4"},{"unstructured":"Belinkov Y, Bisk Y (2017) Synthetic and natural noise both break neural machine translation. arXiv:1711.02173","key":"10961_CR5"},{"doi-asserted-by":"crossref","unstructured":"Chen J, Yang Z, Yang D (2020) Mixtext: linguistically-informed interpolation of hidden space for semi-supervised text classification. arXiv:2004.12239","key":"10961_CR6","DOI":"10.18653\/v1\/2020.acl-main.194"},{"doi-asserted-by":"crossref","unstructured":"Edunov S, Ott M, Auli M, Grangier D (2018) Understanding back-translation at scale. arXiv:1808.09381","key":"10961_CR7","DOI":"10.18653\/v1\/D18-1045"},{"doi-asserted-by":"crossref","unstructured":"Feng SY, Gangal V, Kang D, Mitamura T, Hovy E (2020) GenAug: data augmentation for finetuning text generators. 
arXiv:2010.01794","key":"10961_CR8","DOI":"10.18653\/v1\/2020.deelio-1.4"},{"doi-asserted-by":"crossref","unstructured":"Feng SY, Gangal V, Wei J, Chandar S, Vosoughi S, Mitamura T, Hovy E (2021) A survey of data augmentation approaches for NLP. arXiv:2105.03075","key":"10961_CR9","DOI":"10.18653\/v1\/2021.findings-acl.84"},{"doi-asserted-by":"crossref","unstructured":"Guo D, Kim Y, Rush AM (2020) Sequence-level mixed sample data augmentation. arXiv:2011.09039","key":"10961_CR10","DOI":"10.18653\/v1\/2020.emnlp-main.447"},{"issue":"04","key":"10961_CR11","first-page":"4044","volume":"34","author":"H Guo","year":"2020","unstructured":"Guo H (2020) Nonlinear mixup: out-of-manifold data augmentation for text classification. Proc AAAI Conf Artif Intell 34(04):4044\u20134051","journal-title":"Proc AAAI Conf Artif Intell"},{"doi-asserted-by":"crossref","unstructured":"Hayashi T, Watanabe S, Zhang Y, Toda T, Hori T, Astudillo R, Takeda K (2018) Back-translation-style data augmentation for end-to-end ASR. In: 2018 IEEE spoken language technology workshop (SLT). IEEE, pp 426\u2013433","key":"10961_CR12","DOI":"10.1109\/SLT.2018.8639619"},{"unstructured":"Hu L, Chen J, Nair VN, Sudjianto A (2018) Locally interpretable models and effects based on supervised partitioning (lime-sup). arXiv:1806.00663","key":"10961_CR13"},{"doi-asserted-by":"crossref","unstructured":"Ibrahim M, Torki M, El-Makky NM (2020) AlexU-BackTranslation-TL at SemEval-2020 task 12: improving offensive language detection using data augmentation and transfer learning. In: Proceedings of the fourteenth workshop on semantic evaluation, pp 1881\u20131890","key":"10961_CR14","DOI":"10.18653\/v1\/2020.semeval-1.248"},{"doi-asserted-by":"crossref","unstructured":"Karimi A, Rossi L, Prati A (2021) AEDA: an easier data augmentation technique for text classification. 
arXiv:2108.13230","key":"10961_CR15","DOI":"10.18653\/v1\/2021.findings-emnlp.234"},{"doi-asserted-by":"crossref","unstructured":"Kobayashi S (2018) Contextual augmentation: data augmentation by words with paradigmatic relations. arXiv:1805.06201","key":"10961_CR16","DOI":"10.18653\/v1\/N18-2072"},{"unstructured":"Kolomiyets O, Bethard S, Moens MF (2011) Model-portability experiments for textual temporal analysis. In: Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies, vol 2. ACL, East Stroudsburg, pp 271\u2013276","key":"10961_CR17"},{"unstructured":"Lee J, Chung SY (2019) Robust training with ensemble consensus. arXiv:1910.09792","key":"10961_CR18"},{"doi-asserted-by":"crossref","unstructured":"Lee Y, Park J, Cho S (2020) Extraction and prioritization of product attributes using an explainable neural network. Pattern Anal Appl","key":"10961_CR19","DOI":"10.1007\/s10044-020-00878-5"},{"doi-asserted-by":"crossref","unstructured":"Liu R, Xu G, Jia C, Ma W, Wang L, Vosoughi S (2020) Data boost: text data augmentation through reinforcement learning guided conditional generation. arXiv:2012.02952","key":"10961_CR20","DOI":"10.18653\/v1\/2020.emnlp-main.726"},{"unstructured":"Lundberg SM, Lee SI (2017) A unified approach to interpreting model predictions. In: Advances in neural information processing systems, pp 4765\u20134774","key":"10961_CR21"},{"doi-asserted-by":"crossref","unstructured":"Marivate V, Sefara T (2020) Improving short text classification through global augmentation methods. In: International cross-domain conference for machine learning and knowledge extraction. Springer, Berlin, pp 385\u2013399","key":"10961_CR22","DOI":"10.1007\/978-3-030-57321-8_21"},{"unstructured":"Mosolova A, Fomin V, Bondarenko I (2018) Text augmentation for neural networks. 
In: AIST (Supplement), pp 104\u2013109","key":"10961_CR23"},{"doi-asserted-by":"crossref","unstructured":"Qiu S, Xu B, Zhang J, Wang Y, Shen X, De\u00a0Melo G, Long C, Li X (2020) Easyaug: an automatic textual data augmentation platform for classification tasks. In: Companion proceedings of the web conference 2020, pp 249\u2013252","key":"10961_CR24","DOI":"10.1145\/3366424.3383552"},{"doi-asserted-by":"crossref","unstructured":"Ribeiro MT, Singh S, Guestrin C (2016) Why should i trust you?: explaining the predictions of any classifier. In: Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining. ACM, pp 1135\u20131144","key":"10961_CR25","DOI":"10.1145\/2939672.2939778"},{"doi-asserted-by":"crossref","unstructured":"Rizos G, Hemker K, Schuller B (2019) Augment to prevent: short-text data augmentation in deep learning for hate-speech classification. In: Proceedings of the 28th ACM international conference on information and knowledge management, pp 991\u20131000","key":"10961_CR26","DOI":"10.1145\/3357384.3358040"},{"doi-asserted-by":"crossref","unstructured":"Selvaraju RR, Cogswell M, Das A, Vedantam R, Parikh D, Batra D (2017) Grad-cam: Visual explanations from deep networks via gradient-based localization. In: Proceedings of the IEEE international conference on computer vision, pp 618\u2013626","key":"10961_CR27","DOI":"10.1109\/ICCV.2017.74"},{"doi-asserted-by":"crossref","unstructured":"Sugiyama A, Yoshinaga N (2019) Data augmentation using back-translation for context-aware neural machine translation. In: Proceedings of the fourth workshop on discourse in machine translation (DiscoMT 2019), pp 35\u201344","key":"10961_CR28","DOI":"10.18653\/v1\/D19-6504"},{"doi-asserted-by":"crossref","unstructured":"Sun L, Xia C, Yin W, Liang T, Yu PS, He L (2020) Mixup-transformer: dynamic data augmentation for NLP tasks. 
arXiv:2010.02394","key":"10961_CR29","DOI":"10.18653\/v1\/2020.coling-main.305"},{"issue":"9","key":"10961_CR30","doi-asserted-by":"publisher","first-page":"5439","DOI":"10.1007\/s11042-018-5748-4","volume":"79","author":"X Sun","year":"2020","unstructured":"Sun X, He J (2020) A novel approach to generate a large scale of supervised data for short text sentiment analysis. Multimed Tools Appl 79(9):5439\u20135459","journal-title":"Multimed Tools Appl"},{"doi-asserted-by":"crossref","unstructured":"Wang X, Pham H, Dai, Z, Neubig G (2018) Switchout: an efficient data augmentation algorithm for neural machine translation. arXiv:1808.07512","key":"10961_CR31","DOI":"10.18653\/v1\/D18-1100"},{"doi-asserted-by":"crossref","unstructured":"Wei J, Zou K (2019) EDA: easy data augmentation techniques for boosting performance on text classification tasks. arXiv:1901.11196","key":"10961_CR32","DOI":"10.18653\/v1\/D19-1670"},{"doi-asserted-by":"crossref","unstructured":"Wu X, Lv S, Zang L, Han J, Hu S (2019) Conditional BERT contextual augmentation. In: International conference on computational science. Springer, Berlin, pp 84\u201395","key":"10961_CR33","DOI":"10.1007\/978-3-030-22747-0_7"},{"key":"10961_CR34","first-page":"6256","volume":"33","author":"Q Xie","year":"2020","unstructured":"Xie Q, Dai Z, Hovy E, Luong T, Le Q (2020) Unsupervised data augmentation for consistency training. Adv Neural Inf Process Syst 33:6256\u20136268","journal-title":"Adv Neural Inf Process Syst"},{"doi-asserted-by":"crossref","unstructured":"Yang Z, Yang D, Dyer C, He X, Smola A, Hovy E (2016) Hierarchical attention networks for document classification. 
In: Proceedings of the 2016 conference of the North American chapter of the association for computational linguistics: human language technologies, pp 1480\u20131489","key":"10961_CR35","DOI":"10.18653\/v1\/N16-1174"},{"doi-asserted-by":"crossref","unstructured":"Yoo KM, Lee H, Dernoncourt F, Bui T, Chang W, Lee Sg (2020) Variational hierarchical dialog autoencoder for dialog state tracking data augmentation. arXiv:2001.08604","key":"10961_CR36","DOI":"10.18653\/v1\/2020.emnlp-main.274"},{"doi-asserted-by":"crossref","unstructured":"Zhang H, Cisse M, Dauphin YN, Lopez-Paz D (2017) mixup: Beyond empirical risk minimization. arXiv:1710.09412","key":"10961_CR37","DOI":"10.1007\/978-1-4899-7687-1_79"},{"doi-asserted-by":"crossref","unstructured":"Zhang R, Yu Y, Zhang C (2020) Seqmix: augmenting active sequence labeling via sequence mixup. arXiv:2010.02322","key":"10961_CR38","DOI":"10.18653\/v1\/2020.emnlp-main.691"}],"container-title":["Neural Processing Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-022-10961-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11063-022-10961-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-022-10961-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,4,27]],"date-time":"2023-04-27T02:32:25Z","timestamp":1682562745000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11063-022-10961-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,27]]},"references-count":38,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2023,4]]}},"alternative-id":["10961"],"URL":"https:\/\/doi.org\/10.1007\/s11063-022-10961-z","relation":{},"ISSN":["1370-4621","1573-773X"],"issn-type":[{"type":"print","value":"1370-4621"},{"type":"electronic","value":"1573-773X"}],"subject":[],"published":{"date-parts":[[2022,7,27]]},"assertion":[{"value":"4 July 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 July 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}