{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T20:51:23Z","timestamp":1743108683087,"version":"3.40.3"},"publisher-location":"Cham","reference-count":17,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783031048180"},{"type":"electronic","value":"9783031048197"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-04819-7_33","type":"book-chapter","created":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T10:02:41Z","timestamp":1652695361000},"page":"333-342","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["NER in\u00a0Archival Finding Aids: Next Level"],"prefix":"10.1007","author":[{"given":"Lu\u00eds Filipe","family":"da Costa Cunha","sequence":"first","affiliation":[]},{"given":"Jos\u00e9 Carlos","family":"Ramalho","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,5,17]]},"reference":[{"key":"33_CR1","unstructured":"Alammar, J.: The illustrated transformer. http:\/\/jalammar.github.io\/illustrated-transformer. Accessed 18 July 2021"},{"key":"33_CR2","unstructured":"Alvi, A., Kharya, P.: Using deepspeed and megatron to train megatron-turing NLG 530b, the world\u2019s largest and most powerful generative language model (2021). https:\/\/www.microsoft.com\/en-us\/research\/blog\/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model\/. Accessed 15 Oct 2021"},{"key":"33_CR3","unstructured":"Bahdanau, D., Cho, K., Bengio, Y.: Neural machine translation by jointly learning to align and translate (2016)"},{"key":"33_CR4","unstructured":"Cunha, L.F.C., Ramalho, J.C.: http:\/\/ner.epl.di.uminho.pt\/"},{"key":"33_CR5","doi-asserted-by":"publisher","unstructured":"Cunha, L.F.C., Ramalho, J.C.: NER in Archival Finding Aids (2021). https:\/\/doi.org\/10.4230\/OASIcs.SLATE.2021.8","DOI":"10.4230\/OASIcs.SLATE.2021.8"},{"key":"33_CR6","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. CoRR abs\/1810.04805 (2018). http:\/\/arxiv.org\/abs\/1810.04805"},{"key":"33_CR7","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding (2019)"},{"key":"33_CR8","doi-asserted-by":"publisher","unstructured":"Howard, J., Ruder, S.: Universal language model fine-tuning for text classification (2018). https:\/\/doi.org\/10.18653\/v1\/p18-1031","DOI":"10.18653\/v1\/p18-1031"},{"key":"33_CR9","unstructured":"Mikolov, T., Chen, K., Corrado, G., Dean, J.: Efficient estimation of word representations in vector space (2013)"},{"key":"33_CR10","doi-asserted-by":"publisher","unstructured":"Pennington, J., Socher, R., Manning, C.: GloVe: Global vectors for word representation (2014). https:\/\/doi.org\/10.3115\/v1\/D14-1162","DOI":"10.3115\/v1\/D14-1162"},{"key":"33_CR11","unstructured":"Radford, A., Narasimhan, K.: Improving language understanding by generative pre-training (2018)"},{"key":"33_CR12","unstructured":"Ruder, S.: NLP\u2019s ImageNet moment has arrived. https:\/\/ruder.io\/nlp-imagenet\/ (2018). Accessed 07 Oct 2021"},{"key":"33_CR13","doi-asserted-by":"crossref","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge (2015)","DOI":"10.1007\/s11263-015-0816-y"},{"key":"33_CR14","doi-asserted-by":"crossref","unstructured":"Souza, F., Nogueira, R., Lotufo, R.: BERTimbau: pretrained BERT models for Brazilian Portuguese. In: 9th Brazilian Conference on Intelligent Systems, BRACIS, Rio Grande do Sul, Brazil, October 20-23 (2020). (to appear)","DOI":"10.1007\/978-3-030-61377-8_28"},{"key":"33_CR15","unstructured":"Vaswani, A., et al.: Attention is all you need (2017)"},{"key":"33_CR16","unstructured":"Wagner, J., Wilkens, R., Idiart, M., Villavicencio, A.: The brWaC corpus: a new open resource for Brazilian Portuguese (2018)"},{"key":"33_CR17","doi-asserted-by":"crossref","unstructured":"Wolf, T., et al.: Huggingface\u2019s transformers: state-of-the-art natural language processing (2020)","DOI":"10.18653\/v1\/2020.emnlp-demos.6"}],"container-title":["Lecture Notes in Networks and Systems","Information Systems and Technologies"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-04819-7_33","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T10:07:35Z","timestamp":1652695655000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-04819-7_33"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031048180","9783031048197"],"references-count":17,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-04819-7_33","relation":{},"ISSN":["2367-3370","2367-3389"],"issn-type":[{"type":"print","value":"2367-3370"},{"type":"electronic","value":"2367-3389"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"17 May 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"WorldCIST","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"World Conference on Information Systems and Technologies","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Budva","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Montenegro","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 April 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 April 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"worldcist2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/worldcist.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}