{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,24]],"date-time":"2026-01-24T15:43:34Z","timestamp":1769269414903,"version":"3.49.0"},"publisher-location":"Cham","reference-count":41,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031705458","type":"print"},{"value":"9783031705465","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-70546-5_13","type":"book-chapter","created":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T05:02:47Z","timestamp":1725944567000},"page":"218-235","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Self-supervised Pre-training of\u00a0Text Recognizers"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6853-0508","authenticated-orcid":false,"given":"Martin","family":"Ki\u0161\u0161","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6364-129X","authenticated-orcid":false,"given":"Michal","family":"Hradi\u0161","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,11]]},"reference":[{"key":"13_CR1","unstructured":"Aberdam, A., Ganz, R., Mazor, S., Litman, R.: Multimodal semi-supervised learning for text recognition (2022)"},{"key":"13_CR2","doi-asserted-by":"crossref","unstructured":"Aberdam, A., et 
al.: Sequence-to-sequence contrastive learning for text recognition, pp. 15302\u201315312 (2021)","DOI":"10.1109\/CVPR46437.2021.01505"},{"key":"13_CR3","doi-asserted-by":"crossref","unstructured":"Assran, M., et al.: Self-supervised learning from images with a joint-embedding predictive architecture (2023)","DOI":"10.1109\/CVPR52729.2023.01499"},{"key":"13_CR4","unstructured":"Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: a framework for self-supervised learning of speech representations. In: Advances in Neural Information Processing Systems, vol.\u00a033, pp. 12449\u201312460. Curran Associates, Inc (2020)"},{"key":"13_CR5","unstructured":"Bao, H., Dong, L., Piao, S., Wei, F.: BEiT: BERT pre-training of image transformers (2022)"},{"key":"13_CR6","unstructured":"Bardes, A., Ponce, J., LeCun, Y.: VICReg: variance-invariance-covariance regularization for self-supervised learning (2022)"},{"key":"13_CR7","first-page":"8799","volume":"35","author":"A Bardes","year":"2022","unstructured":"Bardes, A., Ponce, J., LeCun, Y.: VICRegL: self-supervised learning of local visual features. Adv. Neural. Inf. Process. Syst. 35, 8799\u20138810 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"13_CR8","unstructured":"Berthelot, D., Carlini, N., Goodfellow, I., Papernot, N., Oliver, A., Raffel, C.A.: MixMatch: a holistic approach to semi-supervised learning. In: Advances in Neural Information Processing System, vol.\u00a032. Curran Associates, Inc. (2019)"},{"key":"13_CR9","unstructured":"Betker, J., et al.: Improving image generation with better captions (2023)"},{"key":"13_CR10","unstructured":"Brown, T., et al.: Language models are few-shot learners. In: Advances in Neural Information Processing Systems, vol.\u00a033, pp. 1877\u20131901. Curran Associates, Inc. 
(2020)"},{"key":"13_CR11","unstructured":"Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., Joulin, A.: Unsupervised learning of visual features by contrasting cluster assignments. In: Advances in Neural Information Processing Systems, vol.\u00a033, pp. 9912\u20139924. Curran Associates, Inc (2020)"},{"key":"13_CR12","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers, pp. 9650\u20139660 (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"issue":"6","key":"13_CR13","doi-asserted-by":"publisher","first-page":"1505","DOI":"10.1109\/JSTSP.2022.3188113","volume":"16","author":"S Chen","year":"2022","unstructured":"Chen, S., et al.: WavLM: large-scale self-supervised pre-training for full stack speech processing. IEEE J. Sel. Topics Signal Process. 16(6), 1505\u20131518 (2022)","journal-title":"IEEE J. Sel. Topics Signal Process."},{"key":"13_CR14","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations (2020)"},{"key":"13_CR15","unstructured":"Chiu, C.C., Qin, J., Zhang, Y., Yu, J., Wu, Y.: Self-supervised learning with random-projection quantizer for speech recognition. In: Proceedings of the 39th International Conference on Machine Learning, pp. 3915\u20133924. PMLR (2022). 
ISSN 2640-3498"},{"key":"13_CR16","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding abs\/1810.04805 (2018)"},{"key":"13_CR17","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale (2021)"},{"key":"13_CR18","doi-asserted-by":"crossref","unstructured":"Graves, A., Fernandez, S., Gomez, F., Schmidhuber, J.: Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks, p.\u00a08 (2006)","DOI":"10.1145\/1143844.1143891"},{"key":"13_CR19","doi-asserted-by":"crossref","unstructured":"Guan, T., Shen, W., Yang, X., Feng, Q., Jiang, Z., Yang, X.: Self-supervised character-to-character distillation for text recognition, pp. 19473\u201319484 (2023)","DOI":"10.1109\/ICCV51070.2023.01784"},{"key":"13_CR20","doi-asserted-by":"publisher","first-page":"3451","DOI":"10.1109\/TASLP.2021.3122291","volume":"29","author":"WN Hsu","year":"2021","unstructured":"Hsu, W.N., Bolte, B., Tsai, Y.H.H., Lakhotia, K., Salakhutdinov, R., Mohamed, A.: HuBERT: self-supervised speech representation learning by masked prediction of hidden units. IEEE\/ACM Trans. Audio Speech Lang. Process. 29, 3451\u20133460 (2021)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"13_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"463","DOI":"10.1007\/978-3-030-86337-1_31","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"M Ki\u0161\u0161","year":"2021","unstructured":"Ki\u0161\u0161, M., Bene\u0161, K., Hradi\u0161, M.: AT-ST: self-training adaptation strategy for OCR in domains with limited transcriptions. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12824, pp. 463\u2013477. Springer, Cham (2021). 
https:\/\/doi.org\/10.1007\/978-3-030-86337-1_31"},{"key":"13_CR22","doi-asserted-by":"crossref","unstructured":"Ki\u0161\u0161, M., Hradi\u0161, M., Bene\u0161, K., Buchal, P., Kula, M.: SoftCTC-semi-supervised learning for text recognition using soft pseudo-labels (2023)","DOI":"10.1007\/s10032-023-00452-9"},{"key":"13_CR23","doi-asserted-by":"crossref","unstructured":"Ki\u0161\u0161, M., Hradi\u0161, M., Kodym, O.: Brno mobile OCR dataset. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 1352\u20131357 (2019). ISSN 1520-5363","DOI":"10.1109\/ICDAR.2019.00218"},{"key":"13_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"492","DOI":"10.1007\/978-3-030-86331-9_32","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"O Kodym","year":"2021","unstructured":"Kodym, O., Hradi\u0161, M.: Page layout analysis system for\u00a0unconstrained historic documents. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12822, pp. 492\u2013506. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86331-9_32"},{"key":"13_CR25","unstructured":"Kurakin, A., et al.: ReMixMatch: semi-supervised learning with distribution matching and augmentation anchoring. In: ICLR (2020)"},{"key":"13_CR26","unstructured":"Lee, D.H.: Pseudo-label: the simple and efficient semi-supervised learning method for deep neural networks. In: ICML 2013 Workshop: Challenges in Representation Learning (WREPL) (2013)"},{"key":"13_CR27","doi-asserted-by":"crossref","unstructured":"Li, J., Xu, Y., Lv, T., Cui, L., Zhang, C., Wei, F.: DiT: self-supervised pre-training for document image transformer. In: Proceedings of the 30th ACM International Conference on Multimedia, MM 2022, pp. 3530\u20133539. 
Association for Computing Machinery (2022)","DOI":"10.1145\/3503161.3547911"},{"key":"13_CR28","unstructured":"Oquab, M., et al.: DINOv2: learning robust visual features without supervision (2024)"},{"key":"13_CR29","unstructured":"Peng, Z., Dong, L., Bao, H., Ye, Q., Wei, F.: BEiT v2: masked image modeling with vector-quantized visual tokenizers (2022)"},{"key":"13_CR30","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision (2021)"},{"key":"13_CR31","unstructured":"Radford, A., Kim, J.W., Xu, T., Brockman, G., McLeavey, C., Sutskever, I.: Robust speech recognition via large-scale weak supervision (2022)"},{"key":"13_CR32","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"13_CR33","doi-asserted-by":"crossref","unstructured":"Souibgui, M.A., et al.: Text-DIAE: a self-supervised degradation invariant autoencoder for text recognition and document enhancement. In: Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence and Thirty-Fifth Conference on Innovative Applications of Artificial Intelligence and Thirteenth Symposium on Educational Advances in Artificial Intelligence, AAAI 2023\/IAAI 2023\/EAAI 2023, vol.\u00a037, pp. 2330\u20132338. AAAI Press (2023)","DOI":"10.1609\/aaai.v37i2.25328"},{"key":"13_CR34","doi-asserted-by":"crossref","unstructured":"S\u00e1nchez, J.A., Romero, V., Toselli, A.H., Vidal, E.: ICFHR2014 competition on handwritten text recognition on transcriptorium datasets (HTRtS). In: 2014 14th International Conference on Frontiers in Handwriting Recognition, pp. 785\u2013790, September 2014. 
ISSN 2167-6445","DOI":"10.1109\/ICFHR.2014.137"},{"key":"13_CR35","unstructured":"Touvron, H., et al.: Llama 2: open foundation and fine-tuned chat models (2023)"},{"key":"13_CR36","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol.\u00a030. Curran Associates, Inc (2017)"},{"key":"13_CR37","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109695","volume":"142","author":"E Vidal","year":"2023","unstructured":"Vidal, E., Toselli, A.H., R\u00edos-Vila, A., Calvo-Zaragoza, J.: End-to-end page-level assessment of handwritten text recognition. Pattern Recogn. 142, 109695 (2023)","journal-title":"Pattern Recogn."},{"key":"13_CR38","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Image as a foreign language: BEiT pretraining for vision and vision-language tasks, pp. 19175\u201319186 (2023)","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"13_CR39","doi-asserted-by":"crossref","unstructured":"Xie, Q., Luong, M.T., Hovy, E., Le, Q.V.: Self-training with noisy student improves imagenet classification. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020","DOI":"10.1109\/CVPR42600.2020.01070"},{"key":"13_CR40","doi-asserted-by":"crossref","unstructured":"Yang, M., et al.: Reading and writing: discriminative and generative modeling for self-supervised text recognition. In: Proceedings of the 30th ACM International Conference on Multimedia, MM 2022. Association for Computing Machinery, pp. 
4214\u20134223","DOI":"10.1145\/3503161.3547784"},{"key":"13_CR41","unstructured":"Zhou, J., et al.: iBOT: image BERT pre-training with online tokenizer (2022)"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition - ICDAR 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-70546-5_13","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T05:05:41Z","timestamp":1725944741000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-70546-5_13"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031705458","9783031705465"],"references-count":41,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-70546-5_13","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"11 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Athens","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference 
Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 September 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icdar2024.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}