{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,30]],"date-time":"2026-04-30T21:33:20Z","timestamp":1777584800962,"version":"3.51.4"},"publisher-location":"Cham","reference-count":43,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031416781","type":"print"},{"value":"9783031416798","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-41679-8_21","type":"book-chapter","created":{"date-parts":[[2023,8,18]],"date-time":"2023-08-18T07:02:59Z","timestamp":1692342179000},"page":"366-383","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["TBM-GAN: Synthetic Document Generation with\u00a0Degraded Background"],"prefix":"10.1007","author":[{"given":"Arnab","family":"Poddar","sequence":"first","affiliation":[]},{"given":"Soumyadeep","family":"Dey","sequence":"additional","affiliation":[]},{"given":"Pratik","family":"Jawanpuria","sequence":"additional","affiliation":[]},{"given":"Jayanta","family":"Mukhopadhyay","sequence":"additional","affiliation":[]},{"given":"Prabir","family":"Kumar Biswas","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,19]]},"reference":[{"key":"21_CR1","unstructured":"Alberti, M., Seuret, M., Ingold, R., Liwicki, M.: A pitfall of unsupervised pre-training. 
arXiv preprint arXiv:1703.04332 (2017)"},{"key":"21_CR2","doi-asserted-by":"publisher","unstructured":"Baird, H.S., Bunke, H., Yamamoto, K.: Structured Document Image Analysis. Springer, Science & Business Media (2012). https:\/\/doi.org\/10.1007\/978-3-642-77281-8","DOI":"10.1007\/978-3-642-77281-8"},{"key":"21_CR3","doi-asserted-by":"crossref","unstructured":"Bhunia, A.K., Bhunia, A.K., Sain, A., Roy, P.P.: Improving document binarization via adversarial noise-texture augmentation. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 2721\u20132725. IEEE (2019)","DOI":"10.1109\/ICIP.2019.8803348"},{"key":"21_CR4","doi-asserted-by":"crossref","unstructured":"Bhunia, A.K., Khan, S., Cholakkal, H., Anwer, R.M., Khan, F.S., Shah, M.: Handwriting transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 1086\u20131094 (2021)","DOI":"10.1109\/ICCV48922.2021.00112"},{"key":"21_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"555","DOI":"10.1007\/978-3-030-86334-0_36","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"S Biswas","year":"2021","unstructured":"Biswas, S., Riba, P., Llad\u00f3s, J., Pal, U.: DocSynth: a layout guided approach for controllable document image synthesis. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12823, pp. 555\u2013568. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86334-0_36"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Cai, M., Zhang, H., Huang, H., Geng, Q., Li, Y., Huang, G.: Frequency domain image translation: more photo-realistic, better identity-preserving. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 
13930\u201313940 (2021)","DOI":"10.1109\/ICCV48922.2021.01367"},{"key":"21_CR7","doi-asserted-by":"crossref","unstructured":"Capobianco, S., Marinai, S.: Docemul: a toolkit to generate structured historical documents. In: 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR), vol. 1, pp. 1186\u20131191. IEEE (2017)","DOI":"10.1109\/ICDAR.2017.196"},{"key":"21_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"238","DOI":"10.1007\/978-3-030-86334-0_16","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"S Dey","year":"2021","unstructured":"Dey, S., Jawanpuria, P.: Light-weight document image cleanup using perceptual loss. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12823, pp. 238\u2013253. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86334-0_16"},{"key":"21_CR9","doi-asserted-by":"crossref","unstructured":"Dutta, K., Krishnan, P., Mathew, M., Jawahar, C.: Improving CNN-RNN hybrid networks for handwriting recognition. In: 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pp. 80\u201385. IEEE (2018)","DOI":"10.1109\/ICFHR-2018.2018.00023"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Fogel, S., Averbuch-Elor, H., Cohen, S., Mazor, S., Litman, R.: Scrabblegan: semi-supervised varying length handwritten text generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4324\u20134333 (2020)","DOI":"10.1109\/CVPR42600.2020.00438"},{"key":"21_CR11","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M.: Image style transfer using convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2414\u20132423 (2016)","DOI":"10.1109\/CVPR.2016.265"},{"key":"21_CR12","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. 
In: Advances in Neural Information Processing Systems (NeurIPS), pp. 2672\u20132680 (2014)"},{"key":"21_CR13","doi-asserted-by":"crossref","unstructured":"Guan, M., Ding, H., Chen, K., Huo, Q.: Improving handwritten OCR with augmented text line images synthesized from online handwriting samples by style-conditioned GAN. In: 2020 17th International Conference on Frontiers in Handwriting Recognition (ICFHR), pp. 151\u2013156. IEEE (2020)","DOI":"10.1109\/ICFHR2020.2020.00037"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"21_CR15","doi-asserted-by":"crossref","unstructured":"Jiang, L., Dai, B., Wu, W., Loy, C.C.: Focal frequency loss for image reconstruction and synthesis. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 13919\u201313929 (2021)","DOI":"10.1109\/ICCV48922.2021.01366"},{"issue":"4","key":"21_CR16","doi-asserted-by":"publisher","first-page":"62","DOI":"10.3390\/jimaging3040062","volume":"3","author":"N Journet","year":"2017","unstructured":"Journet, N., Visani, M., Mansencal, B., Van-Cuong, K., Billy, A.: Doccreator: a new software for creating synthetic ground-truthed document images. J. Imaging 3(4), 62 (2017)","journal-title":"J. Imaging"},{"key":"21_CR17","doi-asserted-by":"crossref","unstructured":"Kang, L., Riba, P., Rusinol, M., Fornes, A., Villegas, M.: Content and style aware generation of text-line images for handwriting recognition. IEEE Trans. Pattern Anal. Mach. Intell. 
(T-PAMI) 44(12), 8846\u20138860 (2021)","DOI":"10.1109\/TPAMI.2021.3122572"},{"key":"21_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"273","DOI":"10.1007\/978-3-030-58592-1_17","volume-title":"Computer Vision \u2013 ECCV 2020","author":"L Kang","year":"2020","unstructured":"Kang, L., Riba, P., Wang, Y., Rusi\u00f1ol, M., Forn\u00e9s, A., Villegas, M.: GANwriting: content-conditioned generation of styled handwritten word images. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12368, pp. 273\u2013289. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58592-1_17"},{"key":"21_CR19","unstructured":"Kieu, V., Visani, M., Journet, N., Domenger, J.P., Mullot, R.: A character degradation model for grayscale ancient document images. In: Proceedings of the 21st International Conference on Pattern Recognition (ICPR), pp. 685\u2013688. IEEE (2012)"},{"key":"21_CR20","unstructured":"Larson, S., Lim, G., Ai, Y., Kuang, D., Leach, K.: Evaluating out-of-distribution performance on document image classifiers. In: Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (NeurIPS) (2022)"},{"key":"21_CR21","unstructured":"Lee, Y., Hong, T., Kim, S.: Data augmentations for document images. In: SDU@ AAAI (2021)"},{"key":"21_CR22","doi-asserted-by":"crossref","unstructured":"Lin, Y.H., Chen, W.C., Chuang, Y.Y.: Bedsr-net: a deep shadow removal network from a single document image. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 12905\u201312914 (2020)","DOI":"10.1109\/CVPR42600.2020.01292"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Maini, S., Groleau, A., Chee, K.W., Larson, S., Boarman, J.: Augraphy: A data augmentation library for document images. 
arXiv preprint arXiv:2208.14558 (2022)","DOI":"10.1007\/978-3-031-41682-8_24"},{"issue":"1","key":"21_CR24","doi-asserted-by":"publisher","first-page":"39","DOI":"10.1007\/s100320200071","volume":"5","author":"UV Marti","year":"2002","unstructured":"Marti, U.V., Bunke, H.: The iam-database: an English sentence database for offline handwriting recognition. Int. J. Doc. Anal. Recogn. 5(1), 39\u201346 (2002)","journal-title":"Int. J. Doc. Anal. Recogn."},{"issue":"1","key":"21_CR25","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: representing scenes as neural radiance fields for view synthesis. Commun. ACM 65(1), 99\u2013106 (2021)","journal-title":"Commun. ACM"},{"key":"21_CR26","unstructured":"Mirza, M., Osindero, S.: Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784 (2014)"},{"issue":"1","key":"21_CR27","doi-asserted-by":"publisher","first-page":"62","DOI":"10.1109\/TSMC.1979.4310076","volume":"9","author":"N Otsu","year":"1979","unstructured":"Otsu, N.: A threshold selection method from gray-level histograms. IEEE Trans. Syst. Man Cybern. 9(1), 62\u201366 (1979)","journal-title":"IEEE Trans. Syst. Man Cybern."},{"key":"21_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1007\/978-3-030-86159-9_7","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021 Workshops","author":"A Poddar","year":"2021","unstructured":"Poddar, A., Chakraborty, A., Mukhopadhyay, J., Biswas, P.K.: Detection and localisation of\u00a0struck-out-strokes in\u00a0handwritten\u00a0manuscripts. In: Barney Smith, E.H., Pal, U. (eds.) ICDAR 2021. LNCS, vol. 12917, pp. 98\u2013112. Springer, Cham (2021). 
https:\/\/doi.org\/10.1007\/978-3-030-86159-9_7"},{"key":"21_CR29","doi-asserted-by":"crossref","unstructured":"Poddar, A., Chakraborty, A., Mukhopadhyay, J., Biswas, P.K.: Texrgan: a deep adversarial framework for text restoration from deformed handwritten documents. In: Proceedings of the Twelfth Indian Conference on Computer Vision, Graphics and Image Processing (ICVGIP), pp. 1\u20139 (2021)","DOI":"10.1145\/3490035.3490306"},{"key":"21_CR30","doi-asserted-by":"crossref","unstructured":"Pondenkandath, V., Alberti, M., Diatta, M., Ingold, R., Liwicki, M.: Historical document synthesis with generative adversarial networks. In: 2019 International Conference on Document Analysis and Recognition Workshops (ICDARW), vol. 5, pp. 146\u2013151. IEEE (2019)","DOI":"10.1109\/ICDARW.2019.40096"},{"key":"21_CR31","unstructured":"Rahaman, N., et al.: On the spectral bias of neural networks. In: International Conference on Machine Learning (ICML), pp. 5301\u20135310. PMLR (2019)"},{"key":"21_CR32","doi-asserted-by":"crossref","unstructured":"Seuret, M., Chen, K., Eichenbergery, N., Liwicki, M., Ingold, R.: Gradient-domain degradations for improving historical documents images layout analysis. In: 2015 13th International Conference on Document Analysis and Recognition (ICDAR), pp. 1006\u20131010. IEEE (2015)","DOI":"10.1109\/ICDAR.2015.7333913"},{"key":"21_CR33","doi-asserted-by":"crossref","unstructured":"Shi, B., Bai, X., Yao, C.: An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition. IEEE Trans. Pattern Anal. Mach. Intell. (T-PAMI) 39(11), 2298\u20132304 (2016)","DOI":"10.1109\/TPAMI.2016.2646371"},{"key":"21_CR34","doi-asserted-by":"crossref","unstructured":"Souibgui, M.A., Kessentini, Y.: De-GAN: a conditional generative adversarial network for document enhancement. IEEE Trans. Pattern Anal. Mach. Intell. 
(T-PAMI) 44(3), 1180\u20131191 (2020)","DOI":"10.1109\/TPAMI.2020.3022406"},{"key":"21_CR35","doi-asserted-by":"crossref","unstructured":"Strau\u00df, T., Leifert, G., Labahn, R., Hodel, T., M\u00fchlberger, G.: Icfhr 2018 competition on automated text recognition on a read dataset. In: 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pp. 477\u2013482. IEEE (2018)","DOI":"10.1109\/ICFHR-2018.2018.00089"},{"key":"21_CR36","first-page":"7537","volume":"33","author":"M Tancik","year":"2020","unstructured":"Tancik, M., et al.: Fourier features let networks learn high frequency functions in low dimensional domains. Adv. Neural Inf. Process. Syst. (NeurIPS) 33, 7537\u20137547 (2020)","journal-title":"Adv. Neural Inf. Process. Syst. (NeurIPS)"},{"key":"21_CR37","doi-asserted-by":"crossref","unstructured":"Tensmeyer, C., Brodie, M., Saunders, D., Martinez, T.: Generating realistic binarization data with generative adversarial networks. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 172\u2013177. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00036"},{"issue":"3","key":"21_CR38","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s42979-020-00176-1","volume":"1","author":"C Tensmeyer","year":"2020","unstructured":"Tensmeyer, C., Martinez, T.: Historical document image binarization: a review. SN Comput. Sci. 1(3), 1\u201326 (2020)","journal-title":"SN Comput. Sci."},{"key":"21_CR39","doi-asserted-by":"crossref","unstructured":"Toshevska, M., Gievska, S.: A review of text style transfer using deep learning. IEEE Trans. Artif. Intell. 
(T-AI) 3, 669\u2013684 (2021)","DOI":"10.1109\/TAI.2021.3115992"},{"key":"21_CR40","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"610","DOI":"10.1007\/978-3-030-86334-0_40","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"L V\u00f6gtlin","year":"2021","unstructured":"V\u00f6gtlin, L., Drazyk, M., Pondenkandath, V., Alberti, M., Ingold, R.: Generating synthetic handwritten historical documents with OCR constrained GANs. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12823, pp. 610\u2013625. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86334-0_40"},{"key":"21_CR41","doi-asserted-by":"crossref","unstructured":"Wigington, C., Stewart, S., Davis, B., Barrett, B., Price, B., Cohen, S.: Data augmentation for recognition of handwritten words and lines using a CNN-LSTM network. In: 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR), vol. 1, pp. 639\u2013645. IEEE (2017)","DOI":"10.1109\/ICDAR.2017.110"},{"key":"21_CR42","doi-asserted-by":"crossref","unstructured":"Zhong, Z., Zheng, L., Kang, G., Li, S., Yang, Y.: Random erasing data augmentation. In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), vol. 34, pp. 13001\u201313008 (2020)","DOI":"10.1609\/aaai.v34i07.7000"},{"key":"21_CR43","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV), pp. 
2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition - ICDAR 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-41679-8_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,19]],"date-time":"2023-12-19T07:13:32Z","timestamp":1702970012000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-41679-8_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031416781","9783031416798"],"references-count":43,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-41679-8_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"19 August 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"San Jos\u00e9, CA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 August 
2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icdar2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"316","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"154","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"49% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of 
Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.89","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1.50","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Number and type of other papers accepted : IJDAR track papers","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}