{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T16:30:03Z","timestamp":1775233803889,"version":"3.50.1"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Strategic Information and Communications R&D Promotion Programme of Ministry of Internal Affairs and Communications, Japan","award":["172107101"],"award-info":[{"award-number":["172107101"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2020,9]]},"DOI":"10.1109\/tcsvt.2019.2935128","type":"journal-article","created":{"date-parts":[[2019,8,13]],"date-time":"2019-08-13T20:13:34Z","timestamp":1565727214000},"page":"2917-2931","source":"Crossref","is-referenced-by-count":336,"title":["Data Augmentation Using Random Image Cropping and Patching for Deep CNNs"],"prefix":"10.1109","volume":"30","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0723-0119","authenticated-orcid":false,"given":"Ryo","family":"Takahashi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0642-4800","authenticated-orcid":false,"given":"Takashi","family":"Matsubara","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7160-3752","authenticated-orcid":false,"given":"Kuniaki","family":"Uehara","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","first-page":"1","article-title":"Shake-shake regularization","author":"gastaldi","year":"2017","journal-title":"Proc ICLR Workshop"},{"key":"ref38","first-page":"1026","article-title":"Delving deep into rectifiers: Surpassing human-level performance on imagenet classification","author":"he","year":"2016","journal-title":"Proc IEEE Int Conf Comput Vis (ICCV)"},{"key":"ref33","first-page":"562","article-title":"Deeply-supervised nets","volume":"2","author":"lee","year":"2015","journal-title":"Proc 14th Int Conf Artif Intell Statist (AISTATS)"},{"key":"ref32","first-page":"1","article-title":"Distilling the knowledge in a neural network","author":"hinton","year":"2014","journal-title":"Proc Workshop Adv Neural Inf Process Syst (NIPS)"},{"key":"ref31","first-page":"1","article-title":"Neural architecture search with reinforcement learning","author":"zoph","year":"2017","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref30","article-title":"AutoAugment: Learning augmentation policies from data","author":"cubuk","year":"2018","journal-title":"arXiv 1805 09501"},{"key":"ref37","first-page":"807","article-title":"Rectified linear units improve restricted boltzmann machines","author":"nair","year":"2010","journal-title":"Proc 27th Int Conf Mach Learn (ICML)"},{"key":"ref36","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"Proc 32nd Int Conf Mach Learn (ICML)"},{"key":"ref35","first-page":"1","article-title":"Striving for simplicity: The all convolutional net","author":"springenberg","year":"2015","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref34","first-page":"1","article-title":"FitNets: Hints for thin deep nets","author":"romero","year":"2015","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref10","first-page":"1","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2015","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref40","first-page":"1","article-title":"Unifying visual-semantic embeddings with multimodal neural language models","author":"kiros","year":"2014","journal-title":"Proc Workshop Adv Neural Inf Process Syst (NIPS)"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref12","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref13","article-title":"Improving neural networks by preventing co-adaptation of feature detectors","author":"hinton","year":"2012","journal-title":"arXiv 1207 0580"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.03.013"},{"key":"ref15","first-page":"630","article-title":"Identity mappings in deep residual networks","volume":"9908","author":"he","year":"2016","journal-title":"Vision Computer"},{"key":"ref16","first-page":"87.1","article-title":"Wide residual networks","author":"zagoruyko","year":"2016","journal-title":"Proc Brit Mach Vis Conf (BMVC)"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.668"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/0893-6080(91)90033-2"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.97"},{"key":"ref27","first-page":"786","article-title":"RICAP: Random image cropping and patching data augmentation for deep CNNs","author":"takahashi","year":"2018","journal-title":"Proc Asian Conf Mach Learn"},{"key":"ref3","first-page":"1","article-title":"OverFeat: Integrated recognition, localization and detection using convolutional networks","author":"sermanet","year":"2014","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref6","first-page":"1237","article-title":"Flexible, high performance convolutional neural networks for image classification","author":"ciresan","year":"2011","journal-title":"Proc Int Joint Conf Artif Intell (IJCAI)"},{"key":"ref29","first-page":"1","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2015","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref5","first-page":"1","article-title":"Visualizing deep neural network decisions: Prediction difference analysis","author":"zintgraf","year":"2017","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref8","first-page":"1","article-title":"Learning multiple layers of features from tiny images","author":"krizhevsky","year":"2009"},{"key":"ref7","first-page":"3642","article-title":"Multi-column deep neural networks for image classification","author":"cireşan","year":"2012","journal-title":"Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref2","first-page":"818","article-title":"Visualizing and understanding convolutional networks","author":"zeiler","year":"2014","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.4.541"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.91"},{"key":"ref20","article-title":"Improved regularization of convolutional neural networks with cutout","author":"devries","year":"2017","journal-title":"arXiv 1708 04552"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.133"},{"key":"ref22","first-page":"1","article-title":"mixup: Beyond empirical risk minimization","author":"zhang","year":"2018","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref47","article-title":"YOLOv3: An incremental improvement","author":"redmon","year":"2018","journal-title":"arXiv 1804 02767"},{"key":"ref21","article-title":"Random erasing data augmentation","author":"zhong","year":"2017","journal-title":"arXiv 1708 04896"},{"key":"ref42","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv 1412 6980"},{"key":"ref24","article-title":"ImageNet large scale visual recognition challenge","author":"russakovsky","year":"2014","journal-title":"arXiv 1409 0575"},{"key":"ref41","first-page":"1","article-title":"VSE++: Improving visual-semantic embeddings with hard negatives","author":"faghri","year":"2018","journal-title":"Proc Brit Mach Vis Conf (BMVC)"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref44","first-page":"480","article-title":"Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline)","author":"sun","year":"2018","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref26","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref43","first-page":"13:1","article-title":"Person re-identification: Past, present and future","volume":"14","author":"zheng","year":"2017","journal-title":"ACM Trans Multimedia Comput Commun Appl"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.319"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/76\/9185141\/08795523.pdf?arnumber=8795523","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T14:39:52Z","timestamp":1651070392000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8795523\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,9]]},"references-count":47,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2019.2935128","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,9]]}}}