{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T00:22:29Z","timestamp":1775607749151,"version":"3.50.1"},"reference-count":65,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2022,2,2]],"date-time":"2022-02-02T00:00:00Z","timestamp":1643760000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,2,2]],"date-time":"2022-02-02T00:00:00Z","timestamp":1643760000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2022,3]]},"DOI":"10.1007\/s11263-021-01566-5","type":"journal-article","created":{"date-parts":[[2022,2,2]],"date-time":"2022-02-02T04:43:13Z","timestamp":1643776993000},"page":"820-835","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Network Adjustment: Channel and Block Search Guided by Resource Utilization Ratio"],"prefix":"10.1007","volume":"130","author":[{"given":"Zhengsu","family":"Chen","sequence":"first","affiliation":[]},{"given":"Lingxi","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Jianwei","family":"Niu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8163-3237","authenticated-orcid":false,"given":"Xuefeng","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Longhui","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Qi","family":"Tian","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,2,2]]},"reference":[{"key":"1566_CR1","unstructured":"Cai, H., Zhu, L., & Han, S. (2018). Proxylessnas: Direct neural architecture search on target task and hardware. arXiv:1812.00332"},{"key":"1566_CR2","unstructured":"Chen, K., Wang, J., Pang, J., Cao, Y., Xiong, Y., Li, X., Sun, S., Feng, W., Liu, Z., Xu,J., Zhang, Z., Cheng, D., Zhu, C., Cheng, T., Zhao, Q., Li, B., Lu, X., Zhu, R., Wu, Y., Dai, J., Wang, J., Shi, J., Ouyang, W., Loy, C. C., & Lin, D. (2019a). MMDetection: Open mmlab detection toolbox and benchmark. arXiv:1906.07155"},{"key":"1566_CR3","unstructured":"Chen, Y., Yang, T., Zhang, X., Meng, G., Pan, C., & Sun, J. (2019b). Detnas: Neural architecture search on object detection. arXiv:1903.10979"},{"key":"1566_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Z., Niu, J., Xie, L., Liu, X., Wei, L., & Tian, Q. (2020). Network adjustment: Channel search guided by flops utilization ratio. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR42600.2020.01067"},{"key":"1566_CR5","unstructured":"Chu, X., Zhang, B., Xu, R., & Li, J. (2019). Fairnas: Rethinking evaluation fairness of weight sharing neural architecture search. arXiv:1907.01845"},{"key":"1566_CR6","unstructured":"Dong, X., & Yang, Y. (2019). Network pruning via transformable architecture search. arXiv:1905.09717"},{"key":"1566_CR7","doi-asserted-by":"crossref","unstructured":"Dong, X., Huang, J., Yang, Y., & Yan, S. (2017). More is less: A more complicated network with less inference complexity. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2017.205"},{"key":"1566_CR8","unstructured":"Frankle, J., & Carbin, M. (2018). The lottery ticket hypothesis: Finding sparse, trainable neural networks. arXiv:1803.03635"},{"key":"1566_CR9","unstructured":"Ghiasi, G., Lin, T. Y., & Le, Q. V. (2018). Dropblock: A regularization method for convolutional networks. arXiv:1810.12890"},{"key":"1566_CR10","doi-asserted-by":"crossref","unstructured":"Girshick, R., Donahue, J., Darrell, T., & Malik, J. (2014). Rich feature hierarchies for accurate object detection and semantic segmentation. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2014.81"},{"key":"1566_CR11","doi-asserted-by":"crossref","unstructured":"Gordon, A., Eban, E., Nachum, O., Chen, B., Wu, H., Yang, T. J., & Choi, E. (2018). Morphnet: Fast & simple resource-constrained structure learning of deep networks. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2018.00171"},{"key":"1566_CR12","doi-asserted-by":"crossref","unstructured":"Guo, Z., Zhang, X., Mu, H., Heng, W., Liu, Z., Wei, Y., & Sun, J. (2019). Single path one-shot neural architecture search with uniform sampling. arXiv:1904.00420","DOI":"10.1007\/978-3-030-58517-4_32"},{"key":"1566_CR13","doi-asserted-by":"crossref","unstructured":"Han, D., Kim, J., & Kim, J. (2017). Deep pyramidal residual networks. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2017.668"},{"key":"1566_CR14","unstructured":"Han, S., Mao, H., & Dally, W. J. (2015a). Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv:1510.00149"},{"key":"1566_CR15","unstructured":"Han, S., Pool, J., Tran, J., & Dally, W. (2015b). Learning both weights and connections for efficient neural network. In: Advances in Neural Information Processing Systems"},{"key":"1566_CR16","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2016.90"},{"key":"1566_CR17","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., & Girshick, R. (2017). Mask r-cnn. In: International Conference on Computer Vision, IEEE","DOI":"10.1109\/ICCV.2017.322"},{"key":"1566_CR18","doi-asserted-by":"crossref","unstructured":"He, Y., Kang, G., Dong, X., Fu, Y., & Yang, Y. (2018). Soft filter pruning for accelerating deep convolutional neural networks. arXiv:1808.06866","DOI":"10.24963\/ijcai.2018\/309"},{"key":"1566_CR19","doi-asserted-by":"crossref","unstructured":"He, Y., Liu, P., Wang, Z., Hu, Z., & Yang, Y. (2019). Filter pruning via geometric median for deep convolutional neural networks acceleration. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2019.00447"},{"key":"1566_CR20","unstructured":"Hinton, G., Vinyals, O., & Dean, J. (2015) Distilling the knowledge in a neural network. arXiv:1503.02531"},{"key":"1566_CR21","doi-asserted-by":"crossref","unstructured":"Howard, A., Sandler, M., Chu, G., Chen, L. C., Chen, B., & Tan, M., Wang W, Zhu Y, Pang R, Vasudevan V, et\u00a0al. (2019). Searching for mobilenetv3. arXiv:1905.02244","DOI":"10.1109\/ICCV.2019.00140"},{"key":"1566_CR22","doi-asserted-by":"crossref","unstructured":"Huang, G., Sun, Y., Liu, Z., Sedra, D., & Weinberger, K. Q. (2016) Deep networks with stochastic depth. In: European Conference on Computer Vision. Springer","DOI":"10.1007\/978-3-319-46493-0_39"},{"key":"1566_CR23","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van, Der\u00a0Maaten, L., & Weinberger, K. Q. (2017) Densely connected convolutional networks. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2017.243"},{"key":"1566_CR24","doi-asserted-by":"crossref","unstructured":"Huang, Z., & Wang, N. (2018). Data-driven sparse structure selection for deep neural networks. In: European Conference on Computer Vision","DOI":"10.1007\/978-3-030-01270-0_19"},{"key":"1566_CR25","unstructured":"Ioffe, S., & Szegedy, C. (2015). Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv:1502.03167"},{"key":"1566_CR26","unstructured":"Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images"},{"key":"1566_CR27","unstructured":"Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). Imagenet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems"},{"key":"1566_CR28","unstructured":"Larsson, G., Maire, M., & Shakhnarovich, G. (2016). Fractalnet: Ultra-deep neural networks without residuals. arXiv:1605.07648"},{"issue":"7553","key":"1566_CR29","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436\u2013444.","journal-title":"Nature"},{"key":"1566_CR30","unstructured":"Li, H., Kadav, A., Durdanovic, I., Samet, H, & Graf, H. P. (2016). Pruning filters for efficient convnets. arXiv:1608.08710"},{"key":"1566_CR31","doi-asserted-by":"crossref","unstructured":"Li, X., Chen, S., Hu, X., & Yang, J. (2019). Understanding the disharmony between dropout and batch normalization by variance shift. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2019.00279"},{"key":"1566_CR32","unstructured":"Liu, H., Simonyan, K., & Yang, Y. (2018a). Darts: Differentiable architecture search. arXiv:1806.09055"},{"key":"1566_CR33","doi-asserted-by":"crossref","unstructured":"Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., & Zhang, C. (2017). Learning efficient convolutional networks through network slimming. In: International Conference on Computer Vision","DOI":"10.1109\/ICCV.2017.298"},{"key":"1566_CR34","unstructured":"Liu, Z., Sun, M., Zhou, T., Huang, G., & Darrell, T. (2018b). Rethinking the value of network pruning. arXiv:1810.05270"},{"key":"1566_CR35","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., & Darrell, T. (2015). Fully convolutional networks for semantic segmentation. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"1566_CR36","doi-asserted-by":"crossref","unstructured":"Lym, S., Choukse, E., Zangeneh, S., Wen, W., Erez, M.&, Shanghavi, S. (2019). Prunetrain: Gradual structured pruning from scratch for faster neural network training. arXiv:1901.09290","DOI":"10.1145\/3295500.3356156"},{"key":"1566_CR37","unstructured":"Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., & Lerer, A. (2017). Automatic differentiation in pytorch"},{"key":"1566_CR38","unstructured":"Pham, H., Guan, M. Y., Zoph, B., Le, Q. V., & Dean, J. (2018). Efficient neural architecture search via parameter sharing. arXiv:1802.03268"},{"key":"1566_CR39","doi-asserted-by":"crossref","unstructured":"Qiao, S., Lin, Z., Zhang, J.,&Yuille, A. L. (2019). Neural rejuvenation: Improving deep network training by enhancing computational resource utilization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 61\u201371","DOI":"10.1109\/CVPR.2019.00015"},{"key":"1566_CR40","doi-asserted-by":"crossref","unstructured":"Real, E., Aggarwal, A., Huang, Y., & Le, Q. V. (2019). Regularized evolution for image classifier architecture search. In: AAAI Conference on Artificial Intelligence, 33, 4780\u20134789","DOI":"10.1609\/aaai.v33i01.33014780"},{"key":"1566_CR41","unstructured":"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems"},{"issue":"3","key":"1566_CR42","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., et al. (2015). Imagenet large scale visual recognition challenge. International Journal of Computer Vision, 115(3), 211\u2013252.","journal-title":"International Journal of Computer Vision"},{"key":"1566_CR43","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., & Chen, L. C. (2018). Mobilenetv2: Inverted residuals and linear bottlenecks. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2018.00474"},{"key":"1566_CR44","unstructured":"Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv:1409.1556"},{"issue":"1","key":"1566_CR45","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava, N., Hinton, G. E., Krizhevsky, A., Sutskever, I., & Salakhutdinov, R. (2014). Dropout: A simple way to prevent neural networks from overfitting. Journal of Machine Learning Research, 15(1), 1929\u20131958.","journal-title":"Journal of Machine Learning Research"},{"key":"1566_CR46","doi-asserted-by":"crossref","unstructured":"Stamoulis, D., Ding, R., Wang, D., Lymberopoulos, D., Priyantha, B., Liu, J., & Marculescu, D. (2019) Single-path nas: Designing hardware-efficient convnets in less than 4 hours. arXiv:1904.02877","DOI":"10.1007\/978-3-030-46147-8_29"},{"key":"1566_CR47","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., & Rabinovich, A. (2015). Going deeper with convolutions. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"1566_CR48","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Ioffe, S., Vanhoucke, V., & Alemi, A. (2016a). Inception-v4, inception-resnet and the impact of residual connections on learning. arXiv:1602.07261","DOI":"10.1609\/aaai.v31i1.11231"},{"key":"1566_CR49","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., & Wojna, Z. (2016b). Rethinking the inception architecture for computer vision. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2016.308"},{"key":"1566_CR50","unstructured":"Tan, M., & Le, Q. V. (2019). Efficientnet: Rethinking model scaling for convolutional neural networks. arXiv:1905.11946"},{"key":"1566_CR51","unstructured":"Tan, M., & Le, Q. V. (2021). Efficientnetv2: Smaller models and faster training. arXiv:2104.00298"},{"key":"1566_CR52","doi-asserted-by":"crossref","unstructured":"Tan, M., Chen, B., Pang, R., Vasudevan, V., Sandler, M., Howard, A., & Le, Q. V. (2019) Mnasnet: Platform-aware neural architecture search for mobile. In: Computer Vision and Pattern Recognition, pp. 2820\u20132828","DOI":"10.1109\/CVPR.2019.00293"},{"key":"1566_CR53","doi-asserted-by":"crossref","unstructured":"Tompson, J., Goroshin, R., Jain, A., LeCun, Y., & Bregler, C. (2015). Efficient object localization using convolutional networks. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2015.7298664"},{"key":"1566_CR54","unstructured":"Veit, A., Wilber, M. J., & Belongie, S. (2016). Residual networks behave like ensembles of relatively shallow networks. In: Advances in Neural Information Processing Systems"},{"key":"1566_CR55","doi-asserted-by":"crossref","unstructured":"Wan, A., Dai, X., Zhang, P., He, Z., Tian, Y., Xie, S., Wu, B., Yu, M., Xu, T.,&Chen, K., et\u00a0al. (2020). Fbnetv2: Differentiable neural architecture search for spatial and channel dimensions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 12965\u201312974","DOI":"10.1109\/CVPR42600.2020.01298"},{"key":"1566_CR56","unstructured":"Wang, J., Bai, H., Wu, J., Shi, X., Huang, J., King, I., Lyu, M., & Cheng, J. (2020a). Revisiting parameter sharing for automatic neural channel number search. Advances in Neural Information Processing Systems 33"},{"key":"1566_CR57","doi-asserted-by":"crossref","unstructured":"Wang, X., Yu, F., Dou, Z. Y., Darrell, T., & Gonzalez, J. E. (2018). Skipnet: Learning dynamic routing in convolutional networks. In: European Conference on Computer Vision","DOI":"10.1007\/978-3-030-01261-8_25"},{"key":"1566_CR58","doi-asserted-by":"crossref","unstructured":"Wang, Y., Zhang, X., Xie, L., Zhou, J., Su, H., Zhang, B., & Hu, X. (2020b). Pruning from scratch. In: AAAI Conference on Artificial Intelligence, pp 12273\u201312280","DOI":"10.1609\/aaai.v34i07.6910"},{"key":"1566_CR59","unstructured":"Wen, W., Wu, C., Wang, Y., Chen, Y., & Li, H. (2016). Learning structured sparsity in deep neural networks. In: Advances in Neural Information Processing Systems"},{"key":"1566_CR60","doi-asserted-by":"crossref","unstructured":"Wu, B., Dai, X., Zhang, P., Wang, Y., Sun, F., Wu, Y., Tian, Y., Vajda, P., Jia, Y., & Keutzer, K. (2019). Fbnet: Hardware-aware efficient convnet design via differentiable neural architecture search. In: Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2019.01099"},{"key":"1566_CR61","doi-asserted-by":"crossref","unstructured":"Xie, S., Girshick, R., Doll\u00e1r, P., Tu, Z., & He, K. (2016). Aggregated residual transformations for deep neural networks. arXiv:1611.05431","DOI":"10.1109\/CVPR.2017.634"},{"key":"1566_CR62","unstructured":"Xu, Y., Xie, L., Zhang, X., Chen, X., Shi, B., Tian, Q., & Xiong, H. (2020) Latency-aware differentiable neural architecture search. arXiv preprint arXiv:2001.06392"},{"key":"1566_CR63","unstructured":"Yu, J., & Huang, T. (2019). Network slimming by slimmable networks: Towards one-shot architecture search for channel numbers. arXiv:1903.11728"},{"key":"1566_CR64","doi-asserted-by":"crossref","unstructured":"Zagoruyko, S., & Komodakis, N. (2016). Wide residual networks. arXiv:1605.07146","DOI":"10.5244\/C.30.87"},{"key":"1566_CR65","doi-asserted-by":"crossref","unstructured":"Zoph, B., Vasudevan, V., Shlens, J., & Le, Q. V. (2017). Learning transferable architectures for scalable image recognition. arXiv:1707.07012","DOI":"10.1109\/CVPR.2018.00907"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-021-01566-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-021-01566-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-021-01566-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,25]],"date-time":"2023-01-25T14:53:44Z","timestamp":1674658424000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-021-01566-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,2,2]]},"references-count":65,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2022,3]]}},"alternative-id":["1566"],"URL":"https:\/\/doi.org\/10.1007\/s11263-021-01566-5","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,2,2]]},"assertion":[{"value":"31 March 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 December 2021","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 February 2022","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}