{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T19:06:25Z","timestamp":1761419185651,"version":"build-2065373602"},"reference-count":39,"publisher":"Springer Science and Business Media LLC","issue":"14","license":[{"start":{"date-parts":[[2025,10,3]],"date-time":"2025-10-03T00:00:00Z","timestamp":1759449600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,10,3]],"date-time":"2025-10-03T00:00:00Z","timestamp":1759449600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["32373184"],"award-info":[{"award-number":["32373184"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"National Key Technology R&D Program of China","award":["2022YFD2001701"],"award-info":[{"award-number":["2022YFD2001701"]}]},{"name":"Reform and Development Project of Beijing Academy of Agricultural and Forestry Sciences","award":["GGFZ20250410"],"award-info":[{"award-number":["GGFZ20250410"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s11760-025-04834-w","type":"journal-article","created":{"date-parts":[[2025,10,3]],"date-time":"2025-10-03T18:48:29Z","timestamp":1759517309000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["LMVR: Lightweight multitask visual recognition with Cross Residual Log-likelihood Estimation"],"prefix":"10.1007","volume":"19","author":[{"given":"Zhenxi","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Chunjiang","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Xinting","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Chao","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,10,3]]},"reference":[{"key":"4834_CR1","doi-asserted-by":"publisher","first-page":"3873","DOI":"10.1109\/TIP.2025.3579200","volume":"34","author":"R Jiao","year":"2025","unstructured":"Jiao, R., Liu, Q., Zhang, Y., Pu, B., Xue, B., Cheng, Y., Yang, K., Liu, X., Qu, J., Jin, C.: Zhang: recistsurv: hybrid multi-task transformer for hepatocellular carcinoma response and survival evaluation. IEEE Trans. Image Process. 34, 3873\u20133888 (2025)","journal-title":"IEEE Trans. Image Process."},{"key":"4834_CR2","doi-asserted-by":"crossref","unstructured":"Chengfei\u00a0Yang, J.G.C.W., Xing\u00a0Shao,C.Q.: Bearing fault diagnosis method based on multi-scale residual and multi task depthwise separable network. Signal, Image and Video Processing. 19, 771 (2025)","DOI":"10.1007\/s11760-025-04300-7"},{"key":"4834_CR3","doi-asserted-by":"crossref","unstructured":"Bahroun, S.: Face recognition in unconstrained environments with multimodal 2d\/3d bilstm-cnn parallel model. Signal, Image and Video Process. 19(570) (2025)","DOI":"10.1007\/s11760-025-04136-1"},{"key":"4834_CR4","doi-asserted-by":"publisher","first-page":"760","DOI":"10.1007\/s11760-025-04370-7","volume":"19","author":"W Chen","year":"2025","unstructured":"Chen, W., Liang, Z.: Lmf-yolo: An improved yolo algorithm for road object detection in autonomous driving. Signal, Image and Video Process 19, 760 (2025)","journal-title":"Signal, Image and Video Process"},{"key":"4834_CR5","first-page":"3072","volume":"37","author":"Y Xu","year":"2023","unstructured":"Xu, Y., Yang, Y., Zhang, L.: Demt: Deformable mixer transformer for multi-task learning of dense prediction. Proc. AAAI Conf. Artif. Intell 37, 3072\u20133080 (2023)","journal-title":"Proc. AAAI Conf. Artif. Intell"},{"key":"4834_CR6","doi-asserted-by":"crossref","unstructured":"Heuer, F., Mantowsky, S., Bukhari, S., Schneider, G.: Multitask-centernet (mcn): Efficient and diverse multitask learning using an anchor free approach. In: Proc. IEEE\/CVF Int. Conf. Comput. Vis., pp. 997\u20131005 (2021)","DOI":"10.1109\/ICCVW54120.2021.00116"},{"key":"4834_CR7","doi-asserted-by":"publisher","first-page":"3703","DOI":"10.1109\/TIP.2025.3572760","volume":"34","author":"M Zha","year":"2025","unstructured":"Zha, M., Wang, G., Pei, Y., Li, T., Tang, X., Li, C., Yang, Y., Tao Shen, H.: Heterogeneous experts and hierarchical perception for underwater salient object detection. IEEE Trans. Image Process. 34, 3703\u20133717 (2025)","journal-title":"IEEE Trans. Image Process."},{"key":"4834_CR8","doi-asserted-by":"crossref","unstructured":"Jiang, H., Zhang, R., Zhou, Y., Wang, Y., Chen, H.: Donet: Deep de-overlapping network for cytology instance segmentation. In: Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit., pp. 15641\u201315650 (2023)","DOI":"10.1109\/CVPR52729.2023.01501"},{"key":"4834_CR9","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1109\/TIP.2024.3490401","volume":"34","author":"X Wan","year":"2025","unstructured":"Wan, X., Chen, Z., Zhao, X.: Rsb-pose: robust short-baseline binocular 3d human pose estimation with occlusion handling. IEEE Transactions on Image Process. 34, 60\u201372 (2025)","journal-title":"IEEE Transactions on Image Process."},{"issue":"7553","key":"4834_CR10","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun, Y., Bengio, Y., Hinton, G.: Deep learning. Nature 521(7553), 436\u2013444 (2015)","journal-title":"Nature"},{"key":"4834_CR11","doi-asserted-by":"crossref","unstructured":"Papandreou, G., Zhu, T., Chen, L.-C., Gidaris, S., Tompson, J., Murphy, K.: Personlab: Person pose estimation and instance segmentation with a bottom-up, part-based, geometric embedding model. In: Proc. Eur. Conf. Comput. Vis. (ECCV), pp. 269\u2013286 (2018)","DOI":"10.1007\/978-3-030-01264-9_17"},{"key":"4834_CR12","unstructured":"Gevorgyan, Z.: Siou loss: More powerful learning for bounding box regression. arXiv Prepr. arXiv:2205.12740 (2022)"},{"key":"4834_CR13","unstructured":"Duan, K., Xie, L., Qi, H., Bai, S., Huang, Q., Tian, Q.: Location-sensitive visual recognition with cross-iou loss. arXiv Prepr. arXiv:2104.04899 (2021)"},{"key":"4834_CR14","doi-asserted-by":"crossref","unstructured":"Wei, F., Sun, X., Li, H., Wang, J., Lin, S.: Point-set anchors for object detection, instance segmentation and pose estimation. In: Proc. Eur. Conf. Comput. Vis. (ECCV), pp. 527\u2013544 (2020)","DOI":"10.1007\/978-3-030-58607-2_31"},{"key":"4834_CR15","doi-asserted-by":"crossref","unstructured":"Duan, K., Bai, S., Xie, L., Qi, H., Huang, Q., Tian, Q.: Centernet: Keypoint triplets for object detection. In: Proc. IEEE\/CVF Int. Conf. Comput. Vis., pp. 6569\u20136578 (2019)","DOI":"10.1109\/ICCV.2019.00667"},{"key":"4834_CR16","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"2","key":"4834_CR17","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1109\/TPAMI.2019.2938758","volume":"43","author":"S-H Gao","year":"2019","unstructured":"Gao, S.-H., Cheng, M.-M., Zhao, K., Zhang, X.-Y., Yang, M.-H., Torr, P.: Res2net: a new multi-scale backbone architecture. IEEE Trans. Pattern Anal. Mach. Intell. 43(2), 652\u2013662 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"4834_CR18","doi-asserted-by":"crossref","unstructured":"Li, D., Hu, J., Wang, C., Li, X., She, Q., Zhu, L., Zhang, T., Chen, Q.: Involution: Inverting the inherence of convolution for visual recognition. In: Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit., pp. 12321\u201312330 (2021)","DOI":"10.1109\/CVPR46437.2021.01214"},{"key":"4834_CR19","doi-asserted-by":"crossref","unstructured":"Li, J., Bian, S., Zeng, A., Wang, C., Pang, B., Liu, W., Lu, C.: Human pose regression with residual log-likelihood estimation. In: Proc. IEEE\/CVF Int. Conf. Comput. Vis., pp. 11025\u201311034 (2021)","DOI":"10.1109\/ICCV48922.2021.01084"},{"key":"4834_CR20","doi-asserted-by":"crossref","unstructured":"Yang, Z., Liu, S., Hu, H., Wang, L., Lin, S.: Reppoints: Point set representation for object detection. In: Proc. IEEE\/CVF Int. Conf. Comput. Vis. (ICCV), pp. 9657\u20139666 (2019)","DOI":"10.1109\/ICCV.2019.00975"},{"issue":"6","key":"4834_CR21","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2017","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: towards real-time object detection with region proposal networks. IEEE Trans. Pattern Anal. Mach. Intell. 39(6), 1137\u20131149 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"4834_CR22","doi-asserted-by":"crossref","unstructured":"Cai, Z., Vasconcelos, N.: Cascade r-cnn: Delving into high quality object detection. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 6154\u20136162 (2018)","DOI":"10.1109\/CVPR.2018.00644"},{"key":"4834_CR23","doi-asserted-by":"crossref","unstructured":"Wang, C.-Y., Bochkovskiy, A., Liao, H.-Y.M.: Yolov7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. In: Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR), pp. 7464\u20137475 (2023)","DOI":"10.1109\/CVPR52729.2023.00721"},{"key":"4834_CR24","doi-asserted-by":"crossref","unstructured":"Zand, M., Etemad, A., Greenspan, M.: Objectbox: From centers to boxes for anchor-free object detection. In: European Conference on Computer Vision. European Conference on Computer Vision, pp. 390\u2013406 (2022)","DOI":"10.1007\/978-3-031-20080-9_23"},{"key":"4834_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask r-cnn. In: Proc. IEEE Int. Conf. Comput. Vis., pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"4834_CR26","doi-asserted-by":"crossref","unstructured":"Xie, E., Sun, P., Song, X., Wang, W., Liu, X., Liang, D., Shen, C., Luo, P.: Polarmask: Single shot instance segmentation with polar representation. In: Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR), pp. 12193\u201312202 (2020)","DOI":"10.1109\/CVPR42600.2020.01221"},{"key":"4834_CR27","doi-asserted-by":"crossref","unstructured":"Peng, S., Jiang, W., Pi, H., Li, X., Bao, H., Zhou, X.: Deep snake for real-time instance segmentation. In: Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. (CVPR), pp. 8533\u20138542 (2020)","DOI":"10.1109\/CVPR42600.2020.00856"},{"key":"4834_CR28","doi-asserted-by":"crossref","unstructured":"Liu, J.-J., Hou, Q., Cheng, M.-M., Wang, C., Feng, J.: Improving convolutional networks with self-calibrated convolutions. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 10096\u201310105 (2020)","DOI":"10.1109\/CVPR42600.2020.01011"},{"key":"4834_CR29","first-page":"5621","volume":"33","author":"Y Chen","year":"2020","unstructured":"Chen, Y., Zhang, Z., Cao, Y., Wang, L., Lin, S., Hu, H.: Reppoints v2: verification meets regression for object detection. Adv. Neural. Inf. Process. Syst. 33, 5621\u20135631 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"4834_CR30","doi-asserted-by":"crossref","unstructured":"Mao, W., Ge, Y., Shen, C., Tian, Z., Wang, X., Wang, Z., Hengel, A.V.: Poseur: Direct human pose regression with transformers. In: Eur. Conf. Comput. Vis., pp. 72\u201388 (2022)","DOI":"10.1007\/978-3-031-20068-7_5"},{"key":"4834_CR31","doi-asserted-by":"crossref","unstructured":"Tian, Z., Shen, C., Chen, H., He, T.: Fcos: Fully convolutional one-stage object detection. In: Proc. IEEE\/CVF Int. Conf. Comput. Vis. (ICCV), pp. 9627\u20139636 (2019)","DOI":"10.1109\/ICCV.2019.00972"},{"key":"4834_CR32","doi-asserted-by":"crossref","unstructured":"Zhang, S., Chi, C., Yao, Y., Lei, Z., Li, S.Z.: Bridging the gap between anchor-based and anchor-free detection via adaptive training sample selection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9759\u20139768 (2020)","DOI":"10.1109\/CVPR42600.2020.00978"},{"key":"4834_CR33","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L.: Microsoft coco: Common objects in context. In: Comput. Vis. ECCV 2014, Pt V, pp. 740\u2013755 (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"issue":"3","key":"4834_CR34","first-page":"2567","volume":"36","author":"Y Wang","year":"2022","unstructured":"Wang, Y., Zhang, X., Yang, T., Sun, J.: Anchor detr: query design for transformer-based detector. Proc. AAAI Conf. Artif. Intell. 36(3), 2567\u20132575 (2022)","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"4834_CR35","unstructured":"Jocher, G., Chaurasia, A., Qiu, J.: YOLO by Ultralytics (2023). Accessed: February 30, 2023"},{"key":"4834_CR36","doi-asserted-by":"crossref","unstructured":"Neff, C., Sheth, A., Furgurson, S., Tabkhi, H.: Efficienthrnet: Efficient scaling for lightweight high-resolution multi-person pose estimation. arXiv preprint arXiv:2007.08090 (2020)","DOI":"10.1007\/s11554-021-01132-9"},{"key":"4834_CR37","doi-asserted-by":"crossref","unstructured":"Maji, D., Nagori, S., Mathew, M., Poddar, D.: Yolo-pose: Enhancing yolo for multi person pose estimation using object keypoint similarity loss. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2637\u20132646 (2022)","DOI":"10.1109\/CVPRW56347.2022.00297"},{"key":"4834_CR38","unstructured":"Mehta, S., Rastegari, M.: Separable self-attention for mobile vision transformers. arXiv preprint arXiv:2206.02680 (2022)"},{"key":"4834_CR39","unstructured":"Yu, G., Chang, Q., Lv, W.: Pp-picodet: A better real-time object detector on mobile devices. arXiv preprint arXiv:2111.00902 (2021)"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-04834-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-025-04834-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-04834-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T18:59:17Z","timestamp":1761418757000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-025-04834-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,3]]},"references-count":39,"journal-issue":{"issue":"14","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["4834"],"URL":"https:\/\/doi.org\/10.1007\/s11760-025-04834-w","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"type":"print","value":"1863-1703"},{"type":"electronic","value":"1863-1711"}],"subject":[],"published":{"date-parts":[[2025,10,3]]},"assertion":[{"value":"2 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 August 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 September 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 October 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"The authors declare no competing interests.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"1236"}}