{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T04:43:06Z","timestamp":1774413786195,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":20,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819964970","type":"print"},{"value":"9789819964987","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-981-99-6498-7_5","type":"book-chapter","created":{"date-parts":[[2023,10,12]],"date-time":"2023-10-12T20:41:21Z","timestamp":1697143281000},"page":"48-59","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Detection and Positioning of Workpiece Grinding Area in Dark Scenes with Large Exposure"],"prefix":"10.1007","author":[{"given":"Zhentao","family":"Guo","sequence":"first","affiliation":[]},{"given":"Guiyu","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Jinyue","family":"Bian","sequence":"additional","affiliation":[]},{"given":"Hongbin","family":"Ma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,13]]},"reference":[{"key":"5_CR1","doi-asserted-by":"crossref","unstructured":"Zhao, X., Lu, H., Yu, W., Tao, B., Ding, H.: Vision-based mobile robotic grinding for large-scale workpiece and its accuracy analysis. IEEE\/ASME Transactions on Mechatronics (2022)","DOI":"10.1109\/TMECH.2022.3212911"},{"key":"5_CR2","doi-asserted-by":"publisher","first-page":"2195","DOI":"10.1007\/s00170-023-10822-6","volume":"125","author":"C Chen","year":"2023","unstructured":"Chen, C., Cai, Z., Chen, T., Li, Z., Yang, F., Liang, X.: A vision-based calibration method for aero-engine blade-robotic grinding system. Int. J. Adv. Manufact. Technol. 125, 2195\u20132209 (2023)","journal-title":"Int. J. Adv. Manufact. Technol."},{"issue":"11\u201312","key":"5_CR3","doi-asserted-by":"publisher","first-page":"7627","DOI":"10.1007\/s00170-022-09758-0","volume":"121","author":"J Ge","year":"2022","unstructured":"Ge, J., et al.: An efficient system based on model segmentation for weld seam grinding robot. Int. J. Adv. Manufact. Technol. 121(11\u201312), 7627\u20137641 (2022)","journal-title":"Int. J. Adv. Manufact. Technol."},{"issue":"9","key":"5_CR4","first-page":"1634","volume":"55","author":"Y Ding","year":"2021","unstructured":"Ding, Y., et al.: Calibration method of laser displacement sensor based on binocular vision. J. Zhejiang Univ. Eng. Sci. 55(9), 1634\u20131642 (2021)","journal-title":"J. Zhejiang Univ. Eng. Sci."},{"issue":"10\u201311","key":"5_CR5","doi-asserted-by":"publisher","first-page":"941","DOI":"10.1177\/027836490302210010","volume":"22","author":"J Baeten","year":"2003","unstructured":"Baeten, J., Bruyninckx, H., De Schutter, J.: Integrated vision\/force robotic servoing in the task frame formalism. Int. J. Robot. Res. 22(10\u201311), 941\u2013954 (2003)","journal-title":"Int. J. Robot. Res."},{"key":"5_CR6","doi-asserted-by":"publisher","first-page":"688275","DOI":"10.3389\/frobt.2021.688275","volume":"8","author":"A Rastegarpanah","year":"2021","unstructured":"Rastegarpanah, A., Hathaway, J., Stolkin, R.: Vision-guided MPC for robotic path following using learned memory-augmented model. Front. Robot. AI 8, 688275 (2021)","journal-title":"Front. Robot. AI"},{"key":"5_CR7","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"475","DOI":"10.1007\/978-3-030-82153-1_39","volume-title":"Knowledge Science, Engineering and Management","author":"J Wang","year":"2021","unstructured":"Wang, J.: An improved YOLO algorithm for object detection in all day scenarios. In: Qiu, H., Zhang, C., Fei, Z., Qiu, M., Kung, S.-Y. (eds.) KSEM 2021. LNCS (LNAI), vol. 12817, pp. 475\u2013486. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-82153-1_39"},{"key":"5_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"345","DOI":"10.1007\/978-3-030-58589-1_21","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Sasagawa","year":"2020","unstructured":"Sasagawa, Y., Nagahara, H.: YOLO in the dark - domain adaptation method for merging multiple models. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12366, pp. 345\u2013359. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58589-1_21"},{"issue":"3","key":"5_CR9","doi-asserted-by":"publisher","first-page":"035010","DOI":"10.1117\/1.JRS.11.035010","volume":"11","author":"J Zhou","year":"2017","unstructured":"Zhou, J., Kwan, C., Ayhan, B.: Improved target detection for hyperspectral images using hybrid in-scene calibration. J. Appl. Remote Sens. 11(3), 035010\u2013035010 (2017)","journal-title":"J. Appl. Remote Sens."},{"key":"5_CR10","unstructured":"Ying, Z., Li, G., Gao, W.: A bio-inspired multi-exposure fusion framework for low-light image enhancement. arXiv preprint arXiv:1711.00591 (2017)"},{"key":"5_CR11","doi-asserted-by":"crossref","unstructured":"Ying, Z., Li, G., Ren, Y., Wang, R., Wang, W.: A new low-light image enhancement algorithm using camera response model. In: Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 3015\u20133022 (2017)","DOI":"10.1109\/ICCVW.2017.356"},{"key":"5_CR12","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1016\/j.sbspro.2013.12.027","volume":"106","author":"GK Uyan\u0131k","year":"2013","unstructured":"Uyan\u0131k, G.K., G\u00fcler, N.: A study on multiple linear regression analysis. Procedia. Soc. Behav. Sci. 106, 234\u2013240 (2013)","journal-title":"Procedia. Soc. Behav. Sci."},{"key":"5_CR13","doi-asserted-by":"crossref","unstructured":"Bian, J., Wang, X., Liao, Z., Ma, H.: Accurate positioning for refueling plug with coarse-to-fine registration and pose correction. In: 2022 China Automation Congress (CAC), pp. 2433\u20132438. IEEE (2022)","DOI":"10.1109\/CAC57257.2022.10055996"},{"key":"5_CR14","unstructured":"Fu, C.Y., Liu, W., Ranga, A., Tyagi, A., Berg, A.C.: DSSD: deconvolutional single shot detector. arXiv preprint arXiv:1701.06659 (2017)"},{"key":"5_CR15","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems, vol. 28 (2015)"},{"key":"5_CR16","unstructured":"Jocher, G.: YOLOv5 release v6.1 (2022). https:\/\/github.com\/ultralytics\/yolov5\/releases\/tag\/v6.1. Accessed 29 Apr 2023"},{"key":"5_CR17","doi-asserted-by":"crossref","unstructured":"Wang, C.Y., Bochkovskiy, A., Liao, H.Y.M.: YOLOv7: trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. arXiv preprint arXiv:2207.02696 (2022)","DOI":"10.1109\/CVPR52729.2023.00721"},{"key":"5_CR18","first-page":"2675950","volume":"2022","author":"M Qi","year":"2022","unstructured":"Qi, M., et al.: Multi-region nonuniform brightness correction algorithm based on l-channel gamma transform. Secur. Commun. Networks 2022, 2675950 (2022)","journal-title":"Secur. Commun. Networks"},{"key":"5_CR19","doi-asserted-by":"publisher","unstructured":"Zhao, G., Ma, H., Jin, Y.: A method for robust object recognition and pose estimation of rigid body based on point cloud. In: Liu, H., et al. (eds.) Intelligent Robotics and Applications. ICIRA 2022. Lecture Notes in Computer Science, vol. 13458 pp. 468\u2013480. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-13841-6_43","DOI":"10.1007\/978-3-031-13841-6_43"},{"key":"5_CR20","unstructured":"Paszke, A., et al.: PyTorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"}],"container-title":["Lecture Notes in Computer Science","Intelligent Robotics and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-6498-7_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,8]],"date-time":"2025-02-08T09:58:31Z","timestamp":1739008711000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-6498-7_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9789819964970","9789819964987"],"references-count":20,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-6498-7_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"13 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIRA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Robotics and Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Hangzhou","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 July 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7 July 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icira2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icira2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}