{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T22:54:52Z","timestamp":1774306492091,"version":"3.50.1"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030585198","type":"print"},{"value":"9783030585204","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58520-4_39","type":"book-chapter","created":{"date-parts":[[2020,11,18]],"date-time":"2020-11-18T10:08:18Z","timestamp":1605694098000},"page":"665-681","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":136,"title":["Cross-Modal Weighting Network for RGB-D Salient Object Detection"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7324-1196","authenticated-orcid":false,"given":"Gongyang","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8428-1131","authenticated-orcid":false,"given":"Zhi","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7375-452X","authenticated-orcid":false,"given":"Linwei","family":"Ye","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9447-1791","authenticated-orcid":false,"given":"Yang","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4094-8413","authenticated-orcid":false,"given":"Haibin","family":"Ling","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,19]]},"reference":[{"issue":"2","key":"39_CR1","doi-asserted-by":"publisher","first-page":"117","DOI":"10.1007\/s41095-019-0149-9","volume":"5","author":"A Borji","year":"2019","unstructured":"Borji, A., Cheng, M.-M., Hou, Q., Jiang, H., Li, J.: Salient object detection: a survey. Comput. Vis. Media 5(2), 117\u2013150 (2019)","journal-title":"Comput. Vis. Media"},{"issue":"12","key":"39_CR2","first-page":"5706","volume":"24","author":"A Borji","year":"2015","unstructured":"Borji, A., Cheng, M.M., Jiang, H., Li, J.: Salient object detection: a benchmark. IEEE TIP 24(12), 5706\u20135722 (2015)","journal-title":"IEEE TIP"},{"key":"39_CR3","doi-asserted-by":"crossref","unstructured":"Bottou, L.: Large-scale machine learning with stochastic gradient descent. In: COMPSTAT (2010)","DOI":"10.1007\/978-3-7908-2604-3_16"},{"key":"39_CR4","doi-asserted-by":"crossref","unstructured":"Chen, H., Li, Y.: Progressively complementarity-aware fusion network for RGB-D salient object detection. In: IEEE CVPR (2018)","DOI":"10.1109\/CVPR.2018.00322"},{"issue":"6","key":"39_CR5","first-page":"2825","volume":"28","author":"H Chen","year":"2019","unstructured":"Chen, H., Li, Y.: Three-stream attention-aware network for RGB-D salient object detection. IEEE TIP 28(6), 2825\u20132835 (2019)","journal-title":"IEEE TIP"},{"key":"39_CR6","doi-asserted-by":"publisher","first-page":"376","DOI":"10.1016\/j.patcog.2018.08.007","volume":"86","author":"H Chen","year":"2019","unstructured":"Chen, H., Li, Y., Su, D.: Multi-modal fusion network with multi-scale multi-path and cross-modal interactions for RGB-D salient object detection. Pattern Recogn. 86, 376\u2013385 (2019)","journal-title":"Pattern Recogn."},{"key":"39_CR7","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Fu, H., Wei, X., Xiao, J., Cao, X.: Depth enhanced saliency detection method. In: ACM ICIMCS (2014)","DOI":"10.1145\/2632856.2632866"},{"key":"39_CR8","doi-asserted-by":"publisher","first-page":"3627","DOI":"10.1109\/TCYB.2019.2932005","volume":"50","author":"R Cong","year":"2019","unstructured":"Cong, R., Lei, J., Fu, H., Hou, J., Huang, Q., Kwong, S.: Going from RGB to RGBD saliency: a depth-guided transformation model. IEEE TCYB 50, 3627\u20133639 (2019). https:\/\/doi.org\/10.1109\/TCYB.2019.2932005","journal-title":"IEEE TCYB"},{"issue":"6","key":"39_CR9","first-page":"819","volume":"23","author":"R Cong","year":"2016","unstructured":"Cong, R., Lei, J., Zhang, C., Huang, Q., Cao, X., Hou, C.: Saliency detection for stereoscopic images based on depth confidence analysis and multiple cues fusion. IEEE SPL 23(6), 819\u2013823 (2016)","journal-title":"IEEE SPL"},{"key":"39_CR10","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.jvcir.2019.03.019","volume":"61","author":"Y Ding","year":"2019","unstructured":"Ding, Y., Liu, Z., Huang, M., Shi, R., Wang, X.: Depth-aware saliency detection using convolutional neural networks. J. Vis. Commun. Image Represent. 61, 1\u20139 (2019)","journal-title":"J. Vis. Commun. Image Represent."},{"key":"39_CR11","doi-asserted-by":"crossref","unstructured":"Fan, D.P., Cheng, M.M., Liu, Y., Li, T., Borji, A.: Structure-measure: a new way to evaluate foreground maps. In: IEEE ICCV (2017)","DOI":"10.1109\/ICCV.2017.487"},{"key":"39_CR12","doi-asserted-by":"crossref","unstructured":"Fan, D.P., Gong, C., Cao, Y., Ren, B., Cheng, M.M., Borji, A.: Enhanced-alignment measure for binary foreground map evaluation. In: IJCAI (2018)","DOI":"10.24963\/ijcai.2018\/97"},{"key":"39_CR13","unstructured":"Fan, D.P., et al.: Rethinking RGB-D salient object detection: models, datasets, and large-scale benchmarks. arXiv preprint arXiv:1907.06781 (2019)"},{"key":"39_CR14","doi-asserted-by":"crossref","unstructured":"Fan, X., Liu, Z., Sun, G.: Salient region detection for stereoscopic images. In: IEEE DSP (2014)","DOI":"10.1109\/ICDSP.2014.6900706"},{"issue":"6","key":"39_CR15","first-page":"2625","volume":"23","author":"Y Fang","year":"2014","unstructured":"Fang, Y., Wang, J., Narwaria, M., Callet, P.L., Lin, W.: Saliency detection for stereoscopic images. IEEE TIP 23(6), 2625\u20132636 (2014)","journal-title":"IEEE TIP"},{"key":"39_CR16","doi-asserted-by":"crossref","unstructured":"Feng, D., Barnes, N., You, S., McCarthy, C.: Local background enclosure for RGB-D salient object detection. In: IEEE CVPR (2016)","DOI":"10.1109\/CVPR.2016.257"},{"key":"39_CR17","unstructured":"Glorot, X., Bengio, Y.: Understanding the difficulty of training deep feedforward neural networks. In: AISTATS (2010)"},{"key":"39_CR18","doi-asserted-by":"crossref","unstructured":"Guo, J., Ren, T., Bei, J.: Salient object detection for RGB-D image via saliency evolution. In: IEEE ICME (2016)","DOI":"10.1109\/ICME.2016.7552907"},{"key":"39_CR19","doi-asserted-by":"crossref","unstructured":"Guo, J., Ren, T., Jia, B., Zhu, Y.: Salient object detection in RGB-D image based on saliency fusion and propagation. In: ACM ICIMCS (2015)","DOI":"10.1145\/2808492.2808551"},{"issue":"11","key":"39_CR20","first-page":"3171","volume":"48","author":"J Han","year":"2018","unstructured":"Han, J., Chen, H., Liu, N., Yan, C., Li, X.: CNNs-based RGB-D saliency detection via cross-view transfer and multiview fusion. IEEE TCYB 48(11), 3171\u20133183 (2018)","journal-title":"IEEE TCYB"},{"issue":"11","key":"39_CR21","doi-asserted-by":"publisher","first-page":"1254","DOI":"10.1109\/34.730558","volume":"20","author":"L Itti","year":"1998","unstructured":"Itti, L., Koch, C., Niebur, E.: A model of saliency-based visual attention for rapid scene analysis. IEEE TPAMI 20(11), 1254\u20131259 (1998)","journal-title":"IEEE TPAMI"},{"key":"39_CR22","doi-asserted-by":"crossref","unstructured":"Jia, Y., et al.: Caffe: convolutional architecture for fast feature embedding. In: ACM MM (2014)","DOI":"10.1145\/2647868.2654889"},{"key":"39_CR23","doi-asserted-by":"crossref","unstructured":"Ju, R., Ge, L., Geng, W., Ren, T., Wu, G.: Depth saliency based on anisotropic center-surround difference. In: IEEE ICIP (2014)","DOI":"10.1109\/ICIP.2014.7025222"},{"key":"39_CR24","doi-asserted-by":"crossref","unstructured":"Li, G., Zhu, C.: A three-pathway psychobiological framework of salient object detection using stereoscopic technology. In: IEEE ICCVW (2017)","DOI":"10.1109\/ICCVW.2017.355"},{"key":"39_CR25","doi-asserted-by":"crossref","unstructured":"Li, N., Ye, J., Ji, Y., Ling, H., Yu, J.: Saliency detection on light field. In: IEEE CVPR (2014)","DOI":"10.1109\/CVPR.2014.359"},{"key":"39_CR26","doi-asserted-by":"publisher","first-page":"46","DOI":"10.1016\/j.neucom.2019.07.012","volume":"363","author":"Z Liu","year":"2019","unstructured":"Liu, Z., Shi, S., Duan, Q., Zhang, W., Zhao, P.: Salient object detection for RGB-D image by single stream recurrent convolution neural network. Neurocomputing 363, 46\u201357 (2019)","journal-title":"Neurocomputing"},{"key":"39_CR27","doi-asserted-by":"crossref","unstructured":"Margolin, R., Zelnik-Manor, L., Tal, A.: How to evaluate foreground maps. In: IEEE CVPR (2014)","DOI":"10.1109\/CVPR.2014.39"},{"key":"39_CR28","unstructured":"Niu, Y., Geng, Y., Li, X., Liu, F.: Leveraging stereopsis for saliency analysis. In: IEEE CVPR (2012)"},{"key":"39_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"92","DOI":"10.1007\/978-3-319-10578-9_7","volume-title":"Computer Vision \u2013 ECCV 2014","author":"H Peng","year":"2014","unstructured":"Peng, H., Li, B., Xiong, W., Hu, W., Ji, R.: RGBD salient object detection: a benchmark and algorithms. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8691, pp. 92\u2013109. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10578-9_7"},{"key":"39_CR30","doi-asserted-by":"crossref","unstructured":"Piao, Y., Ji, W., Li, J., Zhang, M., Lu, H.: Depth-induced multi-scale recurrent attention network for saliency detection. In: IEEE ICCV (2019)","DOI":"10.1109\/ICCV.2019.00735"},{"issue":"5","key":"39_CR31","first-page":"2274","volume":"26","author":"L Qu","year":"2017","unstructured":"Qu, L., He, S., Zhang, J., Tian, J., Tang, Y., Yang, Q.: RGBD salient object detection via deep fusion. IEEE TIP 26(5), 2274\u20132285 (2017)","journal-title":"IEEE TIP"},{"key":"39_CR32","doi-asserted-by":"crossref","unstructured":"Ren, J., Gong, X., Yu, L., Zhou, W., Yang, M.Y.: Exploiting global priors for RGB-D saliency detection. In: IEEE CVPRW (2015)","DOI":"10.1109\/CVPRW.2015.7301391"},{"key":"39_CR33","doi-asserted-by":"crossref","unstructured":"Shigematsu, R., Feng, D., You, S., Barnes, N.: Learning RGB-D salient object detection using background enclosure, depth contrast, and top-down features. In: IEEE ICCVW (2017)","DOI":"10.1109\/ICCVW.2017.323"},{"key":"39_CR34","doi-asserted-by":"crossref","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: ICLR (2015)","DOI":"10.1109\/ICCV.2015.314"},{"key":"39_CR35","doi-asserted-by":"crossref","unstructured":"Song, H., Liu, Z., Du, H., Sun, G., Bai, C.: Saliency detection for RGBD images. In: ACM ICIMCS (2015)","DOI":"10.1145\/2808492.2808565"},{"issue":"9","key":"39_CR36","first-page":"4204","volume":"26","author":"H Song","year":"2017","unstructured":"Song, H., Liu, Z., Du, H., Sun, G., Olivier, L.M., Ren, T.: Depth-aware salient object detection and segmentation via multiscale discriminative saliency fusion and bootstrap learning. IEEE TIP 26(9), 4204\u20134216 (2017)","journal-title":"IEEE TIP"},{"issue":"5","key":"39_CR37","first-page":"663","volume":"24","author":"A Wang","year":"2017","unstructured":"Wang, A., Wang, M.: RGB-D salient object detection via minimum barrier distance transform and saliency fusion. IEEE SPL 24(5), 663\u2013667 (2017)","journal-title":"IEEE SPL"},{"key":"39_CR38","doi-asserted-by":"publisher","first-page":"55277","DOI":"10.1109\/ACCESS.2019.2913107","volume":"7","author":"N Wang","year":"2019","unstructured":"Wang, N., Gong, X.: Adaptive fusion for RGB-D salient object detection. IEEE Access 7, 55277\u201355284 (2019)","journal-title":"IEEE Access"},{"key":"39_CR39","unstructured":"Wang, W., Lai, Q., Fu, H., Shen, J., Ling, H.: Salient object detection in the deep learning era: an in-depth survey. arXiv preprint arXiv:1904.09146 (2019)"},{"key":"39_CR40","doi-asserted-by":"crossref","unstructured":"Xie, S., Tu, Z.: Holistically-nested edge detection. In: IEEE ICCV (2015)","DOI":"10.1109\/ICCV.2015.164"},{"key":"39_CR41","unstructured":"Yu, F., Koltun, V.: Multi-scale context aggregation by dilated convolutions. In: ICLR (2016)"},{"key":"39_CR42","doi-asserted-by":"crossref","unstructured":"Zhao, J.X., Cao, Y., Fan, D.P., Cheng, M.M., Li, X.Y., Zhang, L.: Contrast prior and fluid pyramid integration for RGBD salient object detection. In: IEEE CVPR (2019)","DOI":"10.1109\/CVPR.2019.00405"},{"key":"39_CR43","unstructured":"Zhou, Z., Wang, Z., Lu, H., Wang, S., Sun, M.: Global and local sensitivity guided key salient object re-augmentation for video saliency detection. arXiv preprint arXiv:1811.07480 (2018)"},{"key":"39_CR44","doi-asserted-by":"crossref","unstructured":"Zhu, C., Cai, X., Huang, K., Li, T.H., Li, G.: PDNet: prior-model guided depth-enhanced network for salient object detection. In: IEEE ICME (2019)","DOI":"10.1109\/ICME.2019.00042"},{"key":"39_CR45","doi-asserted-by":"crossref","unstructured":"Zhu, C., Li, G., Wang, W., Wang, R.: An innovative salient object detection using center-dark channel prior. In: IEEE ICCVW (2017)","DOI":"10.1109\/ICCVW.2017.178"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58520-4_39","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,18]],"date-time":"2024-11-18T00:26:19Z","timestamp":1731889579000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58520-4_39"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030585198","9783030585204"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58520-4_39","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"19 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic. From the ECCV Workshops 249 full papers, 18 short papers, and 21 further contributions were published out of a total of 467 submissions.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}