{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T17:52:15Z","timestamp":1774720335567,"version":"3.50.1"},"reference-count":52,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62121002"],"award-info":[{"award-number":["62121002"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62022076"],"award-info":[{"award-number":["62022076"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U1936210"],"award-info":[{"award-number":["U1936210"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science 
Foundation","doi-asserted-by":"publisher","award":["2021M703081"],"award-info":[{"award-number":["2021M703081"]}],"id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/tmm.2022.3151145","type":"journal-article","created":{"date-parts":[[2022,2,14]],"date-time":"2022-02-14T23:27:49Z","timestamp":1644881269000},"page":"2774-2787","source":"Crossref","is-referenced-by-count":27,"title":["Learning Cross-Channel Representations for Semantic Segmentation"],"prefix":"10.1109","volume":"25","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5441-4714","authenticated-orcid":false,"given":"Lingfeng","family":"Ma","sequence":"first","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6249-5315","authenticated-orcid":false,"given":"Hongtao","family":"Xie","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2840-6235","authenticated-orcid":false,"given":"Chuanbin","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1151-1792","authenticated-orcid":false,"given":"Yongdong","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, 
China"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00897"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00069"},{"key":"ref14","first-page":"267","article-title":"PsaNet: Point-wise spatial attention network for scene parsing","author":"zhao","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-87358-5_42"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00388"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00326"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00068"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.125"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1038\/ncomms12815"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00960"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01243"},{"key":"ref46","first-page":"177","article-title":"Large-scale machine learning with stochastic gradient descent","author":"bottou","year":"0","journal-title":"Proc COMPSTAT&#x2019;2010"},{"key":"ref45","article-title":"Automatic differentiation in Pytorch","author":"paszke","year":"2017"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.189"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.549"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.350"},{"key":"ref41","article-title":"Graph attention networks","author":"veličković","year":"2018"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.119"},{"key":"ref43","first-page":"633","article-title":"Scene parsing through ADE20K 
dataset","author":"zhou","year":"0","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2962685"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.2983686"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00309"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","article-title":"DeepLab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs","volume":"40","author":"chen","year":"2018","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.660"},{"key":"ref6","first-page":"801","article-title":"Encoder-decoder with atrous separable convolution for semantic image segmentation","author":"chen","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref5","article-title":"Rethinking atrous convolution for semantic image segmentation","volume":"abs\/1706.05587","author":"chen","year":"2017","journal-title":"CoRR"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00406"},{"key":"ref35","first-page":"9245","article-title":"Beyond grids: Learning graph representations for visual recognition","author":"li","year":"0","journal-title":"Proc 32nd Int Conf Neural Inf Process Syst"},{"key":"ref34","first-page":"3","article-title":"CBAM: Convolutional block attention module","author":"woo","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref37","first-page":"399","article-title":"Videos as space-time region graphs","author":"wang","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref36","article-title":"Semi-supervised classification with graph convolutional 
networks","author":"kipf","year":"2016"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00246"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00747"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3035231"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2812600"},{"key":"ref39","first-page":"315","article-title":"Deep sparse rectifier neural networks","author":"glorot","year":"0","journal-title":"Proc 14th Int Conf Artif Intell Statist"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58545-7_16"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58520-4_26"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6805"},{"key":"ref26","first-page":"350","author":"chen","year":"0","journal-title":"Adv Neural Inf Proc Sys 31 Annu Conf Neural Inf Proc Sys"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.683"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2015.2428055"},{"key":"ref22","first-page":"418","article-title":"Unified perceptual parsing for scene understanding","author":"xiao","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2018.07.031"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3093727"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475402"},{"key":"ref29","first-page":"9401","article-title":"Gather-excite: Exploiting feature context in convolutional neural networks","volume":"31","author":"hu","year":"2018","journal-title":"Adv Neural Inf Process Syst"}],"container-title":["IEEE Transactions on 
Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6046\/10016790\/09713684.pdf?arnumber=9713684","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,7]],"date-time":"2023-08-07T18:18:20Z","timestamp":1691432300000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9713684\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":52,"URL":"https:\/\/doi.org\/10.1109\/tmm.2022.3151145","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}