{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T07:17:14Z","timestamp":1773991034539,"version":"3.50.1"},"reference-count":26,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100011933","name":"Chongqing Science and Technology Foundation","doi-asserted-by":"publisher","award":["KJQN202101510"],"award-info":[{"award-number":["KJQN202101510"]}],"id":[{"id":"10.13039\/501100011933","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012669","name":"Natural Science Foundation Project of Chongqing","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012669","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005230","name":"Natural Science Foundation of Chongqing Municipality","doi-asserted-by":"publisher","award":["CSTB2022NSCQ-MSX0398"],"award-info":[{"award-number":["CSTB2022NSCQ-MSX0398"]}],"id":[{"id":"10.13039\/501100005230","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005230","name":"Natural Science Foundation of Chongqing Municipality","doi-asserted-by":"publisher","award":["CSTB2022NSCQ-MSX1425"],"award-info":[{"award-number":["CSTB2022NSCQ-MSX1425"]}],"id":[{"id":"10.13039\/501100005230","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Signal Processing: Image Communication"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.image.2026.117491","type":"journal-article","created":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T16:40:16Z","timestamp":1768840816000},"page":"117491","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Global and interactive graph channel attention for robust stereo matching"],"prefix":"10.1016","volume":"143","author":[{"given":"Jun","family":"Yu","sequence":"first","affiliation":[]},{"given":"Xiaofeng","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yingying","family":"Su","sequence":"additional","affiliation":[]},{"given":"Zhiheng","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Jiameng","family":"Sun","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"4","key":"10.1016\/j.image.2026.117491_bib0001","doi-asserted-by":"crossref","first-page":"1738","DOI":"10.1109\/TPAMI.2020.3032602","article-title":"A survey on deep learning techniques for stereo-based depth estimation","volume":"44","author":"Laga","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"9","key":"10.1016\/j.image.2026.117491_bib0002","doi-asserted-by":"crossref","first-page":"10761","DOI":"10.1109\/TII.2024.3392270","article-title":"Robust depth estimation based on parallax attention for aerial scene perception [J]","volume":"20","author":"Tong","year":"2024","journal-title":"IEEE Trans. Ind. Inform."},{"key":"10.1016\/j.image.2026.117491_bib0003","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"8063","article-title":"UnOS: unified unsupervised optical-flow and stereo-depth estimation by watching videos","author":"Wang","year":"2019"},{"key":"10.1016\/j.image.2026.117491_bib0004","first-page":"1","article-title":"Self-calibrating sparse far-field photometric stereo with collocated light","volume":"71","author":"Wang","year":"2022","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.image.2026.117491_bib0005","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"17776","article-title":"Pats: patch area transportation with subdivision for local feature matching","author":"Ni","year":"2023"},{"issue":"3","key":"10.1016\/j.image.2026.117491_bib0006","doi-asserted-by":"crossref","first-page":"853","DOI":"10.1109\/TETC.2025.3528972","article-title":"Software-defined number formats for high-speed belief propagation","volume":"13","author":"Molahosseini","year":"2025","journal-title":"IEEE Trans. Emerg. Top. Comput."},{"key":"10.1016\/j.image.2026.117491_bib0007","series-title":"International Conference on Intelligent Computing","first-page":"469","article-title":"Self-supervised learning of psmnet via generative adversarial networks","author":"Yang","year":"2024"},{"issue":"1","key":"10.1016\/j.image.2026.117491_bib0008","doi-asserted-by":"crossref","first-page":"912","DOI":"10.1109\/TIV.2022.3155469","article-title":"Adaptive cost volume representation for unsupervised high-resolution stereo matching","volume":"8","author":"Tong","year":"2023","journal-title":"IEEE Trans. Intell. Veh."},{"key":"10.1016\/j.image.2026.117491_bib0009","series-title":"IEEE Conference on Conference on Computer Vision and Pattern Recognition","first-page":"5410","article-title":"Pyramid stereo matching network","author":"Chang","year":"2018"},{"key":"10.1016\/j.image.2026.117491_bib0010","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"12971","article-title":"Attention concatenation volume for accurate and efficient stereo matching","author":"Xu","year":"2022"},{"key":"10.1016\/j.image.2026.117491_bib0011","series-title":"IEEE International Conference on Robotics and Automation","first-page":"8738","article-title":"Lightstereo: Channel boost is all you need for efficient 2d cost aggregation","author":"Guo","year":"2025"},{"key":"10.1016\/j.image.2026.117491_bib0012","doi-asserted-by":"crossref","first-page":"50828","DOI":"10.1109\/ACCESS.2020.2980243","article-title":"A convolutional attention residual network for stereo matching","volume":"8","author":"Huang","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.image.2026.117491_bib0013","series-title":"International Conference on Pattern Recognition","first-page":"4973","article-title":"Attention stereo matching network","author":"Zhang","year":"2020"},{"key":"10.1016\/j.image.2026.117491_bib0014","series-title":"IEEE International Conference on Computer Vision","first-page":"66","article-title":"End-to-end learning of geometry and context for deep stereo regression","author":"Kendall","year":"2017"},{"key":"10.1016\/j.image.2026.117491_bib0015","series-title":"European conference on computer vision","first-page":"3","article-title":"Cbam: convolutional block attention module","author":"Woo","year":"2018"},{"key":"10.1016\/j.image.2026.117491_bib0016","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"16000","article-title":"Masked autoencoders are scalable vision learners","author":"He","year":"2022"},{"key":"10.1016\/j.image.2026.117491_bib0017","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"4937","article-title":"SuperGlue: learning feature matching with graph neural networks","author":"Sarlin","year":"2020"},{"key":"10.1016\/j.image.2026.117491_bib0018","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"651","article-title":"Sasic: Stereo image compression with latent shifts and stereo attention","author":"W\u00f6dlinger","year":"2022"},{"key":"10.1016\/j.image.2026.117491_bib0019","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"5510","article-title":"Hierarchical deep stereo matching on high-resolution images","author":"Yang","year":"2019"},{"key":"10.1016\/j.image.2026.117491_bib0020","series-title":"International Conference on Computer Vision","first-page":"7483","article-title":"Semantic stereo matching with pyramid cost volumes","author":"Wu","year":"2019"},{"key":"10.1016\/j.image.2026.117491_bib0021","doi-asserted-by":"crossref","DOI":"10.1016\/j.imavis.2025.105771","article-title":"DPDNet: the lightweight stereo matching network based on disparity probability distribution consistency","author":"Liu","year":"2025","journal-title":"Image Vis. Comput."},{"key":"10.1016\/j.image.2026.117491_bib0022","unstructured":"W Yun, W Long, Z Cheng, et al. Learning robust stereo matching in the wild with selective mixture-of-experts. International Conference on Computer Vision. 2025, pp. 21276-21287."},{"key":"10.1016\/j.image.2026.117491_bib0023","series-title":"International Conference on Computer Vision","first-page":"4383","article-title":"DeepPruner: learning efficient stereo matching via differentiable PatchMatch","author":"Duggal","year":"2019"},{"key":"10.1016\/j.image.2026.117491_bib0024","series-title":"Conference on Computer Vision and Pattern Recognition","first-page":"8938","article-title":"SMD-Nets: stereo mixture density networks","author":"Tosi","year":"2021"},{"issue":"4","key":"10.1016\/j.image.2026.117491_bib0025","doi-asserted-by":"crossref","first-page":"2378","DOI":"10.1109\/TPAMI.2023.3330866","article-title":"Learning optical flow and scene flow with bidirectional camera-LiDAR Fusion","volume":"46","author":"Liu","year":"2024","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"12","key":"10.1016\/j.image.2026.117491_bib0026","doi-asserted-by":"crossref","first-page":"24686","DOI":"10.1109\/TITS.2022.3193421","article-title":"Normal assisted pixel-visibility learning with cost aggregation for multiview stereo","volume":"23","author":"Tong","year":"2023","journal-title":"IEEE Trans. Intell. Transp. Syst."}],"container-title":["Signal Processing: Image Communication"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0923596526000147?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0923596526000147?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T05:06:19Z","timestamp":1773983179000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0923596526000147"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":26,"alternative-id":["S0923596526000147"],"URL":"https:\/\/doi.org\/10.1016\/j.image.2026.117491","relation":{},"ISSN":["0923-5965"],"issn-type":[{"value":"0923-5965","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Global and interactive graph channel attention for robust stereo matching","name":"articletitle","label":"Article Title"},{"value":"Signal Processing: Image Communication","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.image.2026.117491","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"117491"}}