{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T04:52:35Z","timestamp":1776142355192,"version":"3.50.1"},"reference-count":49,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001843","name":"Science and Engineering Research Board","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001843","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100020918","name":"Indian National Academy of Engineering","doi-asserted-by":"publisher","award":["2023\/DGRI\/Cat-2\/06"],"award-info":[{"award-number":["2023\/DGRI\/Cat-2\/06"]}],"id":[{"id":"10.13039\/100020918","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Computers &amp; Graphics"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.cag.2026.104551","type":"journal-article","created":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T19:56:21Z","timestamp":1771358181000},"page":"104551","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":1,"special_numbering":"C","title":["RIFLe-Net: Rotation Invariant Feature Learning Network towards affordance detection in 3D point clouds"],"prefix":"10.1016","volume":"135","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3211-9439","authenticated-orcid":false,"given":"Ramesh Ashok","family":"Tabib","sequence":"first","affiliation":[]},{"given":"Dikshit","family":"Hegde","sequence":"additional","affiliation":[]},{"given":"Uma","family":"Mudenagudi","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"2","key":"10.1016\/j.cag.2026.104551_b1","doi-asserted-by":"crossref","first-page":"26","DOI":"10.1109\/MIC.2012.20","article-title":"Semantic perception: Converting sensory observations to abstractions","volume":"16","author":"Henson","year":"2012","journal-title":"IEEE Internet Comput"},{"issue":"8","key":"10.1016\/j.cag.2026.104551_b2","doi-asserted-by":"crossref","first-page":"951","DOI":"10.1177\/0278364913478446","article-title":"Learning human activities and object affordances from rgb-d videos","volume":"32","author":"Koppula","year":"2013","journal-title":"Int J Robot Res"},{"issue":"1","key":"10.1016\/j.cag.2026.104551_b3","doi-asserted-by":"crossref","first-page":"14","DOI":"10.1109\/TPAMI.2015.2430335","article-title":"Anticipating human activities using object affordances for reactive robotic response","volume":"38","author":"Koppula","year":"2015","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"10.1016\/j.cag.2026.104551_b4","doi-asserted-by":"crossref","unstructured":"Chuang CY, Li J, Torralba A, Fidler S. Learning to act properly: Predicting and explaining affordances from images. In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2018, p. 975\u201383.","DOI":"10.1109\/CVPR.2018.00108"},{"issue":"3","key":"10.1016\/j.cag.2026.104551_b5","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3446370","article-title":"Visual affordance and function understanding: A survey","volume":"54","author":"Hassanin","year":"2021","journal-title":"ACM Comput Surv"},{"key":"10.1016\/j.cag.2026.104551_b6","series-title":"The ecological approach to visual perception: classic edition","author":"Gibson","year":"2014"},{"key":"10.1016\/j.cag.2026.104551_b7","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2022.108626","article-title":"Rotation invariant point cloud analysis: Where local geometry meets global topology","volume":"127","author":"Zhao","year":"2022","journal-title":"Pattern Recognit","ISSN":"https:\/\/id.crossref.org\/issn\/0031-3203","issn-type":"print"},{"key":"10.1016\/j.cag.2026.104551_b8","doi-asserted-by":"crossref","unstructured":"Tabib RA, Upasi N, Anvekar T, Hegde D, Mudenagudi U. IPD-Net: SO (3) Invariant Primitive Decompositional Network for 3D Point Clouds. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. 2023, p. 2735\u201343.","DOI":"10.1109\/CVPRW59228.2023.00274"},{"issue":"2","key":"10.1016\/j.cag.2026.104551_b9","doi-asserted-by":"crossref","first-page":"798","DOI":"10.1109\/TASE.2015.2396014","article-title":"Learning to detect visual grasp affordance","volume":"13","author":"Song","year":"2015","journal-title":"IEEE Trans Autom Sci Eng"},{"key":"10.1016\/j.cag.2026.104551_b10","series-title":"Computer vision\u2013ECCV 2016: 14th European conference, Amsterdam, the Netherlands, October 11\u201314, 2016, proceedings, part IV 14","first-page":"186","article-title":"A multi-scale cnn for affordance segmentation in rgb images","author":"Roy","year":"2016"},{"key":"10.1016\/j.cag.2026.104551_b11","doi-asserted-by":"crossref","first-page":"302","DOI":"10.1007\/s11263-018-1140-0","article-title":"Semantic understanding of scenes through the ade20k dataset","volume":"127","author":"Zhou","year":"2019","journal-title":"Int J Comput Vis"},{"key":"10.1016\/j.cag.2026.104551_b12","series-title":"2015 IEEE international conference on robotics and automation","first-page":"1374","article-title":"Affordance detection of tool parts from geometric features","author":"Myers","year":"2015"},{"key":"10.1016\/j.cag.2026.104551_b13","series-title":"2017 IEEE\/RSJ international conference on intelligent robots and systems","first-page":"5908","article-title":"Object-based affordances detection with convolutional neural networks and dense conditional random fields","author":"Nguyen","year":"2017"},{"key":"10.1016\/j.cag.2026.104551_b14","doi-asserted-by":"crossref","unstructured":"Sawatzky J, Srikantha A, Gall J. Weakly supervised affordance detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2017, p. 2795\u2013804.","DOI":"10.1109\/CVPR.2017.552"},{"key":"10.1016\/j.cag.2026.104551_b15","doi-asserted-by":"crossref","unstructured":"Deng S, Xu X, Wu C, Chen K, Jia K. 3D AffordanceNet: A benchmark for visual object affordance understanding. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. 2021, p. 1778\u201387.","DOI":"10.1109\/CVPR46437.2021.00182"},{"key":"10.1016\/j.cag.2026.104551_b16","doi-asserted-by":"crossref","unstructured":"Mo K, Zhu S, Chang AX, Yi L, Tripathi S, Guibas LJ, Su H. PartNet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. 2019, p. 909\u201318.","DOI":"10.1109\/CVPR.2019.00100"},{"key":"10.1016\/j.cag.2026.104551_b17","series-title":"2024 IEEE\/CVF conference on computer vision and pattern recognition","first-page":"14251","article-title":"LASO: Language-guided affordance segmentation on 3D object","author":"Li","year":"2024"},{"key":"10.1016\/j.cag.2026.104551_b18","series-title":"2022 IEEE-RAS 21st international conference on humanoid robots (humanoids)","first-page":"873","article-title":"Affordance detection with Dynamic-Tree Capsule Networks","author":"Rodr\u00edguez-S\u00e1nchez","year":"2022"},{"issue":"2","key":"10.1016\/j.cag.2026.104551_b19","doi-asserted-by":"crossref","first-page":"1672","DOI":"10.1109\/LRA.2024.3524904","article-title":"Variation-robust few-shot 3D affordance segmentation for robotic manipulation","volume":"10","author":"Hu","year":"2025","journal-title":"IEEE Robot Autom Lett"},{"key":"10.1016\/j.cag.2026.104551_b20","series-title":"Open-vocabulary affordance detection in 3d point clouds","author":"Nguyen","year":"2023"},{"key":"10.1016\/j.cag.2026.104551_b21","series-title":"Learning so(3) equivariant representations with spherical cnns","author":"Esteves","year":"2017"},{"key":"10.1016\/j.cag.2026.104551_b22","series-title":"EqvAfford: SE (3) equivariance for point-level affordance learning","author":"Chen","year":"2024"},{"issue":"5","key":"10.1016\/j.cag.2026.104551_b23","doi-asserted-by":"crossref","first-page":"1383","DOI":"10.1111\/j.1467-8659.2009.01515.x","article-title":"A concise and provably informative multi-scale signature based on heat diffusion","volume":"28","author":"Sun","year":"2009","journal-title":"Comput Graph Forum"},{"issue":"5","key":"10.1016\/j.cag.2026.104551_b24","first-page":"1626","article-title":"The wave kernel signature: A quantum mechanical approach to shape analysis","volume":"30","author":"Aubry","year":"2011","journal-title":"Comput Graph Forum"},{"key":"10.1016\/j.cag.2026.104551_b25","series-title":"Advances in neural information processing systems 37 (neurIPS 2024) posters","article-title":"Improving neural network surface processing with principal curvatures","author":"Harrison","year":"2024"},{"key":"10.1016\/j.cag.2026.104551_b26","unstructured":"Qi CR, Su H, Mo K, Guibas LJ. PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2017."},{"key":"10.1016\/j.cag.2026.104551_b27","series-title":"Point Convolutional Neural Networks by Extension Operators","author":"Atzmon","year":"2018"},{"key":"10.1016\/j.cag.2026.104551_b28","doi-asserted-by":"crossref","DOI":"10.1145\/3326362","article-title":"Dynamic graph CNN for learning on point clouds","author":"Wang","year":"2019","journal-title":"ACM Trans Graph"},{"key":"10.1016\/j.cag.2026.104551_b29","article-title":"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space","volume":"vol. 30","author":"Qi","year":"2017"},{"key":"10.1016\/j.cag.2026.104551_b30","series-title":"Advances in neural information processing systems (neurIPS 2022)","article-title":"PointNeXt: Revisiting PointNet++ with improved training and scaling strategies","author":"Qian","year":"2022"},{"issue":"2","key":"10.1016\/j.cag.2026.104551_b31","doi-asserted-by":"crossref","first-page":"187","DOI":"10.1007\/s41095-021-0229-5","article-title":"PCT: Point Cloud Transformer","volume":"7","author":"Guo","year":"2021","journal-title":"Comput Vis Media","ISSN":"https:\/\/id.crossref.org\/issn\/2096-0662","issn-type":"print"},{"key":"10.1016\/j.cag.2026.104551_b32","article-title":"Geometric Back-projection Network for Point Cloud Classification","author":"Qiu","year":"2021","journal-title":"IEEE Trans Multimed"},{"key":"10.1016\/j.cag.2026.104551_b33","doi-asserted-by":"crossref","unstructured":"Anvekar T, Bazazian D. GPr-Net: Geometric Prototypical Network for Point Cloud Few-Shot Learning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. 2023, p. 4178\u201387.","DOI":"10.1109\/CVPRW59228.2023.00440"},{"key":"10.1016\/j.cag.2026.104551_b34","doi-asserted-by":"crossref","unstructured":"Liu Y, Fan B, Xiang S, Pan C. Relation-Shape Convolutional Neural Network for Point Cloud Analysis. In: IEEE conference on computer vision and pattern recognition. 2019, p. 8895\u2013904.","DOI":"10.1109\/CVPR.2019.00910"},{"key":"10.1016\/j.cag.2026.104551_b35","doi-asserted-by":"crossref","unstructured":"Tabib RA, Hegde D, Mudenagudi U. LGAfford-Net: A Local Geometry Aware Affordance Detection Network for 3D Point Clouds. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) workshops. 2024, p. 5261\u201370.","DOI":"10.1109\/CVPRW63382.2024.00535"},{"key":"10.1016\/j.cag.2026.104551_b36","series-title":"Tensor field networks: Rotation- and translation-equivariant neural networks for 3D point clouds","author":"Thomas","year":"2018"},{"key":"10.1016\/j.cag.2026.104551_b37","article-title":"Spatial transformer networks","volume":"28","author":"Jaderberg","year":"2015","journal-title":"Adv Neural Inf Process Syst"},{"issue":"5","key":"10.1016\/j.cag.2026.104551_b38","doi-asserted-by":"crossref","first-page":"403","DOI":"10.1007\/BF02163027","article-title":"Singular value decomposition and least squares solutions","volume":"14","author":"Golub","year":"1970","journal-title":"Numer Math","ISSN":"https:\/\/id.crossref.org\/issn\/0945-3245","issn-type":"print"},{"key":"10.1016\/j.cag.2026.104551_b39","series-title":"Canonical and compact point cloud representation for shape classification","author":"Fujiwara","year":"2018"},{"key":"10.1016\/j.cag.2026.104551_b40","series-title":"3D model retrieval","author":"Vranic","year":"2004"},{"key":"10.1016\/j.cag.2026.104551_b41","series-title":"Dimensionality reduction with unsupervised nearest neighbors","first-page":"13","article-title":"K-nearest neighbors","author":"Kramer","year":"2013"},{"key":"10.1016\/j.cag.2026.104551_b42","doi-asserted-by":"crossref","unstructured":"Zhou H, Feng Y, Fang M, Wei M, Qin J, Lu T. Adaptive graph convolution for point cloud analysis. In: Proceedings of the IEEE\/CVF international conference on computer vision. 2021, p. 4965\u201374.","DOI":"10.1109\/ICCV48922.2021.00492"},{"key":"10.1016\/j.cag.2026.104551_b43","article-title":"Understanding batch normalization","volume":"31","author":"Bjorck","year":"2018","journal-title":"Adv Neural Inf Process Syst"},{"issue":"6","key":"10.1016\/j.cag.2026.104551_b44","doi-asserted-by":"crossref","first-page":"1458","DOI":"10.1109\/TBDATA.2023.3291558","article-title":"A survey of visual affordance recognition based on deep learning","volume":"9","author":"Chen","year":"2023","journal-title":"IEEE Trans Big Data"},{"key":"10.1016\/j.cag.2026.104551_b45","series-title":"Interpretable affordance detection on 3D point clouds with probabilistic prototypes","author":"Li","year":"2025"},{"key":"10.1016\/j.cag.2026.104551_b46","doi-asserted-by":"crossref","unstructured":"Long J, Shelhamer E, Darrell T. Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2015, p. 3431\u201340.","DOI":"10.1109\/CVPR.2015.7298965"},{"issue":"2","key":"10.1016\/j.cag.2026.104551_b47","doi-asserted-by":"crossref","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","article-title":"The pascal visual object classes (voc) challenge","volume":"88","author":"Everingham","year":"2010","journal-title":"Int J Comput Vis"},{"issue":"2","key":"10.1016\/j.cag.2026.104551_b48","doi-asserted-by":"crossref","first-page":"171","DOI":"10.1023\/A:1010920819831","article-title":"A simple generalisation of the area under the ROC curve for multiple class classification problems","volume":"45","author":"Hand","year":"2001","journal-title":"Mach Learn"},{"key":"10.1016\/j.cag.2026.104551_b49","volume":"vol. 4","author":"Bishop","year":"2006"}],"container-title":["Computers &amp; Graphics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0097849326000221?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0097849326000221?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T04:14:13Z","timestamp":1776140053000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0097849326000221"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":49,"alternative-id":["S0097849326000221"],"URL":"https:\/\/doi.org\/10.1016\/j.cag.2026.104551","relation":{},"ISSN":["0097-8493"],"issn-type":[{"value":"0097-8493","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"RIFLe-Net: Rotation Invariant Feature Learning Network towards affordance detection in 3D point clouds","name":"articletitle","label":"Article Title"},{"value":"Computers & Graphics","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.cag.2026.104551","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"104551"}}