{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T13:49:08Z","timestamp":1765547348722,"version":"3.40.3"},"publisher-location":"Cham","reference-count":55,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031783531"},{"type":"electronic","value":"9783031783548"}],"license":[{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-78354-8_14","type":"book-chapter","created":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T10:30:21Z","timestamp":1733221821000},"page":"212-228","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Towards Open-Set Egocentric Action Recognition with Uncertainty Estimation"],"prefix":"10.1007","author":[{"given":"Yishan","family":"Zou","sequence":"first","affiliation":[]},{"given":"Christopher","family":"Nugent","sequence":"additional","affiliation":[]},{"given":"Matthew","family":"Burns","sequence":"additional","affiliation":[]},{"given":"Xiaoming","family":"Xi","sequence":"additional","affiliation":[]},{"given":"Meng","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,4]]},"reference":[{"key":"14_CR1","doi-asserted-by":"crossref","unstructured":"Minlong Lu, Danping Liao, and Ze-Nian Li, \u201cLearning spatiotemporal 
attention for egocentric action recognition,\u201d in Proceedings of the IEEE\/CVF International Conference on Computer Vision Workshops, 2019, pp. 4425-4434","DOI":"10.1109\/ICCVW.2019.00543"},{"key":"14_CR2","doi-asserted-by":"crossref","unstructured":"Walter J Scheirer, Anderson de Rezende Rocha, Archana Sapkota, and Terrance E Boult, \u201cToward open set recognition,\u201d in IEEE Transactions on Pattern Analysis and Machine Intelligence, 2012, pp. 1757\u20131772","DOI":"10.1109\/TPAMI.2012.256"},{"key":"14_CR3","doi-asserted-by":"crossref","unstructured":"Chuanxing Geng, Sheng-jun Huang, and Songcan Chen, \u201cRecent advances in open set recognition: A survey,\u201d in IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020, pp. 3614\u20133631","DOI":"10.1109\/TPAMI.2020.2981604"},{"key":"14_CR4","unstructured":"Wentao Bao, Qi Yu, and Yu Kong, \u201cEvidential deep learning for open set action recognition,\u201d in Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2021, pp. 13349\u201313358"},{"key":"14_CR5","unstructured":"Wentao Bao, Qi Yu, and Yu Kong, \u201cOpental: Towards open set temporal action localization,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 2979\u20132989"},{"key":"14_CR6","doi-asserted-by":"crossref","unstructured":"Murat Sensoy, Lance Kaplan, Federico Cerutti, and Maryam Saleki, \u201cUncertainty-aware deep classifiers using generative models,\u201d in Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 
5620\u20135627","DOI":"10.1609\/aaai.v34i04.6015"},{"key":"14_CR7","unstructured":"Kunchang Li, Yali Wang, Peng Gao, Guanglu Song, Yu Liu, Hongsheng Li, and Yu Qiao, \u201cUniformer: Unified transformer for efficient spatiotemporal representation learning,\u201d arXiv preprint arXiv:2201.04676, 2022"},{"key":"14_CR8","unstructured":"Murat Sensoy, Lance Kaplan, and Melih Kandemir, \u201cEvidential deep learning to quantify classification uncertainty,\u201d in Advances in Neural Information Processing Systems, 2018, pp. 3183\u20133193"},{"key":"14_CR9","unstructured":"Glenn Shafer, \u201cDempster-shafer theory,\u201d in Encyclopedia of Artificial Intelligence, 1992, pp. 330\u2013331"},{"key":"14_CR10","doi-asserted-by":"crossref","unstructured":"Audun J\u00f8sang, Subjective logic, Springer, 2016","DOI":"10.1007\/978-3-319-42337-1"},{"key":"14_CR11","doi-asserted-by":"crossref","unstructured":"Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al., \u201cScaling egocentric vision: The epic-kitchens dataset,\u201d in Proceedings of the European Conference on Computer Vision, 2018, pp. 720\u2013736","DOI":"10.1007\/978-3-030-01225-0_44"},{"key":"14_CR12","doi-asserted-by":"crossref","unstructured":"Yin Li, Miao Liu, and James M. Rehg, \u201cIn the eye of beholder: Joint learning of gaze and actions in first person video,\u201d in Proceedings of the European Conference on Computer Vision, 2018, pp. 619\u2013635","DOI":"10.1007\/978-3-030-01228-1_38"},{"key":"14_CR13","doi-asserted-by":"crossref","unstructured":"Yilin Wen, Hao Pan, Lei Yang, Jia Pan, Taku Komura, and Wenping Wang, \u201cHierarchical temporal transformer for 3d hand pose estimation and action recognition from egocentric rgb videos,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 
21243\u201321253","DOI":"10.1109\/CVPR52729.2023.02035"},{"key":"14_CR14","doi-asserted-by":"crossref","unstructured":"Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo, \u201cSwin transformer: Hierarchical vision transformer using shifted windows,\u201d in Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2021, pp. 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"14_CR15","doi-asserted-by":"crossref","unstructured":"Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu, \u201cVideo swin transformer,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 3202\u20133211","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"14_CR16","unstructured":"D. Damen, H. Doughty, G. M. Farinella, A. Furnari, E. Kazakos, J. Ma, D. Moltisanti, J. Munro, T. Perrett, W. Price et al., \u201cRescaling egocentric vision,\u201d arXiv preprint arXiv:2006.13256, 2020"},{"key":"14_CR17","unstructured":"K. Grauman, A. Westbury, E. Byrne, Z. Chavis, A. Furnari, R. Girdhar, J. Hamburger, H. Jiang, M. Liu, X. Liu et al., \u201cEgo4d: Around the world in 3,000 hours of egocentric video,\u201d in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 18995\u201319012"},{"key":"14_CR18","doi-asserted-by":"crossref","unstructured":"A. Fathi, A. Farhadi, and J. M. Rehg, \"Understanding egocentric activities,\" in Proceedings of the International Conference on Computer Vision, 2011, pp. 407-414","DOI":"10.1109\/ICCV.2011.6126269"},{"key":"14_CR19","doi-asserted-by":"crossref","unstructured":"Nguyen, T.H.C., Nebel, J.C. and Florez-Revuelta, F., \u201cRecognition of activities of daily living with egocentric vision: A review,\" Sensors, 2016, pp. 72","DOI":"10.3390\/s16010072"},{"key":"14_CR20","unstructured":"C. Dibyadip and S. Fadime and M. Shugao and Y. 
Angela, \"Opening the vocabulary of egocentric actions,\" Advances in Neural Information Processing Systems, 2024, pp. 33174\u201333187"},{"key":"14_CR21","doi-asserted-by":"crossref","unstructured":"Michael Land and Benjamin Tatler, \"Looking and acting: vision and eye movements in natural behaviour,\" Oxford University Press, 2009","DOI":"10.1093\/acprof:oso\/9780198570943.001.0001"},{"key":"14_CR22","doi-asserted-by":"crossref","unstructured":"A. Bulling, J. A. Ward, H. Gellersen, and G. Troster, \u201cEye movement analysis for activity recognition using electrooculography,\u201d IEEE Transactions on Pattern Analysis and Machine Intelligence, 2010, pp. 741\u2013753","DOI":"10.1109\/TPAMI.2010.86"},{"key":"14_CR23","unstructured":"K. Simonyan and A. Zisserman, \u201cTwo-stream convolutional networks for action recognition in videos,\u201d Advances in Neural Information Processing Systems, 2014, pp. 568\u2013576"},{"key":"14_CR24","doi-asserted-by":"crossref","unstructured":"C. Li, S. Li, Y. Gao, X. Zhang, and W. Li, \u201cA two-stream neural network for pose-based hand gesture recognition,\u201d IEEE Transactions on Cognitive and Developmental Systems, 2021, pp. 1594\u20131603","DOI":"10.1109\/TCDS.2021.3126637"},{"key":"14_CR25","doi-asserted-by":"crossref","unstructured":"M. Liu, L. Ma, K. Somasundaram, Y. Li, K. Grauman, J. M. Rehg, and C. Li, \u201cEgocentric activity recognition and localization on a 3d map,\u201d in Proceedings of the European Conference on Computer Vision, 2022, pp. 621\u2013638","DOI":"10.1007\/978-3-031-19778-9_36"},{"key":"14_CR26","doi-asserted-by":"crossref","unstructured":"A. Furnari and G. M. Farinella, \u201cWhat would you expect? anticipating egocentric actions with rolling-unrolling lstms and modality attention,\u201d in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp. 6252\u20136261","DOI":"10.1109\/ICCV.2019.00635"},{"key":"14_CR27","doi-asserted-by":"crossref","unstructured":"Y. Huang, M. Cai, Z. 
Li, F. Lu, and Y. Sato, \u201cMutual context network for jointly estimating egocentric gaze and action,\u201d IEEE Transactions on Image Processing, 2020, pp. 7795\u20137806","DOI":"10.1109\/TIP.2020.3007841"},{"key":"14_CR28","unstructured":"M. A. Arabac\u0131, F.Ozkan, E. Surer, P. Jancovic, and A. Temizel, \u201cMulti-modal egocentric activity recognition using audio-visual features,\u201d arXiv preprint arXiv:1807.00612, 2018"},{"key":"14_CR29","doi-asserted-by":"crossref","unstructured":"S. Singh, C. Arora, and C. Jawahar, \u201cFirst person action recognition using deep learned descriptors,\u201d in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 2620\u20132628","DOI":"10.1109\/CVPR.2016.287"},{"key":"14_CR30","volume-title":"Egocentric action recognition from noisy videos","author":"L Yang","year":"2020","unstructured":"Yang, L.: Egocentric action recognition from noisy videos. The University of Tokyo, Diss. (2020)"},{"key":"14_CR31","doi-asserted-by":"crossref","unstructured":"F. Li and H. Wechsler, \u201cOpen set face recognition using transduction,\u201d IEEE Transactions on Pattern Analysis and Machine Intelligence, 2005, pp. 1686\u20131697","DOI":"10.1109\/TPAMI.2005.224"},{"key":"14_CR32","doi-asserted-by":"crossref","unstructured":"A. Bendale and T. E. Boult, \u201cTowards open set deep networks,\u201d in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 1563\u20131572","DOI":"10.1109\/CVPR.2016.173"},{"key":"14_CR33","doi-asserted-by":"crossref","unstructured":"Z. Ge, S. Demyanov, Z. Chen, and R. Garnavi, \u201cGenerative openmax for multi-class open set classification,\u201d arXiv preprint arXiv:1707.07418, 2017","DOI":"10.5244\/C.31.42"},{"key":"14_CR34","doi-asserted-by":"crossref","unstructured":"L. Neal, M. Olson, X. Fern, W.-K. Wong, and F. 
Li, \u201cOpen set learning with counterfactual images,\u201d in Proceedings of the European Conference on Computer Vision, 2018, pp. 613\u2013628","DOI":"10.1007\/978-3-030-01231-1_38"},{"key":"14_CR35","doi-asserted-by":"crossref","unstructured":"L. Ditria, B. J. Meyer, and T. Drummond, \u201cOpengan: Open set generative adversarial networks,\u201d arXiv preprint arXiv:2006.16241, 2020","DOI":"10.1007\/978-3-030-69538-5_29"},{"key":"14_CR36","doi-asserted-by":"crossref","unstructured":"P. Oza and V. M. Patel, \u201cC2ae: Class conditioned auto-encoder for open-set recognition,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 2307\u20132316","DOI":"10.1109\/CVPR.2019.00241"},{"key":"14_CR37","doi-asserted-by":"crossref","unstructured":"R. Yoshihashi, W. Shao, R. Kawakami, S. You, M. Iida, and T. Naemura, \u201cClassification-reconstruction learning for open-set recognition,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 4016\u20134025","DOI":"10.1109\/CVPR.2019.00414"},{"key":"14_CR38","doi-asserted-by":"crossref","unstructured":"X. Sun, Z. Yang, C. Zhang, K.-V. Ling, and G. Peng, \u201cConditional gaussian distribution learning for open set recognition,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 13480\u201313489","DOI":"10.1109\/CVPR42600.2020.01349"},{"key":"14_CR39","volume-title":"Practical uncertainty estimation and out-of-distribution robustness in deep learning","author":"D Tran","year":"2020","unstructured":"Tran, D., Snoek, J., Lakshminarayanan, B.: Practical uncertainty estimation and out-of-distribution robustness in deep learning. Technical Report, Google Brain (2020)"},{"key":"14_CR40","doi-asserted-by":"crossref","unstructured":"G. Pang, C. Shen, L. Cao, and A. V. D. Hengel, \u201cDeep learning for anomaly detection: A review,\u201d ACM Computing Surveys, 2021, pp. 
1\u201338","DOI":"10.1145\/3439950"},{"key":"14_CR41","doi-asserted-by":"crossref","unstructured":"D. Mandal, S. Narayan, S. K. Dwivedi, V. Gupta, S. Ahmed, F. S. Khan, and L. Shao, \u201cOut-of-distribution detection for generalized zero-shot action recognition,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 9985\u20139993","DOI":"10.1109\/CVPR.2019.01022"},{"key":"14_CR42","doi-asserted-by":"crossref","unstructured":"A. Bendale and T. Boult, \u201cTowards open world recognition,\u201d in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015, pp. 1893\u20131902","DOI":"10.1109\/CVPR.2015.7298799"},{"key":"14_CR43","doi-asserted-by":"crossref","unstructured":"S. N. Aakur and S. Kundu and N. Gunti, \"Knowledge guided learning: Open world egocentric action recognition with zero supervision\", Pattern Recognition Letters, 2022, pp. 38-45","DOI":"10.1016\/j.patrec.2022.03.007"},{"key":"14_CR44","unstructured":"S. Kuniaki and K. Donghyun and S. Kate, \"Openmatch: Open-set semi-supervised learning with open-set consistency regularization\", Advances in Neural Information Processing Systems, 2021, pp. 25956-25967"},{"key":"14_CR45","doi-asserted-by":"crossref","unstructured":"Y. Shu, Y. Shi, Y. Wang, Y. Zou, Q. Yuan, and Y. Tian, \u201cOdn: Opening the deep network for open-set action recognition,\u201d in 2018 IEEE International Conference on Multimedia and Expo. IEEE, 2018, pp. 1\u20136","DOI":"10.1109\/ICME.2018.8486601"},{"key":"14_CR46","unstructured":"R. Krishnan, M. Subedar, and O. Tickoo, \u201cBar: Bayesian activity recognition using variational inference,\u201d arXiv preprint arXiv:1811.03305, 2018"},{"key":"14_CR47","doi-asserted-by":"crossref","unstructured":"M. Subedar, R. Krishnan, P. L. Meyer, O. Tickoo, and J. 
Huang, \u201cUncertainty-aware audiovisual activity recognition using deep bayesian variational inference,\u201d in Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2019, pp. 6301\u20136310","DOI":"10.1109\/ICCV.2019.00640"},{"key":"14_CR48","doi-asserted-by":"crossref","unstructured":"R. Krishnan, M. Subedar, and O. Tickoo, \u201cSpecifying weight priors in bayesian deep neural networks with empirical bayes,\u201d in Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 4477\u20134484","DOI":"10.1609\/aaai.v34i04.5875"},{"key":"14_CR49","unstructured":"P. P. Busto, A. Iqbal, and J. Gall, \u201cOpen set domain adaptation for image and action recognition,\u201d IEEE Transactions on Pattern Analysis and Machine Intelligence, 2018, pp. 413\u2013429"},{"key":"14_CR50","unstructured":"A. Malinin and M. Gales, \u201cPredictive uncertainty estimation via prior networks,\u201d Advances in Neural Information Processing Systems, 2018, pp. 7047\u20137058"},{"key":"14_CR51","unstructured":"B. Charpentier, D. Zugner, and S. Gunnemann, \u201cPosterior network: Uncertainty estimation without ood samples via density-based pseudo-counts,\u201d Advances in Neural Information Processing Systems, 2020, pp. 1356\u20131367"},{"key":"14_CR52","unstructured":"W. Shi, X. Zhao, F. Chen, and Q. Yu, \u201cMultifaceted uncertainty estimation for label-efficient deep learning,\u201d Advances in Neural Information Processing Systems, 2020, pp. 17247\u201317257"},{"key":"14_CR53","doi-asserted-by":"crossref","unstructured":"F. Kraus and K. Dietmayer, \u201cUncertainty estimation in one-stage object detection,\u201d IEEE Intelligent Transportation Systems Conference, 2019, pp. 53\u201360","DOI":"10.1109\/ITSC.2019.8917494"},{"key":"14_CR54","doi-asserted-by":"crossref","unstructured":"W. Bao, Q. Yu, and Y. 
Kong, \u201cUncertainty-based traffic accident anticipation with spatio-temporal relational learning,\u201d in Proceedings of the 28th ACM International Conference on Multimedia, 2020, pp. 2682\u20132690","DOI":"10.1145\/3394171.3413827"},{"key":"14_CR55","unstructured":"A. Amini, W. Schwarting, A. Soleimany, and D. Rus, \u201cDeep evidential regression,\u201d Advances in Neural Information Processing Systems, 2020, pp. 14927\u201314937"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-78354-8_14","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T11:28:17Z","timestamp":1733225297000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-78354-8_14"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,4]]},"ISBN":["9783031783531","9783031783548"],"references-count":55,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-78354-8_14","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,4]]},"assertion":[{"value":"4 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}