{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:25:22Z","timestamp":1750220722157,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":10,"publisher":"ACM","license":[{"start":{"date-parts":[[2020,3,23]],"date-time":"2020-03-23T00:00:00Z","timestamp":1584921600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2020,3,23]]},"DOI":"10.1145\/3371382.3378270","type":"proceedings-article","created":{"date-parts":[[2020,4,1]],"date-time":"2020-04-01T19:31:43Z","timestamp":1585769503000},"page":"427-429","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["The Unexpected Daily Situations (UDS) Dataset"],"prefix":"10.1145","author":[{"given":"Yoan","family":"Sallami","sequence":"first","affiliation":[{"name":"LAAS-CNRS, Toulouse, France"}]},{"given":"Katie","family":"Winkle","sequence":"additional","affiliation":[{"name":"Bristol Robotics Laboratory, Bristol, United Kingdom"}]},{"given":"Nicola","family":"Webb","sequence":"additional","affiliation":[{"name":"Bristol Robotics Laboratory, Bristol, United Kingdom"}]},{"given":"Severin","family":"Lemaignan","sequence":"additional","affiliation":[{"name":"University of the West of England, Bristol, United Kingdom"}]},{"given":"Rachid","family":"Alami","sequence":"additional","affiliation":[{"name":"LAAS-CNRS, Toulouse, France"}]}],"member":"320","published-online":{"date-parts":[[2020,4]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"A Short Note on the Kinetics-700 Human Action Dataset. CoRR abs\/1907.06987","author":"Carreira Jo\u00e3o","year":"2019","unstructured":"Jo\u00e3o Carreira , Eric Noland , Chloe Hillier , and Andrew Zisserman . 2019. A Short Note on the Kinetics-700 Human Action Dataset. CoRR abs\/1907.06987 ( 2019 ). arXiv:1907.06987 http:\/\/arxiv.org\/abs\/1907.06987 Jo\u00e3o Carreira, Eric Noland, Chloe Hillier, and Andrew Zisserman. 2019. A Short Note on the Kinetics-700 Human Action Dataset. CoRR abs\/1907.06987 (2019). arXiv:1907.06987 http:\/\/arxiv.org\/abs\/1907.06987"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.599"},{"key":"e_1_3_2_1_3_1","volume-title":"Actor and Observer: Joint Modeling of First and Third-Person Videos. CoRR abs\/1804.09627","author":"Sigurdsson Gunnar A.","year":"2018","unstructured":"Gunnar A. Sigurdsson , Abhinav Gupta , Cordelia Schmid , Ali Farhadi , and Karteek Alahari . 2018. Actor and Observer: Joint Modeling of First and Third-Person Videos. CoRR abs\/1804.09627 ( 2018 ). arXiv:1804.09627 http:\/\/arxiv.org\/abs\/1804. 09627 Gunnar A. Sigurdsson, Abhinav Gupta, Cordelia Schmid, Ali Farhadi, and Karteek Alahari. 2018. Actor and Observer: Joint Modeling of First and Third-Person Videos. CoRR abs\/1804.09627 (2018). arXiv:1804.09627 http:\/\/arxiv.org\/abs\/1804. 09627"},{"key":"e_1_3_2_1_4_1","volume-title":"Charades-Ego: A Large-Scale Dataset of Paired Third and First Person Videos. CoRR abs\/1804.09626","author":"Sigurdsson Gunnar A.","year":"2018","unstructured":"Gunnar A. Sigurdsson , Abhinav Gupta , Cordelia Schmid , Ali Farhadi , and Karteek Alahari . 2018. Charades-Ego: A Large-Scale Dataset of Paired Third and First Person Videos. CoRR abs\/1804.09626 ( 2018 ). arXiv:1804.09626 http:\/\/arxiv.org\/ abs\/1804.09626 Gunnar A. Sigurdsson, Abhinav Gupta, Cordelia Schmid, Ali Farhadi, and Karteek Alahari. 2018. Charades-Ego: A Large-Scale Dataset of Paired Third and First Person Videos. CoRR abs\/1804.09626 (2018). arXiv:1804.09626 http:\/\/arxiv.org\/ abs\/1804.09626"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_31"},{"key":"e_1_3_2_1_6_1","unstructured":"Karen Simonyan and Andrew Zisserman. 2014. Two-stream convolutional networks for action recognition in videos. In Advances in neural information processing systems. 568--576.  Karen Simonyan and Andrew Zisserman. 2014. Two-stream convolutional networks for action recognition in videos. In Advances in neural information processing systems. 568--576."},{"key":"e_1_3_2_1_7_1","volume-title":"Altruistic helping in human infants and young chimpanzees. science 311, 5765","author":"Michael Tomasello FelixWarneken","year":"2006","unstructured":"FelixWarneken and Michael Tomasello . 2006. Altruistic helping in human infants and young chimpanzees. science 311, 5765 ( 2006 ), 1301--1303. FelixWarneken and Michael Tomasello. 2006. Altruistic helping in human infants and young chimpanzees. science 311, 5765 (2006), 1301--1303."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1111\/j.1532-7078.2007.tb00227.x"},{"key":"e_1_3_2_1_9_1","volume-title":"Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification. In The European Conference on Computer Vision (ECCV).","author":"Xie Saining","year":"2018","unstructured":"Saining Xie , Chen Sun , Jonathan Huang , Zhuowen Tu , and Kevin Murphy . 2018 . Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification. In The European Conference on Computer Vision (ECCV). Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. 2018. Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification. In The European Conference on Computer Vision (ECCV)."},{"key":"e_1_3_2_1_10_1","volume-title":"HACS: Human Action Clips and Segments Dataset for Recognition and Temporal Localization. arXiv preprint arXiv:1712.09374","author":"Zhao Hang","year":"2019","unstructured":"Hang Zhao , Zhicheng Yan , Lorenzo Torresani , and Antonio Torralba . 2019 . HACS: Human Action Clips and Segments Dataset for Recognition and Temporal Localization. arXiv preprint arXiv:1712.09374 (2019). Hang Zhao, Zhicheng Yan, Lorenzo Torresani, and Antonio Torralba. 2019. HACS: Human Action Clips and Segments Dataset for Recognition and Temporal Localization. arXiv preprint arXiv:1712.09374 (2019)."}],"event":{"name":"HRI '20: ACM\/IEEE International Conference on Human-Robot Interaction","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence","SIGCHI ACM Special Interest Group on Computer-Human Interaction"],"location":"Cambridge United Kingdom","acronym":"HRI '20"},"container-title":["Companion of the 2020 ACM\/IEEE International Conference on Human-Robot Interaction"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3371382.3378270","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3371382.3378270","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T22:33:31Z","timestamp":1750199611000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3371382.3378270"}},"subtitle":["A New Benchmark for Socially-Aware Assistive Robots"],"short-title":[],"issued":{"date-parts":[[2020,3,23]]},"references-count":10,"alternative-id":["10.1145\/3371382.3378270","10.1145\/3371382"],"URL":"https:\/\/doi.org\/10.1145\/3371382.3378270","relation":{},"subject":[],"published":{"date-parts":[[2020,3,23]]},"assertion":[{"value":"2020-04-01","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}