{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:16:53Z","timestamp":1777655813682,"version":"3.51.4"},"reference-count":98,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100008530","name":"European Regional Development Fund","doi-asserted-by":"publisher","award":["CZ.02.1.01\/0.0\/0.0\/15_003\/0000468"],"award-info":[{"award-number":["CZ.02.1.01\/0.0\/0.0\/15_003\/0000468"]}],"id":[{"id":"10.13039\/501100008530","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Ministry of Education, Youth and Sports of the Czech Republic"},{"name":"e-INFRA CZ","award":["90140"],"award-info":[{"award-number":["90140"]}]},{"name":"French government"},{"DOI":"10.13039\/501100001665","name":"Agence Nationale de la Recherche","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001665","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Investissements d&#x0027;avenir","award":["ANR19-P3IA-0001"],"award-info":[{"award-number":["ANR19-P3IA-0001"]}]},{"name":"PRAIRIE 3IA Institute"},{"name":"Louis Vuitton ENS Chair on Artificial Intelligence"},{"name":"European Union ERC"},{"name":"FRONTIER","award":["101097822"],"award-info":[{"award-number":["101097822"]}]},{"name":"EXA4MIND project"},{"name":"European Union&#x0027;s Horizon Europe Research and Innovation Programme","award":["101092944"],"award-info":[{"award-number":["101092944"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,7]]},"DOI":"10.1109\/tpami.2024.3362288","type":"journal-article","created":{"date-parts":[[2024,2,5]],"date-time":"2024-02-05T18:37:45Z","timestamp":1707158265000},"page":"5114-5130","source":"Crossref","is-referenced-by-count":10,"title":["Multi-Task Learning of Object States and State-Modifying Actions From Web Videos"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6911-5517","authenticated-orcid":false,"given":"Tom\u00e1\u0161","family":"Sou\u010dek","sequence":"first","affiliation":[{"name":"Czech Institute of Informatics, Robotics and Cybernetics, Czech Technical University, Prague, Czechia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3071-4157","authenticated-orcid":false,"given":"Jean-Baptiste","family":"Alayrac","sequence":"additional","affiliation":[{"name":"DeepMind, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6657-7812","authenticated-orcid":false,"given":"Antoine","family":"Miech","sequence":"additional","affiliation":[{"name":"DeepMind, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7072-3325","authenticated-orcid":false,"given":"Ivan","family":"Laptev","sequence":"additional","affiliation":[{"name":"Inria and D&#x00E9;partement d&#x2019;informatique de l&#x2019;ENS, &#x00C9;cole normale sup&#x00E9;rieure, CNRS, PSL Research University, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2554-5301","authenticated-orcid":false,"given":"Josef","family":"Sivic","sequence":"additional","affiliation":[{"name":"Czech Institute of Informatics, Robotics and Cybernetics, Czech Technical University, Prague, Czechia"}]}],"member":"263","reference":[{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.495"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.234"},{"key":"ref4","article-title":"Self-supervised multimodal versatile networks","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58604-1_8"},{"key":"ref6","article-title":"COBE: Contextualized object embeddings from narrated instructional video","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Bertasius"},{"key":"ref7","article-title":"Is space-time attention all you need for video understanding","author":"Bertasius","year":"2021"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_41"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.507"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.4161\/cib.2.1.7297"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.675"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1023\/A:1007379606734"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00366"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58621-8_20"},{"key":"ref16","first-page":"1749","article-title":"Webly supervised joint embedding for cross-modal image-text retrieval","volume-title":"Proc. ACM Int. Conf. Multimedia","author":"Chowdhury"},{"key":"ref17","first-page":"753","article-title":"Scaling egocentric vision: The epic-kitchens dataset","volume-title":"Proc. Eur. Conf. Comput. Vis.","author":"Damen"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.5244\/c.28.30"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.226"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00957"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00634"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00095"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01346"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58520-4_33"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00151"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.333"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299176"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00020"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01325"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10593-2_35"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01842"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00147"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298744"},{"key":"ref37","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jia"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.105"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01234"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01339"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00634"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01244"},{"key":"ref44","article-title":"Egocentric video-language pretraining","author":"Lin","year":"2022"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00399"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00197"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1441"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.318"},{"key":"ref49","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00798"},{"key":"ref51","article-title":"Transformation-based adversarial video prediction on large-scale data","author":"Luc","year":"2020"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00113"},{"key":"ref53","article-title":"RareAct: A video dataset of unusual interactions","author":"Miech","year":"2020"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00351"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.129"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.433"},{"key":"ref59","article-title":"Learning state-aware visual representations from audible interactions","author":"Mittal","year":"2022"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00101"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_11"},{"key":"ref62","article-title":"Activity graph transformer for temporal action localization","author":"Nawhal","year":"2021"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.5555\/1953048.2078195"},{"key":"ref64","first-page":"2641","article-title":"Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models","volume-title":"Proc. IEEE Int. Conf. Comput. Vis.","author":"Bryan"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01340"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00369"},{"key":"ref67","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00086"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00771"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0987-1"},{"key":"ref71","article-title":"Recognizing actions using object states","volume-title":"Proc. ICLR Workshop Elements Reasoning: Objects Struct. Causality","author":"Saini"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01329"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00873"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01002"},{"key":"ref75","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","volume-title":"Proc. 27th Int. Conf. Neural Inf. Process. Syst.","author":"Simonyan"},{"key":"ref76","article-title":"UCF101: A dataset of 101 human actions classes from videos in the wild","author":"Soomro","year":"2012"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3089127"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01357"},{"key":"ref79","article-title":"Learning general purpose distributed sentence representations via large scale multi-task learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Subramanian"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00130"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2797921"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2016.291"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00025"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00840"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00054"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00037"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01017"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00333"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01026"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00391"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01589"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00394"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_16"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00365"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58526-6_28"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10550108\/10420504.pdf?arnumber=10420504","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,25]],"date-time":"2024-06-25T19:57:49Z","timestamp":1719345469000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10420504\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7]]},"references-count":98,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3362288","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,7]]}}}