{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T00:49:55Z","timestamp":1767314995222,"version":"3.48.0"},"publisher-location":"Cham","reference-count":24,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032101914","type":"print"},{"value":"9783032101921","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-10192-1_27","type":"book-chapter","created":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T00:46:32Z","timestamp":1767314792000},"page":"327-338","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Ego-Exo Object Correspondence by SAM2 and Cross-View Prompting"],"prefix":"10.1007","author":[{"given":"Devis","family":"Salierno","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1672-667X","authenticated-orcid":false,"given":"Matteo","family":"Dunnhofer","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4503-7483","authenticated-orcid":false,"given":"Christian","family":"Micheloni","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,2]]},"reference":[{"key":"27_CR1","unstructured":"Chen, S., Yu, E., Tao, W.: CRTracker: end-to-end cross-view referring multi-object tracking. arXiv preprint arXiv:2412.17807 (2024)"},{"key":"27_CR2","doi-asserted-by":"crossref","unstructured":"Cheng, H.K., Schwing, A.G.: Xmem: long-term video object segmentation with an Atkinson-Shiffrin memory model. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19815-1_37"},{"key":"27_CR3","unstructured":"Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: video segmentations and object relations. NeurIPS (2022)"},{"key":"27_CR4","doi-asserted-by":"crossref","unstructured":"Dunnhofer, M., Furnari, A., Farinella, G.M., Micheloni, C.: Is first person vision challenging for object tracking? In: ICCVW (2021)","DOI":"10.1109\/ICCVW54120.2021.00304"},{"key":"27_CR5","doi-asserted-by":"crossref","unstructured":"Dunnhofer, M., Furnari, A., Farinella, G.M., Micheloni, C.: Visual object tracking in first person vision. IJCV (2023)","DOI":"10.1007\/s11263-022-01694-6"},{"key":"27_CR6","unstructured":"Dunnhofer, M., Manigrasso, Z., Micheloni, C.: Is tracking really more challenging in first person egocentric vision? In: ICCV (2025)"},{"key":"27_CR7","doi-asserted-by":"crossref","unstructured":"Dunnhofer, M., Simonato, K., Micheloni, C.: Combining complementary trackers for enhanced long-term visual object tracking. Image and Vision Computing (2022)","DOI":"10.1016\/j.imavis.2022.104448"},{"key":"27_CR8","doi-asserted-by":"crossref","unstructured":"Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et\u00a0al.: Ego-Exo4D: understanding skilled human activity from first- and third-person perspectives. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01834"},{"key":"27_CR9","doi-asserted-by":"crossref","unstructured":"Hao, S., Liu, P., Zhan, Y., et\u00a0al.: Divotrack: A dataset and baseline for cross-view multi-object tracking in diverse open scenes. IJCV (2024)","DOI":"10.1007\/s11263-023-01922-7"},{"key":"27_CR10","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et\u00a0al.: Segment anything. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"27_CR11","unstructured":"Kristan, M., Leonardis, A., Matas, J., Felsberg, M., Pflugfelder, R., K\u00e4m\u00e4r\u00e4inen, J.K., Chang, H.J., Danelljan, M., Zajc, L.\u010c., Luke\u017ei\u010d, A., et\u00a0al.: The tenth visual object tracking vot2022 challenge results. In: ECCVW (2022)"},{"key":"27_CR12","unstructured":"Kristan, M., Leonardis, A., Matas, J., Felsberg, M., Pflugfelder, R., K\u00e4m\u00e4r\u00e4inen, J.K., Danelljan, M., Zajc, L.\u010c., Luke\u017ei\u010d, A., Drbohlav, O., et\u00a0al.: The eighth visual object tracking vot2020 challenge results. In: ECCVW (2020)"},{"key":"27_CR13","unstructured":"Kristan, M., Matas, J., Danelljan, M., Felsberg, M., Chang, H.J., Zajc, L.\u010c., Luke\u017ei\u010d, A., Drbohlav, O., Zhang, Z., Tran, K.T., et\u00a0al.: The first visual object tracking segmentation vots2023 challenge results. In: ICCVW (2023)"},{"key":"27_CR14","unstructured":"Kristan, M., Matas, J., Leonardis, A., Felsberg, M., Cehovin, L., Fernandez, G., Vojir, T., Hager, G., Nebehay, G., Pflugfelder, R.: The visual object tracking vot2015 challenge results. In: ICCVW (2015)"},{"key":"27_CR15","unstructured":"Kristan, M., Matas, J., Leonardis, A., Felsberg, M., Pflugfelder, R., Kamarainen, J.K., \u010cehovin\u00a0Zajc, L., Drbohlav, O., Lukezic, A., Berg, A., et\u00a0al.: The seventh visual object tracking vot2019 challenge results. In: ICCVW (2019)"},{"key":"27_CR16","doi-asserted-by":"crossref","unstructured":"Meinhardt, T., Kirillov, A., Leal-Taixe, L., Feichtenhofer, C.: Trackformer: Multi-object tracking with transformers. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00864"},{"key":"27_CR17","doi-asserted-by":"crossref","unstructured":"Perazzi, F., Pont-Tuset, J., McWilliams, B., Van\u00a0Gool, L., Gross, M., Sorkine-Hornung, A.: A benchmark dataset and evaluation methodology for video object segmentation. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.85"},{"key":"27_CR18","unstructured":"Ravi, N., Gabeur, V., Hu, Y.T.T., et\u00a0al.: Sam 2: Segment anything in images and videos. In: ICLR (2025)"},{"key":"27_CR19","doi-asserted-by":"crossref","unstructured":"Shen, X., Efros, A.A., Joulin, A., Aubry, M.: Learning co-segmentation by segment swapping for retrieval and discovery. In: CVPRW (2022)","DOI":"10.1109\/CVPRW56347.2022.00556"},{"key":"27_CR20","unstructured":"Tang, H., Liang, K.J., Grauman, K., Feiszli, M., Wang, W.: Egotracks: A long-term egocentric visual object tracking dataset. In: NeurIPS (2023)"},{"key":"27_CR21","doi-asserted-by":"crossref","unstructured":"Tokmakov, P., Li, J., Gaidon, A.: Breaking the \u201cobject\u201d in video object segmentation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02187"},{"key":"27_CR22","doi-asserted-by":"crossref","unstructured":"Wu, Y., Lim, J., Yang, M.H.: Online object tracking: a benchmark. In: CVPR (2013)","DOI":"10.1109\/CVPR.2013.312"},{"key":"27_CR23","doi-asserted-by":"crossref","unstructured":"Wu, Y., Lim, J., Yang, M.H.: Object tracking benchmark. IEEE TPAMI (2015)","DOI":"10.1109\/TPAMI.2014.2388226"},{"key":"27_CR24","unstructured":"Yang, Z., Wei, Y., Yang, Y.: Associating objects with transformers for video object segmentation. NeurIPS (2021)"}],"container-title":["Lecture Notes in Computer Science","Image Analysis and Processing \u2013 ICIAP 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-10192-1_27","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T00:46:34Z","timestamp":1767314794000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-10192-1_27"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9783032101914","9783032101921"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-10192-1_27","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"2 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIAP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image Analysis and Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Rome","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"19 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iciap2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.iciap.org\/home","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}