{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:39:46Z","timestamp":1775230786098,"version":"3.50.1"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. Graphics"],
"published-print":{"date-parts":[[2025,11]]},"DOI":"10.1109\/tvcg.2025.3616866","type":"journal-article","created":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T17:41:14Z","timestamp":1759858874000},"page":"9720-9729","source":"Crossref","is-referenced-by-count":1,"title":["EgoTrigger: Toward Audio-Driven Image Capture for Human Memory Enhancement in All-Day Energy-Efficient Smart Glasses"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4664-3186","authenticated-orcid":false,"given":"Akshay","family":"Paruchuri","sequence":"first","affiliation":[{"name":"UNC Chapel Hill, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7333-005X","authenticated-orcid":false,"given":"Sinan","family":"Hersek","sequence":"additional","affiliation":[{"name":"Google, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-2280-1302","authenticated-orcid":false,"given":"Lavisha","family":"Aggarwal","sequence":"additional","affiliation":[{"name":"Google, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-9304-3654","authenticated-orcid":false,"given":"Qiao","family":"Yang","sequence":"additional","affiliation":[{"name":"Google, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9279-5386","authenticated-orcid":false,"given":"Xin","family":"Liu","sequence":"additional","affiliation":[{"name":"Google, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-8632-9575","authenticated-orcid":false,"given":"Achin","family":"Kulshrestha","sequence":"additional","affiliation":[{"name":"Google, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-6661-2216","authenticated-orcid":false,"given":"Andrea","family":"Colaco","sequence":"additional","affiliation":[{"name":"Google, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8834-4638","authenticated-orcid":false,"given":"Henry","family":"Fuchs","sequence":"additional","affiliation":[{"name":"UNC Chapel Hill, USA"}]},
{"ORCID":"https:\/\/orcid.org\/0000-0002-2123-6392","authenticated-orcid":false,"given":"Ishan","family":"Chatterjee","sequence":"additional","affiliation":[{"name":"Google, USA"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023","journal-title":"arXiv preprint"},{"key":"ref2","first-page":"1","article-title":"The claude 3 model family: Opus, sonnet, haiku","volume":"1","author":"Anthropic","year":"2024","journal-title":"Claude-3 Model Card"},{"key":"ref3","article-title":"Soundnet: Learning sound representations from unlabeled video","author":"Aytar","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1039\/C5TC01644D"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00162"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/2.738305"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-92460-6_21"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72897-6_16"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3658126"},{"key":"ref10","article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","author":"Chu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref11","year":"2025","journal-title":"Vuzix m400\u2122 smart glasses"},{"key":"ref12","first-page":"800","article-title":"Tensorflow lite micro: Embedded machine learning for tinyml systems","volume-title":"Proceedings of Machine Learning and Systems","volume":"3","author":"David","year":"2021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ISWC.2003.1241404"},{"key":"ref14","article-title":"Tensorflow","year":"2022","journal-title":"Zenodo"},{"key":"ref15","year":"2024","journal-title":"Audio classification guide"},
{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/1873951.1874246"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01047"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3613905.3650758"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01842"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3593908.3593948"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952132"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ISWC.2000.888493"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.3758\/s13423-019-01674-x"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3478513.3480514"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/PERCOMW.2015.7134100"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00559"},{"issue":"2","key":"ref28","first-page":"8","article-title":"Sensors are power hungry: An investigation of smartphone sensors impact on battery power from lifelogging perspective","volume":"9","author":"Khan","year":"2016","journal-title":"Bahria University Journal of Information & Communication Technology"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/cvprw67362.2025.00349"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2015.7218661"},{"key":"ref31","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv preprint"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/PRIMEAsia60757.2023.00022"},{"key":"ref33","article-title":"Decoupled weight decay regularization","author":"Loshchilov","year":"2017","journal-title":"arXiv preprint"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"ref35","first-page":"46212","article-title":"Egoschema: A diagnostic benchmark for very long-form video language understanding","volume":"36","author":"Mangalam","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},
{"key":"ref36","year":"2024","journal-title":"Introducing orion, our first true augmented reality glasses"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02247"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3025270"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/RTAS.2013.6531089"},{"issue":"7","key":"ref40","article-title":"How easy is it to fool your multimodal llms? an empirical analysis on deceptive prompts","volume":"2","author":"Qian","year":"2024","journal-title":"arXiv preprint"},{"key":"ref41","article-title":"I. Qualcomm Technologies","volume-title":"Snapdragon ar1 gen 1 platform","year":"2023"},{"key":"ref42","volume-title":"Ray-Ban Meta Smart Glasses","year":"2025"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ISWC.1997.629928"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1003"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ISMAR62088.2024.00108"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-025-89709-3"},{"key":"ref47","first-page":"33485","article-title":"Egodistill: Egocentric head motion distillation for efficient video understanding","volume":"36","author":"Tan","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref48","article-title":"Gemini: a family of highly capable multimodal models","author":"Team","year":"2023","journal-title":"arXiv preprint"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ISMAR62088.2024.00107"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02690"}],"container-title":["IEEE Transactions on Visualization and Computer Graphics"],
"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/2945\/11231116\/11195962.pdf?arnumber=11195962","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,7]],"date-time":"2025-11-07T18:10:29Z","timestamp":1762539029000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11195962\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11]]},"references-count":50,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tvcg.2025.3616866","relation":{},"ISSN":["1077-2626","1941-0506","2160-9306"],"issn-type":[{"value":"1077-2626","type":"print"},{"value":"1941-0506","type":"electronic"},{"value":"2160-9306","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11]]}}}