{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T17:04:21Z","timestamp":1767114261523,"version":"3.48.0"},"publisher-location":"New York, NY, USA","reference-count":20,"publisher":"ACM","funder":[{"name":"National Key Research and Development Program of China","award":["2024YFB4505500 & 2024YFB4505503"],"award-info":[{"award-number":["2024YFB4505500 & 2024YFB4505503"]}]},{"name":"Natural Science Foundation of China","award":["62472244"],"award-info":[{"award-number":["62472244"]}]},{"name":"Qinghai University Research Ability Enhancement Project","award":["2025KTSA05"],"award-info":[{"award-number":["2025KTSA05"]}]},{"name":"Beijing Key Lab of Networked Multimedia","award":[""],"award-info":[{"award-number":[""]}]},{"name":"Tsinghua University Initiative Scientific Research Program","award":[""],"award-info":[{"award-number":[""]}]},{"name":"Undergraduate Education Innovation Grants","award":[""],"award-info":[{"award-number":[""]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,12]]},"DOI":"10.1145\/3714394.3750600","type":"proceedings-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T21:13:49Z","timestamp":1767042829000},"page":"577-580","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Context-Adaptive Hearing Aid Fitting Advisor through Multi-turn Multimodal LLM Conversation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-3618-9492","authenticated-orcid":false,"given":"Yingke","family":"Ding","sequence":"first","affiliation":[{"name":"Tsinghua University, Beijing, China and University of Washington, Seattle, Washington, 
USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-5048-1665","authenticated-orcid":false,"given":"Zeyu","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-9337-2278","authenticated-orcid":false,"given":"Xiyuxing","family":"Zhang","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1649-8620","authenticated-orcid":false,"given":"Hongbin","family":"Chen","sequence":"additional","affiliation":[{"name":"Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0546-7580","authenticated-orcid":false,"given":"Zhenan","family":"Xu","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,12,29]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"crossref","unstructured":"D. A Fabry and A. K Bhowmik. 2021. Improving speech understanding and monitoring health with hearing aids using AI and sensors. In Seminars in Hearing.","DOI":"10.1055\/s-0041-1735136"},{"key":"e_1_3_2_1_2_1","volume-title":"Audio Set: An Ontology and Human-Labeled Dataset for Audio Events. In ICASSP.","author":"Gemmeke J. F.","year":"2017","unstructured":"J. F. Gemmeke, D. P. W. Ellis, D. Freedman, A. Jansen, W. Lawrence, R. C. Moore, M. Plakal, and M. Ritter. 2017. Audio Set: An Ontology and Human-Labeled Dataset for Audio Events. In ICASSP."},{"key":"e_1_3_2_1_3_1","unstructured":"Google. 2019. YAMNet: A pretrained audio event classifier."},{"key":"e_1_3_2_1_4_1","unstructured":"D. Guo D. Yang H. Zhang J. Song R. Zhang R. Xu Q. Zhu S. Ma P. Wang X. Bi et al. 2025. Deepseek-r1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. 
arXiv preprint arXiv:2501.12948 (2025)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1055\/s-0041-1735175"},{"key":"e_1_3_2_1_6_1","first-page":"20","article-title":"AI Assistant Improves Both Wearer Outcomes and Clinical Efficiency","volume":"28","author":"Hoydal E. H.","year":"2021","unstructured":"E. H. Hoydal, N. S. Jensen, R.-L. Fischer, S. Haag, and B. Taylor. 2021. AI Assistant Improves Both Wearer Outcomes and Clinical Efficiency. The Hearing Review, Vol. 28, 11 (2021), 20-23.","journal-title":"The Hearing Review"},{"key":"e_1_3_2_1_7_1","unstructured":"A. Jaech A. Kalai A. Lerer A. Richardson A. El-Kishky et al. 2024. OpenAI o1 system card. arXiv preprint arXiv:2412.16720 (2024)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.3390\/computers7010001"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1097\/AUD.0000000000001308"},{"key":"e_1_3_2_1_10_1","first-page":"79410","author":"Kim Y.","year":"2024","unstructured":"Y. Kim, C. Park, H. Jeong, Y. S. Chan, X. Xu, et al., 2024. MDAgents: An Adaptive Collaboration of LLMs for Medical Decision-Making. In NeurIPS. 79410-79452.","journal-title":"In NeurIPS."},{"key":"e_1_3_2_1_11_1","volume-title":"Proc. IUI.","author":"Korzepa Maciej Jan","year":"2018","unstructured":"Maciej Jan Korzepa, Benjamin Johansen, Michael Kai Petersen, Jan Larsen, Jakob Eg Larsen, and Niels Henrik Pontoppidan. 2018. Learning preferences and soundscapes for augmented hearing. In Proc. IUI."},{"volume-title":"Dify: Agentic Workflow Platform. https:\/\/github.com\/langgenius\/dify. Accessed: 2025-07-10.","year":"2025","key":"e_1_3_2_1_12_1","unstructured":"LangGenius. 2025. Dify: Agentic Workflow Platform. https:\/\/github.com\/langgenius\/dify. Accessed: 2025-07-10."},{"key":"e_1_3_2_1_13_1","first-page":"1964","volume-title":"JAMIA","volume":"31","author":"Lucas M. M.","year":"2024","unstructured":"M. M. Lucas, J. Yang, J. K. Pomeroy, and C. C. Yang. 2024. 
Reasoning with LLMs for medical question answering. JAMIA, Vol. 31, 9 (2024), 1964-1975."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.3390\/s22166033"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11257-022-09324-z"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3087"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1177\/2331216520933392"},{"key":"e_1_3_2_1_18_1","unstructured":"Z. Wang X. Zhang R. Yu Y. Wang K. Christofferson J. Zhang A. Mariakakis and Y. Shi. 2024. DreamCatcher: A Wearer-aware Multi-modal Sleep Event Dataset Based on Earables in Non-restrictive Environments. Advances in Neural Information Processing Systems (2024)."},{"key":"e_1_3_2_1_19_1","unstructured":"World Health Organization. 2025. Deafness and hearing loss."},{"key":"e_1_3_2_1_20_1","unstructured":"H. Xie Y. Chen X. Xing J. Lin and X. Xu. 2024. PsyDT: Digital Twin of Counselor with Personalized Style. arXiv preprint arXiv:2412.13660 (2024)."}],"event":{"name":"UbiComp '25: The 2025 ACM International Joint Conference on Pervasive and Ubiquitous Computing \/ ISWC ACM International Symposium on Wearable Computers","sponsor":["SIGMOBILE ACM Special Interest Group on Mobility of Systems, Users, Data and Computing","SIGCHI ACM Special Interest Group on Computer-Human Interaction","SIGSPATIAL ACM Special Interest Group on Spatial Information"],"location":"Espoo Finland"},"container-title":["Companion of the 2025 ACM International Joint Conference on Pervasive and Ubiquitous 
Computing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3714394.3750600","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T17:00:01Z","timestamp":1767114001000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3714394.3750600"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,12]]},"references-count":20,"alternative-id":["10.1145\/3714394.3750600","10.1145\/3714394"],"URL":"https:\/\/doi.org\/10.1145\/3714394.3750600","relation":{},"subject":[],"published":{"date-parts":[[2025,10,12]]},"assertion":[{"value":"2025-12-29","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}