{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:22:52Z","timestamp":1775229772541,"version":"3.50.1"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100020962","name":"ACT-X","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100020962","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10445977","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"11261-11265","source":"Crossref","is-referenced-by-count":6,"title":["STYLECAP: Automatic Speaking-Style Captioning from Speech Based on Speech and Language Self-Supervised Learning Models"],"prefix":"10.1109","author":[{"given":"Kazuki","family":"Yamauchi","sequence":"first","affiliation":[{"name":"The University of Tokyo,Japan"}]},{"given":"Yusuke","family":"Ijima","sequence":"additional","affiliation":[{"name":"NTT Corporation,Japan"}]},{"given":"Yuki","family":"Saito","sequence":"additional","affiliation":[{"name":"The University of Tokyo,Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1037\/amp0000399"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.5555\/3295222.3295349"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747197"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2019.12.001"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2021.03.004"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1002\/lio2.354"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095621"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2022.3175578"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2023.3266418"},{"key":"ref10","article-title":"Describing emotions with acoustic property prompts for speech emotion recognition","author":"Dhamyal","year":"2022"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10448394"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1186\/s13636-022-00259-2"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.126287"},{"key":"ref14","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. ICML","author":"Radford"},{"key":"ref15","article-title":"CLAP: Learning audio concepts from natural language supervision","volume-title":"Proc. ICASSP","author":"Benjamin"},{"key":"ref16","article-title":"Language models are few-shot learners","volume-title":"Proc. 
NeurIPS","author":"Brown"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s10772-011-9125-1"},{"key":"ref18","article-title":"ClipCap: CLIP prefix for image captioning","author":"Mokady","year":"2021"},{"key":"ref19","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref20","article-title":"BERTScore: Evaluating text generation with BERT","volume-title":"Proc. ICLR","author":"Zhang"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096285"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461375"},{"key":"ref25","article-title":"Language models are unsupervised multitask learners","author":"Radford","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref27","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","volume-title":"Proc. Workshop on Text Summarization Branches Out","author":"Lin"},{"key":"ref28","first-page":"65","article-title":"METEOR: An automatic metric for MT evaluation with improved correlation with human judgments","volume-title":"Proc. ACL","author":"Banerjee"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46454-1_24"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N16-1014"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSPW59220.2023.10193459"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.153"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10445977.pdf?arnumber=10445977","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T05:00:27Z","timestamp":1722574827000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10445977\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10445977","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}