{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T18:38:18Z","timestamp":1742927898059,"version":"3.40.3"},"publisher-location":"Cham","reference-count":41,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783031037887"},{"type":"electronic","value":"9783031037894"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-03789-4_17","type":"book-chapter","created":{"date-parts":[[2022,4,14]],"date-time":"2022-04-14T23:02:49Z","timestamp":1649977369000},"page":"259-274","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Emotion-Driven Interactive Storytelling: Let Me Tell You How to\u00a0Feel"],"prefix":"10.1007","author":[{"given":"Oneris Daniel","family":"Rico Garcia","sequence":"first","affiliation":[]},{"given":"Javier","family":"Fernandez Fernandez","sequence":"additional","affiliation":[]},{"given":"Rafael Andres","family":"Becerra Saldana","sequence":"additional","affiliation":[]},{"given":"Olaf","family":"Witkowski","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,4,15]]},"reference":[{"key":"17_CR1","doi-asserted-by":"publisher","unstructured":"Al Machot, F., Elmachot, A., Ali, M., Al Machot, E., Kyamakya, K.: A deep-learning model for subject-independent human emotion recognition using electrodermal activity sensors. Sensors 19(7) (2019). https:\/\/doi.org\/10.3390\/s19071659, https:\/\/www.mdpi.com\/1424-8220\/19\/7\/1659","DOI":"10.3390\/s19071659"},{"key":"17_CR2","doi-asserted-by":"crossref","unstructured":"Alserri, S.A., Zin, N.A.M., Wook, T.S.M.T.: Instrument validation for evaluating serious game engagement model. In: 2019 International Conference on Electrical Engineering and Informatics (ICEEI), pp. 170\u2013175. IEEE (2019)","DOI":"10.1109\/ICEEI47359.2019.8988873"},{"issue":"1","key":"17_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1177\/1529100619832930","volume":"20","author":"LF Barrett","year":"2019","unstructured":"Barrett, L.F., Adolphs, R., Marsella, S., Martinez, A.M., Pollak, S.D.: Emotional expressions reconsidered: challenges to inferring emotion from human facial movements. Psychol. Sci. Public Interest 20(1), 1\u201368 (2019)","journal-title":"Psychol. Sci. Public Interest"},{"key":"17_CR4","doi-asserted-by":"publisher","unstructured":"Baveye, Y., Dellandr\u00e9a, E., Chamaret, C., Chen, L.: LIRIS-ACCEDE: a video database for affective content analysis. IEEE Trans. Affect. Comput. 6, 43\u201355 (2015). https:\/\/doi.org\/10.1109\/TAFFC.2015.2396531","DOI":"10.1109\/TAFFC.2015.2396531"},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Bellantoni, P.: If It\u2019s Purple, Someone\u2019s Gonna Die: The Power of Color in Visual Storytelling. Taylor & Francis (2012). https:\/\/books.google.co.jp\/books?id=E57cAwAAQBAJ","DOI":"10.4324\/9780080478418"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"B\u00f6ck, R., et al.: Intraindividual and interindividual multimodal emotion analyses in human-machine-interaction. In: 2012 IEEE International Multi-Disciplinary Conference on Cognitive Methods in Situation Awareness and Decision Support, pp. 59\u201364. IEEE (2012)","DOI":"10.1109\/CogSIMA.2012.6188409"},{"key":"17_CR7","doi-asserted-by":"publisher","unstructured":"Bradley, M.M., Lang, P.J.: Measuring emotion: the self-assessment manikin and the semantic differential. J. Behav. Therapy Exp. Psychiatry 25(1), 49\u201359 (1994). https:\/\/doi.org\/10.1016\/0005-7916(94)90063-9, https:\/\/www.sciencedirect.com\/science\/article\/pii\/0005791694900639","DOI":"10.1016\/0005-7916(94)90063-9"},{"issue":"4","key":"17_CR8","doi-asserted-by":"publisher","first-page":"602","DOI":"10.1111\/j.1469-8986.2008.00654.x","volume":"45","author":"MM Bradley","year":"2008","unstructured":"Bradley, M.M., Miccoli, L., Escrig, M.A., Lang, P.J.: The pupil as a measure of emotional arousal and autonomic activation. Psychophysiology 45(4), 602\u2013607 (2008)","journal-title":"Psychophysiology"},{"key":"17_CR9","doi-asserted-by":"crossref","unstructured":"Carlton, J., Brown, A., Jay, C., Keane, J.: Inferring user engagement from interaction data. In: Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 1\u20136 (2019)","DOI":"10.1145\/3290607.3313009"},{"key":"17_CR10","doi-asserted-by":"publisher","unstructured":"Caruso, E.M., Burns, Z.C., Converse, B.A.: Slow motion increases perceived intent. Proc. Natl. Acad. Sci. 113(33), 9250\u20139255 (2016). https:\/\/doi.org\/10.1073\/pnas.1603865113, https:\/\/www.pnas.org\/content\/113\/33\/9250","DOI":"10.1073\/pnas.1603865113"},{"key":"17_CR11","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"350","DOI":"10.1007\/978-3-030-17287-9_28","volume-title":"Persuasive Technology: Development of Persuasive and Behavior Change Support Systems","author":"AG Ciancone Chama","year":"2019","unstructured":"Ciancone Chama, A.G., Monaro, M., Piccoli, E., Gamberini, L., Spagnolli, A.: Engaging the audience with biased news: an exploratory study on prejudice and engagement. In: Oinas-Kukkonen, H., Win, K.T., Karapanos, E., Karppinen, P., Kyza, E. (eds.) PERSUASIVE 2019. LNCS, vol. 11433, pp. 350\u2013361. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-17287-9_28"},{"key":"17_CR12","doi-asserted-by":"publisher","unstructured":"Damiano, R., Lombardo, V., Monticone, G., Pizzo, A.: All about face. An experiment in face emotion recognition in interactive dramatic performance. In: 2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pp. 1\u20137 (2019). https:\/\/doi.org\/10.1109\/ACIIW.2019.8925032","DOI":"10.1109\/ACIIW.2019.8925032"},{"key":"17_CR13","doi-asserted-by":"publisher","unstructured":"De Andr\u00e9s, I., Garz\u00f3n, M., Reinoso-Su\u00e1rez, F.: Functional anatomy of non-REM sleep. Front. Neurol. 1\u201314 (2011). https:\/\/doi.org\/10.3389\/fneur.2011.00070","DOI":"10.3389\/fneur.2011.00070"},{"issue":"6","key":"17_CR14","doi-asserted-by":"publisher","first-page":"787","DOI":"10.1080\/02699930143000248","volume":"15","author":"PJ Deldin","year":"2001","unstructured":"Deldin, P.J., Keller, J., Gergen, J.A., Miller, G.A.: Cognitive bias and emotion in neuropsychological models of depression. Cogn. Emot. 15(6), 787\u2013802 (2001)","journal-title":"Cogn. Emot."},{"key":"17_CR15","doi-asserted-by":"publisher","unstructured":"Frey, J., Ostrin, G., Grabli, M., Cauchard, J.R.: Physiologically driven storytelling: concept and software tool. In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, CHI 2020, pp. 1\u201313. Association for Computing Machinery, New York (2020). https:\/\/doi.org\/10.1145\/3313831.3376643, https:\/\/doi.org\/10.1145\/3313831.3376643","DOI":"10.1145\/3313831.3376643"},{"key":"17_CR16","unstructured":"Hauge, M.: Writing Screenplays That Sell. Bloomsbury Publishing (2011). https:\/\/books.google.co.jp\/books?id=6I9qDwAAQBAJ"},{"key":"17_CR17","unstructured":"Iwamoto, S.: Epic and interactive music in \u2018final fantasy xv\u2019 (2017). https:\/\/www.gdcvault.com\/play\/1023971\/Epic-AND-Interactive-Music-in"},{"key":"17_CR18","doi-asserted-by":"publisher","unstructured":"Koelstra, S., et al.: DEAP: a database for emotion analysis; using physiological signals. IEEE Trans. Affect. Comput. 3(1), 18\u201331 (2012). https:\/\/doi.org\/10.1109\/T-AFFC.2011.15","DOI":"10.1109\/T-AFFC.2011.15"},{"key":"17_CR19","doi-asserted-by":"publisher","unstructured":"Lang, P.J.: The emotion probe. Am. Psychol. Assoc. 50, 372\u2013385 (1995). https:\/\/doi.org\/10.1037\/0003-066X.50.5.372","DOI":"10.1037\/0003-066X.50.5.372"},{"key":"17_CR20","unstructured":"Laurans, G., Desmet, P.M., Hekkert, P.P.: Assessing emotion in interaction: some problems and a new approach. In: Proceedings of the 4th International Conference on Designing Pleasurable Products and Interfaces, DPPI 2009, Compiegne, October 2009. Universite de Technologie de Compiegne (2009)"},{"issue":"4","key":"17_CR21","doi-asserted-by":"publisher","first-page":"363","DOI":"10.1111\/j.1469-8986.1990.tb02330.x","volume":"27","author":"RW Levenson","year":"1990","unstructured":"Levenson, R.W., Ekman, P., Friesen, W.V.: Voluntary facial action generates emotion-specific autonomic nervous system activity. Psychophysiology 27(4), 363\u2013384 (1990)","journal-title":"Psychophysiology"},{"key":"17_CR22","doi-asserted-by":"publisher","unstructured":"Liu, J., et al.: EEG-based emotion classification using a deep neural network and sparse autoencoder. Front. Syst. Neurosci. 14, 43 (2020). https:\/\/doi.org\/10.3389\/fnsys.2020.00043, https:\/\/www.frontiersin.org\/article\/10.3389\/fnsys.2020.00043","DOI":"10.3389\/fnsys.2020.00043"},{"key":"17_CR23","doi-asserted-by":"crossref","unstructured":"Liu, Y., Sourina, O., Nguyen, M.K.: Real-time EEG-based human emotion recognition and visualization. In: 2010 International Conference on Cyberworlds, pp. 262\u2013269. IEEE (2010)","DOI":"10.1109\/CW.2010.37"},{"key":"17_CR24","doi-asserted-by":"publisher","first-page":"109","DOI":"10.1037\/h0041627","volume":"25","author":"D Marlowe","year":"1961","unstructured":"Marlowe, D., Crowne, D.P.: Social desirability and response to perceived situational demands. J. Consult. Psychol. 25, 109\u201315 (1961)","journal-title":"J. Consult. Psychol."},{"issue":"4","key":"17_CR25","doi-asserted-by":"publisher","first-page":"385","DOI":"10.1109\/TAFFC.2015.2432810","volume":"6","author":"M Nardelli","year":"2015","unstructured":"Nardelli, M., Valenza, G., Greco, A., Lanata, A., Scilingo, E.P.: Recognizing emotions induced by affective sounds through heart rate variability. IEEE Trans. Affect. Comput. 6(4), 385\u2013394 (2015). https:\/\/doi.org\/10.1109\/TAFFC.2015.2432810","journal-title":"IEEE Trans. Affect. Comput."},{"key":"17_CR26","doi-asserted-by":"publisher","first-page":"27","DOI":"10.1007\/978-3-319-27446-1_2","volume-title":"Why Engagement Matters","author":"H O\u2019Brien","year":"2016","unstructured":"O\u2019Brien, H.: Translating theory into methodological practice. In: O\u2019Brien, H., Cairns, P. (eds.) Why Engagement Matters, pp. 27\u201352. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-27446-1_2"},{"key":"17_CR27","doi-asserted-by":"publisher","unstructured":"O\u2019Brien, H.L., Cairns, P., Hall, M.: A practical approach to measuring user engagement with the refined user engagement scale (UES) and new UES short form. Int. J. Hum.-Comput. Stud. 112, 28\u201339 (2018). https:\/\/doi.org\/10.1016\/j.ijhcs.2018.01.004, https:\/\/www.sciencedirect.com\/science\/article\/pii\/S1071581918300041","DOI":"10.1016\/j.ijhcs.2018.01.004"},{"key":"17_CR28","doi-asserted-by":"publisher","unstructured":"Pasquali, A., Cleeremans, A., Gaillard, V.: Reversible second-order conditional sequences in incidental sequence learning tasks. Q. J. Exp. Psychol. 72(5), 1164\u20131175 (2019). https:\/\/doi.org\/10.1177\/1747021818780690, https:\/\/doi.org\/10.1177\/1747021818780690, pMID: 29779443","DOI":"10.1177\/1747021818780690"},{"key":"17_CR29","unstructured":"Perea Restrepo, C.M.: Limpieza social. Una violencia mal nombrada. Bogot\u00e1: Centro Nacional de Memoria Hist\u00f3rica (2019). http:\/\/www.cervantesvirtual.com\/obra\/limpieza-social-una-violencia-mal-nombrada-879231"},{"issue":"3","key":"17_CR30","doi-asserted-by":"publisher","first-page":"233","DOI":"10.1080\/17400300802418552","volume":"6","author":"B Perron","year":"2008","unstructured":"Perron, B., Arsenault, D., Picard, M., Therrien, C.: Methodological questions in \u2018interactive film studies\u2019. New Rev. Film Telev. Stud. 6(3), 233\u2013252 (2008)","journal-title":"New Rev. Film Telev. Stud."},{"issue":"3","key":"17_CR31","doi-asserted-by":"publisher","first-page":"237","DOI":"10.1016\/0010-4825(79)90008-8","volume":"9","author":"MJ Potel","year":"1979","unstructured":"Potel, M.J., Sayre, R.E., Robertson, A.: A system for interactive film analysis. Comput. Biol. Med. 9(3), 237\u2013256 (1979)","journal-title":"Comput. Biol. Med."},{"key":"17_CR32","unstructured":"Prokasy, W.: Electrodermal activity in psychological research. Elsevier Sci. (2012). https:\/\/books.google.co.jp\/books?id=m9l5ApC3avoC"},{"key":"17_CR33","unstructured":"Rad\u00faz, C., J\u00e1n, R., Vladim\u00edr, S.: Kinoautomat: One Man and His House. Czechoslovakia (1967)"},{"key":"17_CR34","doi-asserted-by":"publisher","unstructured":"Rico, O., Tag, B., Ohta, N., Sugiura, K.: Seamless multithread films in virtual reality. In: Proceedings of the Eleventh International Conference on Tangible, Embedded, and Embodied Interaction, TEI 2017. ACM, New York, pp. 641\u2013646 (2017). https:\/\/doi.org\/10.1145\/3024969.3025096, http:\/\/doi.acm.org\/10.1145\/3024969.3025096","DOI":"10.1145\/3024969.3025096"},{"key":"17_CR35","doi-asserted-by":"crossref","unstructured":"Saeghe, P., et al.: Augmenting television with augmented reality. In: Proceedings of the 2019 ACM International Conference on Interactive Experiences for TV and Online Video, pp. 255\u2013261 (2019)","DOI":"10.1145\/3317697.3325129"},{"key":"17_CR36","doi-asserted-by":"publisher","unstructured":"Shu, L., et al.: A review of emotion recognition using physiological signals. Sensors (Switzerland) 18 (2018). https:\/\/doi.org\/10.3390\/s18072074","DOI":"10.3390\/s18072074"},{"key":"17_CR37","unstructured":"Slade, D.: Black Mirror: Bandersnatch (2018)"},{"issue":"2","key":"17_CR38","first-page":"532","volume":"15","author":"PD Welch","year":"1975","unstructured":"Welch, P.D.: The use of Fast Fourier transform for the estimation of power spectra. Digit. Sig. Process. 15(2), 532\u2013574 (1975)","journal-title":"Digit. Sig. Process."},{"key":"17_CR39","doi-asserted-by":"crossref","unstructured":"Wu, S., Du, Z., Li, W., Huang, D., Wang, Y.: Continuous emotion recognition in videos by fusing facial expression, head pose and eye gaze. In: 2019 International Conference on Multimodal Interaction, pp. 40\u201348 (2019)","DOI":"10.1145\/3340555.3353739"},{"key":"17_CR40","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"106","DOI":"10.1007\/978-3-642-35725-1_10","volume-title":"Advances in Multimedia Modeling","author":"K Yadati","year":"2013","unstructured":"Yadati, K., Katti, H., Kankanhalli, M.: Interactive video advertising: a multimodal affective approach. In: Li, S., et al. (eds.) MMM 2013. LNCS, vol. 7732, pp. 106\u2013117. Springer, Heidelberg (2013). https:\/\/doi.org\/10.1007\/978-3-642-35725-1_10"},{"key":"17_CR41","unstructured":"Zheng, W.L., Dong, B.N., Lu, B.L.: Multimodal emotion recognition using EEG and eye tracking data. In: 2014 36th Annual International Conference of the IEEE Engineering in Medicine and Biology Society, pp. 5040\u20135043. IEEE (2014)"}],"container-title":["Lecture Notes in Computer Science","Artificial Intelligence in Music, Sound, Art and Design"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-03789-4_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,24]],"date-time":"2022-05-24T08:08:13Z","timestamp":1653379693000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-03789-4_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031037887","9783031037894"],"references-count":41,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-03789-4_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"15 April 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"EvoMUSART","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Computational Intelligence in Music, Sound, Art and Design (Part of EvoStar)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Madrid","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Spain","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 April 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 April 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"evomusart2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.evostar.org\/2022\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"51","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"20","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"6","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"39% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}