{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T14:23:53Z","timestamp":1742912633682,"version":"3.40.3"},"publisher-location":"Cham","reference-count":26,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031347313"},{"type":"electronic","value":"9783031347320"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-34732-0_29","type":"book-chapter","created":{"date-parts":[[2023,7,11]],"date-time":"2023-07-11T21:01:21Z","timestamp":1689109281000},"page":"379-390","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Bimodal Neural Style Transfer for\u00a0Image Generation Based on\u00a0Text Prompts"],"prefix":"10.1007","author":[{"given":"Diego","family":"Guti\u00e9rrez","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7969-6041","authenticated-orcid":false,"given":"Marcelo","family":"Mendoza","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,7,9]]},"reference":[{"issue":"6","key":"29_CR1","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. Commun. ACM 60(6), 84\u201390 (2017)","journal-title":"Commun. ACM"},{"key":"29_CR2","doi-asserted-by":"crossref","unstructured":"Bugue\u00f1o, M., Mendoza, M.: Learning to detect online harassment on twitter with the transformer. PKDD\/ECML Workshops (2), 298\u2013306 (2019)","DOI":"10.1007\/978-3-030-43887-6_23"},{"key":"29_CR3","doi-asserted-by":"crossref","unstructured":"Castillo, S., et al.: Detection of bots and cyborgs in twitter: a study on the chilean presidential election in 2017. HCI (13), 311\u2013323 (2019)","DOI":"10.1007\/978-3-030-21902-4_22"},{"issue":"1","key":"29_CR4","doi-asserted-by":"publisher","first-page":"55","DOI":"10.1108\/17440081211222591","volume":"8","author":"M Mendoza","year":"2012","unstructured":"Mendoza, M.: A new term-weighting scheme for na\u00efve Bayes text categorization. Int. J. Web Inf. Syst. 8(1), 55\u201372 (2012)","journal-title":"Int. J. Web Inf. Syst."},{"key":"29_CR5","volume-title":"Luke Zettlemoyer","author":"A Aghajanyan","year":"2021","unstructured":"Aghajanyan, A., Shrivastava, A., Gupta, A., Goyal, N.: Luke Zettlemoyer. Better Fine-Tuning by Reducing Representational Collapse. ICLR, Sonal Gupta (2021)"},{"key":"29_CR6","doi-asserted-by":"crossref","unstructured":"Paranjape, B., Michael, J., Ghazvininejad, M., Hajishirzi, H., Zettlemoyer, L.: Prompting contrastive explanations for commonsense reasoning tasks. 
ACL\/IJCNLP (Findings), 4179\u20134192 (2021)","DOI":"10.18653\/v1\/2021.findings-acl.366"},{"key":"29_CR7","doi-asserted-by":"crossref","unstructured":"Tampe, I., Mendoza, M., Milios, E.: Neural abstractive unsupervised summarization of online news discussions. IntelliSys (2), 822\u2013841 (2021)","DOI":"10.1007\/978-3-030-82196-8_60"},{"key":"29_CR8","doi-asserted-by":"crossref","unstructured":"Mendoza, M., Tesconi, M., Cresci, S.: Bots in social and interaction networks: detection and impact estimation. ACM Trans. Inf. Syst. 39(1), 5:1\u20135:32 (2020)","DOI":"10.1145\/3419369"},{"key":"29_CR9","doi-asserted-by":"crossref","unstructured":"Ulloa, G., Veloz, A., Allende-Cid, H., Monge, R., Allende, H.: Efficient methodology based on convolutional neural networks with augmented penalization on hard-to-classify boundary voxels on the task of brain lesion segmentation. MCPR, 338\u2013347 (2022)","DOI":"10.1007\/978-3-031-07750-0_31"},{"key":"29_CR10","doi-asserted-by":"crossref","unstructured":"Molina, G., et al.: A new content-based image retrieval system for SARS-CoV-2 computer-aided diagnosis. MICAD, 316\u2013324 (2021)","DOI":"10.1007\/978-981-16-3880-0_33"},{"key":"29_CR11","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763 (2021)"},{"key":"29_CR12","unstructured":"Ramesh, A., Pavlov, M., Goh, G., Gray, S., Voss, C., Radford, A.: Mark Chen, pp. 8821\u20138831. Zero-Shot Text-to-Image Generation. ICML, Ilya Sutskever (2021)"},{"issue":"6","key":"29_CR13","doi-asserted-by":"publisher","first-page":"890","DOI":"10.1109\/TRA.2002.805646","volume":"18","author":"D Mery","year":"2002","unstructured":"Mery, D., Filbert, D.: Automated flaw detection in aluminum castings based on the tracking of potential defects in a radioscopic image sequence. IEEE Trans. Robot. Autom. 18(6), 890\u2013901 (2002)","journal-title":"IEEE Trans. Robot. Autom."},{"issue":"13","key":"29_CR14","doi-asserted-by":"publisher","first-page":"7803","DOI":"10.1007\/s00521-020-05521-2","volume":"33","author":"D Saavedra","year":"2021","unstructured":"Saavedra, D., Banerjee, S., Mery, D.: Detection of threat objects in baggage inspection with X-ray images using deep learning. Neural Comput. Appl. 33(13), 7803\u20137819 (2021)","journal-title":"Neural Comput. Appl."},{"key":"29_CR15","unstructured":"Duan, Y., Andrychowicz, M., Stadie, B.C., Ho, J., Schneider, J., Sutskever, I.: Pieter Abbeel, pp. 1087\u20131098. One-Shot Imitation Learning. NIPS, Wojciech Zaremba (2017)"},{"key":"29_CR16","unstructured":"Nichol, A.Q., et al.: GLIDE: towards photorealistic image generation and editing with text-guided diffusion models. In: ICML, pp. 16784\u201316804 (2022)"},{"issue":"4","key":"29_CR17","doi-asserted-by":"publisher","first-page":"307","DOI":"10.1561\/2200000056","volume":"12","author":"P Diederik","year":"2019","unstructured":"Diederik, P.: Kingma, max welling: an introduction to variational autoencoders. Found. Trends Mach. Learn. 12(4), 307\u2013392 (2019)","journal-title":"Found. Trends Mach. Learn."},{"key":"29_CR18","unstructured":"Ian, J. et al.: Generative adversarial nets. In: NIPS, pp. 2672\u20132680 (2014)"},{"key":"29_CR19","doi-asserted-by":"crossref","unstructured":"Zhu, J.-Y., Park, T., Isola, P., Alexei A.: EFROS: unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV, pp. 
{"key":"29_CR20","unstructured":"Radford, A., Metz, L., Chintala, S.: Unsupervised representation learning with deep convolutional generative adversarial networks. In: ICLR (Poster) (2016)"},
{"key":"29_CR21","doi-asserted-by":"crossref","unstructured":"Jiang, Y., et al.: SimGAN: hybrid simulator identification for domain adaptation via adversarial reinforcement learning. In: ICRA, pp. 2884–2890 (2021)","DOI":"10.1109/ICRA48506.2021.9561731"},
{"key":"29_CR22","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M., Hertzmann, A., Shechtman, E.: Controlling perceptual factors in neural style transfer. In: CVPR, pp. 3730–3738 (2017)","DOI":"10.1109/CVPR.2017.397"},
{"issue":"1","key":"29_CR23","doi-asserted-by":"publisher","first-page":"155","DOI":"10.1162/coli_a_00426","volume":"48","author":"D Jin","year":"2022","unstructured":"Jin, D., Jin, Z., Hu, Z., Vechtomova, O., Mihalcea, R.: Deep learning for text style transfer: a survey. Comput. Linguist. 48(1), 155–205 (2022)","journal-title":"Comput. Linguist."},
{"key":"29_CR24","doi-asserted-by":"crossref","unstructured":"Garcia, N., Vogiatzis, G.: How to read paintings: semantic art understanding with multi-modal retrieval. In: ECCV Workshops (2), pp. 676–691 (2018)","DOI":"10.1007/978-3-030-11012-3_52"},
{"key":"29_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770–778 (2016)","DOI":"10.1109/CVPR.2016.90"},
{"key":"29_CR26","doi-asserted-by":"crossref","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: ICLR (2015)","DOI":"10.1109/ICCV.2015.314"}],
"container-title":["Lecture Notes in Computer Science","Culture and Computing"],
"original-title":[],"language":"en",
"link":[{"URL":"https://link.springer.com/content/pdf/10.1007/978-3-031-34732-0_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],
"deposited":{"date-parts":[[2024,10,23]],"date-time":"2024-10-23T23:58:15Z","timestamp":1729727895000},
"score":1,"resource":{"primary":{"URL":"https://link.springer.com/10.1007/978-3-031-34732-0_29"}},
"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},
"ISBN":["9783031347313","9783031347320"],"references-count":26,
"URL":"https://doi.org/10.1007/978-3-031-34732-0_29","relation":{},
"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],
"subject":[],"published":{"date-parts":[[2023]]},
"assertion":[
{"value":"9 July 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},
{"value":"HCII","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"International Conference on Human-Computer Interaction","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"Copenhagen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"Denmark","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"23 July 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"28 July 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"25","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"hcii2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"https://2023.hci.international/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"CMS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"7472","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"1578","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"396","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"21% - The value is computed by the equation \"Number of Full Papers Accepted / Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}
]}}