{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T08:09:56Z","timestamp":1768464596532,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":11,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819615308","type":"print"},{"value":"9789819615315","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-1531-5_17","type":"book-chapter","created":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T14:18:32Z","timestamp":1738937912000},"page":"171-179","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["From\u00a0Vision to Vocabulary: A Multimodal Approach to Detect and Track Black Cattle Behaviors"],"prefix":"10.1007","author":[{"given":"Su Myat","family":"Noe","sequence":"first","affiliation":[]},{"given":"Thi Thi","family":"Zin","sequence":"additional","affiliation":[]},{"given":"Pyke","family":"Tin","sequence":"additional","affiliation":[]},{"given":"Ikuo","family":"Kobayashi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,8]]},"reference":[{"key":"17_CR1","unstructured":"Naveed, H.: A comprehensive overview of large language models. arXiv preprint arXiv:2307.06435 (2023)"},{"key":"17_CR2","first-page":"23716","volume":"35","author":"JB Alayrac","year":"2022","unstructured":"Alayrac, J.B., et al.: Flamingo: a visual language model for few-shot learning. Adv. Neural. Inf. Process. Syst. 35, 23716\u201323736 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"17_CR3","doi-asserted-by":"crossref","unstructured":"Zhang, H., Li, X., Bing, L.: Video-llama: an instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858 (2023)","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"issue":"1","key":"17_CR4","doi-asserted-by":"publisher","first-page":"532","DOI":"10.3390\/s23010532","volume":"23","author":"S Myat Noe","year":"2023","unstructured":"Myat Noe, S., Zin, T.T., Tin, P., Kobayashi, I.: Comparing state-of-the-art deep learning algorithms for the automated detection and tracking of black cattle. Sensors 23(1), 532 (2023)","journal-title":"Sensors"},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Wang, C.Y., Yeh, I.H., Liao, H.Y.M.: YOLOv9: learning What You Want to Learn Using Programmable Gradient Information. arXiv preprint arXiv:2402.13616 (2024)","DOI":"10.1007\/978-3-031-72751-1_1"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"Maggiolino, G., Ahmad, A., Cao, J., Kitani, K.: Deep oc-sort: multi-pedestrian tracking by adaptive re-identification. In: 2023 IEEE International Conference on Image Processing (ICIP), pp. 3025\u20133029. IEEE, New York (2023)","DOI":"10.1109\/ICIP49359.2023.10222576"},{"issue":"1","key":"17_CR7","first-page":"211","volume":"18","author":"SM Noe","year":"2022","unstructured":"Noe, S.M., Zin, T.T., Tin, P., Kobayashi, I.: Automatic detection and tracking of mounting behavior in cattle using a deep learning-based instance segmentation model. Int. J. Innov. Comput. Inf. Control 18(1), 211\u2013220 (2022)","journal-title":"Int. J. Innov. Comput. Inf. Control"},{"key":"17_CR8","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual Instruction Tuning. arXiv preprint arXiv:2304.08485 (2023)"},{"key":"17_CR9","doi-asserted-by":"publisher","unstructured":"Wei, H.: Vary: Scaling up the Vision Vocabulary for Large Vision-Language Models. arXiv preprint arXiv:2312.06109 (2023). https:\/\/doi.org\/10.48550\/arXiv.2312.06109","DOI":"10.48550\/arXiv.2312.06109"},{"key":"17_CR10","unstructured":"Zhang, H.: Open-Vocabulary Animal Keypoint Detection with Semantic-feature Matching. arXiv preprint \narXiv:2310.05056v3\n [cs.CV]\n 11 Dec 2023"},{"key":"17_CR11","doi-asserted-by":"crossref","unstructured":"Sun, M., Zhao, Z., Chai, W., Luo, H., Cao, S., Zhang, Y.: UniAP: towards Universal Animal Perception in Vision via Few-Shot Learning. In: The Thirty-Eighth AAAI Conference on Artificial Intelligence (AAAI 2024)","DOI":"10.1609\/aaai.v38i5.28305"}],"container-title":["Lecture Notes in Electrical Engineering","Genetic and Evolutionary Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-1531-5_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T14:18:42Z","timestamp":1738937922000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-1531-5_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819615308","9789819615315"],"references-count":11,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-1531-5_17","relation":{},"ISSN":["1876-1100","1876-1119"],"issn-type":[{"value":"1876-1100","type":"print"},{"value":"1876-1119","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"8 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICGEC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Genetic and Evolutionary Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Miyazaki","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icgec2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icgec24.github.io\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}