{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T19:17:35Z","timestamp":1743103055861,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":26,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819947416"},{"type":"electronic","value":"9789819947423"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-981-99-4742-3_13","type":"book-chapter","created":{"date-parts":[[2023,7,30]],"date-time":"2023-07-30T00:02:38Z","timestamp":1690675358000},"page":"162-173","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Make Active Attention More Active: Using Lipschitz Regularity to Improve Long Sequence Time-Series Forecasting"],"prefix":"10.1007","author":[{"given":"Xiangxu","family":"Meng","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0998-5435","authenticated-orcid":false,"given":"Wei","family":"Li","sequence":"additional","affiliation":[]},{"given":"Wenqi","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Zheng","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Guangsheng","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Huiqiang","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,7,30]]},"reference":[{"key":"13_CR1","doi-asserted-by":"crossref","unstructured":"Ariyo, A.A., Adewumi, A.O., Ayo, C.K.: Stock price prediction using the arima model. In: 2014 UKSim-AMSS 16th International Conference on Computer Modelling and Simulation, pp. 106\u2013112. IEEE (2014)","DOI":"10.1109\/UKSim.2014.67"},{"key":"13_CR2","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"13_CR3","unstructured":"Bahdanau, D., Cho, K., Bengio, Y.: Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473 (2014)"},{"key":"13_CR4","doi-asserted-by":"publisher","first-page":"29","DOI":"10.1007\/978-3-030-16145-3_3","volume-title":"Advances in Knowledge Discovery and Data Mining","author":"L Bai","year":"2019","unstructured":"Bai, L., Yao, L., Kanhere, S.S., Yang, Z., Chu, J., Wang, X.: Passenger demand forecasting with multi-task convolutional recurrent neural networks. In: Yang, Q., Zhou, Z.-H., Gong, Z., Zhang, M.-L., Huang, S.-J. (eds.) Advances in Knowledge Discovery and Data Mining, pp. 29\u201342. Springer International Publishing, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-16145-3_3"},{"key":"13_CR5","first-page":"17766","volume":"33","author":"D Cao","year":"2020","unstructured":"Cao, D., et al.: Spectral temporal graph neural network for multivariate timeseries forecasting. Adv. Neural. Inf. Process. Syst. 33, 17766\u201317778 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"13_CR6","unstructured":"Child, R., Gray, S., Radford, A., Sutskever, I.: Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509 (2019)"},{"key":"13_CR7","doi-asserted-by":"crossref","unstructured":"Cirstea, R.G., Guo, C., Yang, B., Kieu, T., Dong, X., Pan, S.: Triformer: Triangular, variable-specific attentions for long sequence multivariate time series forecasting\u2013full version. arXiv preprint arXiv:2204.13767 (2022)","DOI":"10.24963\/ijcai.2022\/277"},{"key":"13_CR8","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805(2018)"},{"key":"13_CR9","unstructured":"Dong, Y., Cordonnier, J.B., Loukas, A.: Attention is not all you need: Pure attention loses rank doubly exponentially with depth. In: International Conference on Machine Learning, pp. 2793\u20132803. PMLR (2021)"},{"key":"13_CR10","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"13_CR11","unstructured":"Han, K., Wang, Y., Guo, J., Tang, Y., Wu, E.: Vision gnn: An image is worth graph of nodes. arXiv preprint arXiv:2206.00272 (2022)"},{"key":"13_CR12","first-page":"15908","volume":"34","author":"K Han","year":"2021","unstructured":"Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., Wang, Y.: Transformer in transformer. Adv. Neural. Inf. Process. Syst. 34, 15908\u201315919 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"13_CR13","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. In: International Conference on Machine Learning, pp. 448\u2013456. PMLR (2015)"},{"key":"13_CR14","unstructured":"Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"13_CR15","unstructured":"Kitaev, N., Kaiser, \u0141., Levskaya, A.: Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451 (2020)"},{"key":"13_CR16","doi-asserted-by":"crossref","unstructured":"Li, G., Muller, M., Thabet, A., Ghanem, B.: DeepGCNs: can GCNS go as deep as CNNS? In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9267\u20139276 (2019)","DOI":"10.1109\/ICCV.2019.00936"},{"key":"13_CR17","unstructured":"Li, S., et al.: Enhancing the locality and breaking the memory bottleneck of transformer on time series forecasting. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"issue":"3","key":"13_CR18","doi-asserted-by":"publisher","first-page":"1181","DOI":"10.1016\/j.ijforecast.2019.07.001","volume":"36","author":"D Salinas","year":"2020","unstructured":"Salinas, D., Flunkert, V., Gasthaus, J., Januschowski, T.: Deepar: probabilistic forecasting with autoregressive recurrent networks. Int. J. Forecast. 36(3), 1181\u20131191 (2020)","journal-title":"Int. J. Forecast."},{"key":"13_CR19","doi-asserted-by":"publisher","first-page":"362","DOI":"10.1007\/978-3-030-04167-0_33","volume-title":"Neural Information Processing","author":"Y Seo","year":"2018","unstructured":"Seo, Y., Defferrard, M., Vandergheynst, P., Bresson, X.: Structured sequence modeling with graph convolutional recurrent networks. In: Cheng, L., Leung, A.C.S., Ozawa, S. (eds.) Neural Information Processing. LNCS, vol. 11301, pp. 362\u2013373. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-04167-0_33"},{"key":"13_CR20","doi-asserted-by":"crossref","unstructured":"Shaw, P., Uszkoreit, J., Vaswani, A.: Self-attention with relative position representations. arXiv preprint arXiv:1803.02155 (2018)","DOI":"10.18653\/v1\/N18-2074"},{"issue":"1","key":"13_CR21","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1080\/00031305.2017.1380080","volume":"72","author":"SJ Taylor","year":"2018","unstructured":"Taylor, S.J., Letham, B.: Forecasting at scale. Am. Stat. 72(1), 37\u201345 (2018)","journal-title":"Am. Stat."},{"key":"13_CR22","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"13_CR23","unstructured":"Virmaux, A., Scaman, K.: Lipschitz regularity of deep neural networks: analysis and efficient estimation. In: Advances in Neural Information Processing Systems, vol. 31 (2018)"},{"key":"13_CR24","unstructured":"Wang, S., Li, B.Z., Khabsa, M., Fang, H., Ma, H.: Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768 (2020)"},{"key":"13_CR25","unstructured":"Wen, Q., et al.: Transformers in time series: a survey. arXiv preprint arXiv:2202.07125 (2022)"},{"issue":"12","key":"13_CR26","doi-asserted-by":"publisher","first-page":"11106","DOI":"10.1609\/aaai.v35i12.17325","volume":"35","author":"H Zhou","year":"2021","unstructured":"Zhou, H., et al.: Informer: beyond efficient transformer for long sequence time-series forecasting. Proc. AAAI Conf. Artif. Intell. 35(12), 11106\u201311115 (2021). https:\/\/doi.org\/10.1609\/aaai.v35i12.17325","journal-title":"Proc. AAAI Conf. Artif. Intell."}],"container-title":["Lecture Notes in Computer Science","Advanced Intelligent Computing Technology and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-4742-3_13","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T23:24:14Z","timestamp":1690932254000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-4742-3_13"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9789819947416","9789819947423"],"references-count":26,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-4742-3_13","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"30 July 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Zhengzhou","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10 August 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"19","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icic2023a","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ic-icc.cn\/2023\/index.htm","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}