<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Environ. Sci.</journal-id>
<journal-title>Frontiers in Environmental Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Environ. Sci.</abbrev-journal-title>
<issn pub-type="epub">2296-665X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1187201</article-id>
<article-id pub-id-type="doi">10.3389/fenvs.2023.1187201</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Environmental Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Industrial steam consumption analysis and prediction based on multi-source sensing data for sustainable energy development</article-title>
<alt-title alt-title-type="left-running-head">Yang et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fenvs.2023.1187201">10.3389/fenvs.2023.1187201</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Yang</surname>
<given-names>Mingxia</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2246535/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Xu</surname>
<given-names>Xiaojie</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cheng</surname>
<given-names>Huayan</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhan</surname>
<given-names>Zhidan</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Xu</surname>
<given-names>Zhongshen</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Tong</surname>
<given-names>Lianghuai</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fang</surname>
<given-names>Kai</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ahmed</surname>
<given-names>Ahmedin M.</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1735855/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>College of Electrical and Information Engineering</institution>, <institution>Quzhou University</institution>, <addr-line>Quzhou</addr-line>, <addr-line>Zhejiang</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Quzhou Academy of Metrology and Quality Inspection</institution>, <addr-line>Quzhou</addr-line>, <addr-line>Zhejiang</addr-line>, <country>China</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Macau University of Science and Technology</institution>, <addr-line>Macau</addr-line>, <country>China</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Florida International University</institution>, <addr-line>Miami</addr-line>, <addr-line>FL</addr-line>, <country>United States</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/60058/overview">Bryan M. Wong</ext-link>, University of California, Riverside, United States</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2277485/overview">Zulfikhar Ali</ext-link>, University of California, Riverside, United States</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1008964/overview">Qiang Wang</ext-link>, China University of Petroleum, Huadong, China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Mingxia Yang, <email>37049@qzc.edu.cn</email>; Xiaojie Xu, <email>csccpv@163.com</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>02</day>
<month>06</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>11</volume>
<elocation-id>1187201</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>03</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>05</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Yang, Xu, Cheng, Zhan, Xu, Tong, Fang and Ahmed.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Yang, Xu, Cheng, Zhan, Xu, Tong, Fang and Ahmed</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Centralized heating is an energy-saving and environmentally friendly way that is strongly promoted by the state. It can improve energy utilization and reduce carbon emissions. However, centralized heating depends on accurate heat demand forecasting. On the one hand, it is impossible to save energy if overproducing, while on the other hand, it is impossible to meet the heat demand of enterprises if there is not enough capacity. Therefore, it is necessary to forecast the future trend of heat consumption, so as to provide a reliable basis for enterprises to reasonably deploy fuel stocks and boiler power. At the same time, it is also necessary to analyze and monitor the steam consumption of enterprises for abnormalities in order to monitor pipeline leakage and enterprise gas theft. Due to the nonlinear characteristics of heat load, it is difficult for traditional forecasting methods to capture data trends. Therefore, it is necessary to study the characteristics of heat loads and explore suitable heat load prediction models. In this paper, industrial steam consumption of a paper manufacturer is used as an example, and steam consumption data are periodically analyzed to study its time series characteristics; then steam consumption prediction models are established based on ARIMA model and LSTM neural network, respectively. The prediction work was carried out in minutes and hours, respectively. The experimental results show that the LSTM neural network has greater advantages in this steam consumption load prediction and can meet the needs of heat load prediction.</p>
</abstract>
<kwd-group>
<kwd>industrial steam consumption</kwd>
<kwd>Centralized heating</kwd>
<kwd>heat load prediction</kwd>
<kwd>sensing data</kwd>
<kwd>LSTM</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Environmental Informatics and Remote Sensing</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>While generating electricity, the cogeneration plant also uses the steam extraction or exhaust steam from the turbine to supply heat to the customers. Due to the large scale of heat supply, large boilers with high parameters and high efficiency can be used. Compared with decentralized heat supply, energy utilization efficiency is greatly improved, fuel is saved, and emissions are reduced. Therefore, centralized heat supply is an energy-saving and environmentally friendly way that is strongly promoted by the state. However, due to the ineffective energy use of thermoelectricity, the heat loss is great. On the one hand, energy cannot be saved if there is overproduction, while on the other hand, heat demand of enterprises cannot be met if there is not enough capacity. Therefore, accurate steam consumption prediction becomes an important issue.</p>
<p>Load forecasts for thermoelectric companies can usually be divided into four categories based on the length of the forecast: long-term forecasts, medium-term forecasts, short-term forecasts and ultra-short-term forecasts (<xref ref-type="bibr" rid="B5">Du et al., 2019</xref>; <xref ref-type="bibr" rid="B17">Li et al., 2020</xref>). Short-term forecasting refers to forecasting data for one to a few days in the future and is the focus of this paper (<xref ref-type="bibr" rid="B16">L&#xe4;ngkvist et al., 2014</xref>). In thermoelectric load forecasting, classical methods include regression analysis (<xref ref-type="bibr" rid="B23">Qing et al., 2013</xref>), time series methods, and mathematical and statistical methods such as Kalman filtering (<xref ref-type="bibr" rid="B4">Dong et al., 2015</xref>). Machine learning was gradually introduced into short-term load forecasting (<xref ref-type="bibr" rid="B10">Greff et al., 2016</xref>; <xref ref-type="bibr" rid="B9">Geysen et al., 2018</xref>), such as expert systems (<xref ref-type="bibr" rid="B3">Chen et al., 1991</xref>), fuzzy forecasting (<xref ref-type="bibr" rid="B13">Jovi&#x107;, 2021</xref>), wavelet analysis (<xref ref-type="bibr" rid="B14">Kumbinarasaiah et al., 2023</xref>), chaos theory (<xref ref-type="bibr" rid="B1">Al-Shammari et al., 2016</xref>), support vector machines (<xref ref-type="bibr" rid="B15">Kuzishchin and Ismatkhodzhaev, 2020</xref>; <xref ref-type="bibr" rid="B24">Razzak et al., 2020</xref>), cluster analysis models (<xref ref-type="bibr" rid="B18">Liu et al., 2020</xref>) and artificial neural networks (<xref ref-type="bibr" rid="B19">Mao et al., 2021</xref>; <xref ref-type="bibr" rid="B30">Wang et al., 2022a</xref>; <xref ref-type="bibr" rid="B29">Wang et al., 2022b</xref>; <xref ref-type="bibr" rid="B32">Yang et al., 2022</xref>).</p>
<p>Potocnik (<xref ref-type="bibr" rid="B22">Poto&#x10d;nik et al., 2014</xref>) investigated static and adaptive models for short-term natural gas load forecasting, constructing linear models, neural network models and support vector machine regression models. Forecasts of gas consumption by individual customers and local gas companies show that the adaptive model has better forecasting performance. Ervural et al. (<xref ref-type="bibr" rid="B7">Ervural et al., 2016</xref>) developed a combined forecasting model based on MA and ARMA, in which a genetic algorithm was used to determine the <italic>p</italic>, <italic>q</italic> values in ARMA (<italic>p</italic>, <italic>q</italic>). The single model and the combined model were used to forecast natural gas consumption in Turkey, and the results showed that the combined model had a higher prediction accuracy. Beyca (<xref ref-type="bibr" rid="B2">Beyca et al., 2019</xref>) forecasted natural gas consumption in Istanbul, Turkey. Multiple linear regression (MLR), artificial neural network (ANN) and support vector regression (SVR) were used in the study, and the results showed that SVR had the lowest forecasting error.</p>
<p>Yu et al. (<xref ref-type="bibr" rid="B33">Yu and Xu, 2014</xref>) improved the traditional BP neural network, increased the adaptive learning rate of the BP neural network, and applied a genetic algorithm to optimally determine the initial weights and thresholds of the BP neural network, and proposed the BPNN-GA natural gas load forecasting model. The model takes into account the effects of maximum temperature, minimum temperature, average temperature, date type and weather conditions, and predicts the natural gas load in Shanghai. The experimental results show that the MAPE value of the BPNN-GA model is 4.59%, and the optimized combined model has better prediction results.</p>
<p>The goal of deep learning is to stack multiple modules together to form deep networks in order to create more expressive models that can learn more abstract representations of data and achieve better learning performance (<xref ref-type="bibr" rid="B20">Muzaffar and Afshari, 2019</xref>; <xref ref-type="bibr" rid="B17">Li et al., 2020</xref>). As a type of deep learning neural network, recurrent neural networks (RNN) rely on their own hidden layer recurrent structure to capture temporal correlations between data well and have been widely used in various time series prediction problems. However, RNNs are prone to gradient disappearance when training network parameters, and therefore cannot handle long-term dependence between data (<xref ref-type="bibr" rid="B27">Wang et al., 2018</xref>; <xref ref-type="bibr" rid="B26">Wang et al., 2019</xref>; <xref ref-type="bibr" rid="B28">Wang and Song, 2019</xref>). Currently, several RNN architectures have been derived to solve the gradient disappearance problem, including gate architectures, cross-timescale connections, initialization constraints and regularization methods, etc. The most influential of these are the gate architectures represented by the LSTM (<xref ref-type="bibr" rid="B12">Hochreiter and Schmidhuber, 1997</xref>).</p>
<p>Pang et al. (<xref ref-type="bibr" rid="B21">Pang et al., 2021</xref>) integrated the historical load and various load influencing factors to build a load prediction model. Using the feature extraction capability of neural networks and the temporal memory capability of LSTM, the long-term change pattern of load and the non-linear influence of various influencing factors on load are identified, and the load prediction performance of different historical time windows and different network architectures are verified based on actual load data. Zhuang et al. (<xref ref-type="bibr" rid="B34">Zhuang et al., 2020</xref>) studied and analyzed various popular RNN architectures, and designed a cross-time scale sub-modular recurrent neural network architecture by combining the Zoneout technique, focusing on the random update strategy of the hidden layer modules, which effectively solves the RNN gradient disappearance problem and substantially reduces the network parameters to be trained (<xref ref-type="bibr" rid="B8">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B31">Wang et al., 2020</xref>; <xref ref-type="bibr" rid="B25">Shen et al., 2022</xref>; <xref ref-type="bibr" rid="B6">Duan et al., 2022</xref>; <xref ref-type="bibr" rid="B11">He et al., 2021</xref>).</p>
<p>The data in this paper are derived from the actual steam consumption data of a local paper mill from 2020 to 2021. Firstly, the steam consumption data of this enterprise for the past 2&#xa0;years were pre-processed for annual and monthly visualization analysis; then, the time series characteristics of the steam consumption load data were fully investigated; finally, to investigate the forecasting of steam consumption and optimal control methods, forecasting models were established based on the differential autoregressive moving average model (ARIMA) and the long short-term memory model (LSTM), respectively. The objectives are: i) to supply heat on demand, save energy and reduce emissions, conserve resources and protect the environment; ii) to understand the habits and conditions of enterprises and adjust steam supply in time; and iii) to reduce energy consumption and operating costs and improve the steam revenue of the thermoelectric company.</p>
</sec>
<sec id="s2">
<title>2 Data sample description and pre-processing</title>
<sec id="s2-1">
<title>2.1 Data sources</title>
<p>The data in this paper is real data from a local thermoelectric company, which provides heat and gas to hundreds of companies. In this paper, the industrial steam consumption of a paper company is selected for analysis and forecasting. The table information records some basic information on the amount of steam used by this paper company, with an original sampling interval of 1&#xa0;min, describing the company&#x2019;s industrial steam consumption over the last 2&#xa0;years.</p>
<p>In order to facilitate analysis of steam consumption, prevent gas theft and gas leakage, as well as guide the production of the company, the total daily steam consumption and the average steam consumption per hour were chosen to represent the data characteristics.</p>
</sec>
<sec id="s2-2">
<title>2.2 Data pre-processing</title>
<p>Common problems in time series data are unordered time stamps, missing values, outliers and noise in the data, and we will deal with each of these below.<list list-type="simple">
<list-item>
<p>(1) Null and outlier handling. Null and outlier values were identified and removed during the data collation. Missing values were deleted, and duplicates were removed. Since steam consumption is always positive, there were no cases where steam consumption was zero. Therefore, outlier values were not present.</p>
</list-item>
<list-item>
<p>(2) Set the time index. The date time column has a default string data type and must first be converted to a datetime data type. Because this paper performs time-series analysis, the index of the DataFrame must be a time type. This paper selects <italic>recTime</italic> as the index of the data.</p>
</list-item>
<list-item>
<p>(3) Normalization. Normalization was performed to improve model training accuracy and convergence speed. The normalization method used is detailed in Eq. <xref ref-type="disp-formula" rid="e1">1</xref>.</p>
</list-item>
</list>
<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>min</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>max</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>min</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
</p>
<p>Where: <italic>x</italic>
<sub>
<italic>norm</italic>
</sub> denotes the normalized data; <italic>x</italic> denotes the original data; <italic>x</italic>
<sub>min</sub> denotes the minimum value of the sample data; <italic>x</italic>
<sub>max</sub> denotes the maximum value of the sample data.</p>
</sec>
<sec id="s2-3">
<title>2.3 Data visualization and analysis</title>
<p>Visual presentation includes: data import, time series generation, data down-sampling, etc.</p>
<p>Data of 2020 and 2021 are stored in Excel and now need to be exported from 12 Excel files to a Python environment to be presented as a data frame. Due to the amount of data and the repetitive nature of the operation, a programmatic loop is used for the import.</p>
<p>As the data is recorded once a minute, when visualizing the data, the display in minutes is not only extensive but also too microscopic, so it is necessary to down sample the data. A visual analysis was then carried out by matplotlib, and the average monthly steam consumption in 2020 is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Average monthly steam consumption statistics for 2020.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g001.tif"/>
</fig>
<p>To present the data as a Python data frame, data from 12 Excel files from 2020 to 2021 were programmatically looped. To down-sample the data, monthly steam consumption was visualized using <xref ref-type="fig" rid="F1">Figure 1</xref>. The company&#x2019;s daily steam consumption was about 0.37 tons and remained stable in all months except for February, May, and December, with the most significant fluctuations in February due to low consumption during the New Year holidays.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Steam consumption prediction</title>
<p>Heat load forecasting, from a large perspective, can reduce energy waste, shrink excess capacity and deepen structural reform on the energy supply side; from a small perspective, by achieving accurate heat supply and heat delivery on demand, it can further achieve energy saving and emission reduction effects, and improve the operational efficiency of the entire heat network.</p>
<p>One need for heat load forecasting is to predict heat consumption data from one to 7&#xa0;days in the future, so as to guide recent production planning. This paper examines the problem of short-term data forecasting. Steam usage varies over time and can be viewed as a set of time series data, so a time series analysis model can be used to analyze and forecast steam usage. The following prediction analysis is performed using a linear regression model and a machine learning model, respectively.</p>
<sec id="s3-1">
<title>3.1 ARIMA model</title>
<sec id="s3-1-1">
<title>3.1.1 ARMA</title>
<p>ARMA (<bold>A</bold>utoregressive <bold>M</bold>oving <bold>A</bold>verage Model) is a combination of autoregressive model (AR) and moving average model (MA).</p>
<sec id="s3-1-1-1">
<title>3.1.1.1 Autoregressive model (AR)</title>
<p>AR uses the variable&#x2019;s own historical values to predict itself by determining the relationship between current and historical values. The model requires the data to have smooth characteristics, and if not smooth, it needs to be differenced, and the number of differences depends on the value of <italic>p</italic>.</p>
<p>
<italic>p</italic> denotes the time interval, e.g., <italic>p</italic> &#x3d; 1 means it is today and yesterday&#x2019;s data, and <italic>p</italic> &#x3d; 2 means today and the previous day&#x2019;s data. <italic>p</italic>-order autoregressive process is defined by the following expression:<disp-formula id="e2">
<mml:math id="m2">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>p</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b3;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
</p>
<p>Where <inline-formula id="inf1">
<mml:math id="m3">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the current value, <inline-formula id="inf2">
<mml:math id="m4">
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the constant term, <inline-formula id="inf3">
<mml:math id="m5">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the order, <inline-formula id="inf4">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b3;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the autocorrelation coefficient, <inline-formula id="inf5">
<mml:math id="m7">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the error.</p>
</sec>
<sec id="s3-1-1-2">
<title>3.1.1.2 Moving Average model (MA)</title>
<p>MA is a linear combination of the data at the current moment being the past <italic>q</italic> order white noise, and is mainly concerned with the accumulation of the error term (<inline-formula id="inf6">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> above) in the autoregressive model. Its mean and variance are constant; it can better eliminate the random fluctuations in the prediction and make the error values relatively balanced.</p>
<p>The equation for the q-order moving average process of MA is as follows.<disp-formula id="e3">
<mml:math id="m9">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>q</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b8;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
</p>
</sec>
<sec id="s3-1-1-3">
<title>3.1.1.3 Autoregressive moving average model (ARMA)</title>
<p>ARMA (Autoregressive moving average model) is a combination of autoregressive model (AR) and moving average model (MA). The equation of ARMA is as follows.<disp-formula id="e4">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>p</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b3;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>q</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b8;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>&#x3f5;</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>
</p>
</sec>
</sec>
<sec id="s3-1-2">
<title>3.1.2 ARIMA</title>
<p>ARIMA (Autoregressive Integrated Moving Average Model), also noted as ARIMA (<italic>p</italic>, <italic>d</italic>, <italic>q</italic>), is one of the most common statistical models used for time series forecasting, where AR is &#x201c;autoregressive&#x201d; and <italic>p</italic> is the number of autoregressive terms, <italic>MA</italic> is &#x201c;moving average&#x201d; and <italic>q</italic> is the number of moving average terms. ARIMA is based on ARMA with the addition of the differential order <italic>I</italic>. The basic idea is to transform an unstable time series into a stable time series, thus building the model, as shown in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>ARIMA modeling process.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g002.tif"/>
</fig>
</sec>
<sec id="s3-1-3">
<title>3.1.3 Modeling process</title>
<sec id="s3-1-3-1">
<title>3.1.3.1 Obtain a smooth time series</title>
<p>A smooth time series is obtained. The data for the whole day of 2 January 2021 was used as the training data. The potential requirement for time series autoregressive analysis is that the time series analyzed shall meet the requirement of smoothness. Therefore, the stability of the data needs to be judged first, and if it is not good, it needs to be differenced. For series plotting, ADF test is performed to observe whether the series is smooth or not; for non-smooth time series, d-order differencing is to be performed first to transform into a smooth time series, as shown in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Differential results.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g003.tif"/>
</fig>
<p>From the result graph, it can be seen that both the original data tot_cha and the data diff_1 after first-order differencing do not fluctuate much, within 1.0, and the smoothness is good, so we can consider not differencing the original data and take tot_cha for prediction.</p>
</sec>
<sec id="s3-1-3-2">
<title>3.1.3.2 Calculation of ACF and PACF</title>
<p>The function is evaluated by taking the following two main indicators: i) autocorrelation function ACF; ii) partial autocorrelation function PACF. The two data sets are not adjacent to each other, i.e., the relationship obtained between <italic>x</italic>(<italic>t</italic>) and <italic>x</italic> (<italic>t</italic>-<italic>k</italic>) is not a pure correlation, and <italic>x</italic>(<italic>t</italic>) will also be affected by the intermediate <italic>k</italic>-1 values, which will produce some bias; this requires the PACF partial autocorrelation function to correct for the correlation between the two and strictly control the correlation between the two variables.</p>
<p>The calculation and visual analysis of the indicators is carried out below. Plotting with the plot_acf and plot_pacf functions indicates the order of the data and the change in autocorrelation to determine the values of <italic>q</italic> and <italic>p</italic>. The determination of <italic>q</italic> and <italic>p</italic> is primarily based on how many orders in the respective plots are truncated after the tail. The truncated tails indicate that the points fall within the confidence interval, which is the shaded area in <xref ref-type="fig" rid="F4">Figure 4</xref>.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Distribution of ACF and PACF parameters.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g004.tif"/>
</fig>
</sec>
<sec id="s3-1-3-3">
<title>3.1.3.3 Parameters calculation</title>
<p>From the above analysis, <italic>d</italic>, <italic>q</italic> and <italic>p</italic> were calculated to obtain the ARIMA model. The model parameters are then estimated, and the residuals and white noise are tested, so that the model is built.</p>
</sec>
</sec>
</sec>
<sec id="s3-2">
<title>3.2 LSTM models</title>
<p>LSTM networks are an improved variant of Recurrent Neural Networks (RNN), retaining the ability of RNN networks to efficiently process time-series data while effectively solving the problems of gradient disappearance and gradient explosion. LSTM has the ability to process non-linear data, and can calculate the dependence between individual observations in a time series.</p>
<sec id="s3-2-1">
<title>3.2.1 RNN</title>
<p>Recurrent Neural Network (RNN) is a class of recursive neural network that takes sequence data as input, recursion in the direction of sequence evolution and all nodes (recurrent units) are connected in a chain, as shown in <xref ref-type="fig" rid="F5">Figure 5</xref>. The RNN network structure has a hidden layer &#x201c;recursion&#x201d; function, which allows the nodes in the hidden layer to be interconnected, thus providing the network with memory capabilities.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Cell structure of RNN.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g005.tif"/>
</fig>
<p>The connection structure of the RNN is shown in <xref ref-type="fig" rid="F5">Figure 5</xref>. The hidden state <italic>s</italic>
<sub>
<italic>t</italic>
</sub> and the model output <italic>o</italic>
<sub>
<italic>t</italic>
</sub> on the moment t are calculated as follows:<disp-formula id="e5">
<mml:math id="m11">
<mml:mrow>
<mml:msub>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>U</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m12">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>max</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>V</mml:mi>
<mml:msub>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>where: <italic>x</italic>
<sub>
<italic>t</italic>
</sub> denotes the input at moment t; <inline-formula id="inf7">
<mml:math id="m13">
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> denotes the activation function; <italic>U</italic>, <italic>W</italic>, and <italic>V</italic> denote the weights of the input, hidden, and output layers, respectively; <italic>b</italic>
<sub>
<italic>s</italic>
</sub>, <italic>b</italic>
<sub>
<italic>o</italic>
</sub> denote the hidden and output layer bias parameters, respectively.</p>
<p>When the input sequence is too long, the later features cannot obtain the earlier features, leading to the Long-Term Dependency problem. As gradients are multiplied over more time steps, the gradient vanishing problem will occur. The problem of gradient vanishing is that the weights <italic>w</italic> are hardly updated, so it is difficult to find a suitable weight <italic>w</italic> to map the relationship between the input and output values.</p>
</sec>
<sec id="s3-2-2">
<title>3.2.2 LSTM</title>
<p>LSTM is a derivative algorithm of RNN, which can obtain better analysis in longer sequences. The most important improvement to the LSTM is the inclusion of cell states, i.e., the inclusion of the LSTM CELL, which passes the hidden and cell states of the previous moment to the next moment through input gates, output gates and forgetting gates.</p>
<p>LSTM controls the discard or addition of information through a &#x201c;gate&#x201d; structure that allows selective passage of information, thus achieving forgetting or remembering. A single LSTM unit has three gates, namely, forget gate, input gate and output gate. The cell structure is shown in <xref ref-type="fig" rid="F6">Figure 6</xref>.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Cell structure of LSTM.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g006.tif"/>
</fig>
<p>The role of the forgetting gate is to forget the scaling of the information at the previous moment <italic>c</italic>
<sub>
<italic>t-1</italic>
</sub>, which is one of the key factors for the network to have the memory function. Eq. <xref ref-type="disp-formula" rid="e7">7</xref> is the formula of the forgetting gate. <inline-formula id="inf8">
<mml:math id="m14">
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> denotes the sigmoid function; <italic>W</italic>
<sub>
<italic>f</italic>
</sub> is the weight of the forgetting gate, <italic>h</italic>
<sub>
<italic>t-1</italic>
</sub> denotes the output of the unit at the previous moment; <italic>x</italic>
<sub>
<italic>t</italic>
</sub> denotes the input at the current moment; <italic>b</italic>
<sub>
<italic>f</italic>
</sub> denotes the bias parameter of the forgetting gate.<disp-formula id="e7">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>
</p>
<p>The input gate combines the output information of the previous moment with the input information of the current moment to update the cell state. Eq. <xref ref-type="disp-formula" rid="e8">8</xref> is the input gate and Eq. <xref ref-type="disp-formula" rid="e9">9</xref> is the current learned state. Combining the forgetting gate and the input gate, the cell state at the current moment is the sum of the decay of <italic>c</italic>
<sub>
<italic>t-1</italic>
</sub> and <inline-formula id="inf9">
<mml:math id="m16">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula>, represented by Eq. <xref ref-type="disp-formula" rid="e10">10</xref>. <italic>W</italic>
<sub>
<italic>i</italic>
</sub> and <italic>b</italic>
<sub>
<italic>i</italic>
</sub> denote the weights and biases of the input gate; <italic>W</italic>
<sub>
<italic>c</italic>
</sub> and <italic>b</italic>
<sub>
<italic>c</italic>
</sub> denote the weights and biases of the current cell state; tanh denotes the hyperbolic tangent activation function.<disp-formula id="e8">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
<disp-formula id="e9">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>c</mml:mi>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>tanh</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>
<disp-formula id="e10">
<mml:math id="m19">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:msub>
<mml:mover accent="true">
<mml:mi>c</mml:mi>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(10)</label>
</disp-formula>
</p>
<p>The output gate calculates the output of the current moment based on the current latest state <italic>c</italic>
<sub>
<italic>t</italic>
</sub>, the previous moment cell output <italic>h</italic>
<sub>
<italic>t-1</italic>
</sub>, and the current moment input <italic>x</italic>
<sub>
<italic>t</italic>
</sub>. The final output value of the LSTM model is jointly determined by the output gate and the current moment cell state. Eq. <xref ref-type="disp-formula" rid="e11">11</xref> is the output gate, and Eq. <xref ref-type="disp-formula" rid="e12">12</xref> is the final output value of the model. <italic>W</italic>
<sub>
<italic>o</italic>
</sub> and <italic>b</italic>
<sub>
<italic>o</italic>
</sub> are the weight and bias parameters of the output gate, respectively.<disp-formula id="e11">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(11)</label>
</disp-formula>
<disp-formula id="e12">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2a;</mml:mo>
<mml:mi>tanh</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(12)</label>
</disp-formula>
</p>
<p>In this paper, LSTM predicts the steam consumption based on the data of previous <italic>n</italic> days. The structure of the LSTM network model is shown in <xref ref-type="fig" rid="F7">Figure 7</xref>.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>LSTM steam consumption prediction model.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g007.tif"/>
</fig>
<p>At time <italic>t</italic>, the input to the LSTM prediction model is the historical steam consumption at time <italic>t</italic>, which can be expressed as Input &#x3d; {<italic>H</italic>
<sub>
<italic>t,d-n</italic>
</sub> }, and the model output at time <italic>t</italic> can be expressed as Output &#x3d; <italic>h</italic>
<sub>
<italic>t,d</italic>
</sub>.</p>
</sec>
</sec>
<sec id="s3-3">
<title>3.3 Experimental comparison</title>
<sec id="s3-3-1">
<title>3.3.1 Predicted steam consumption per minute</title>
<p>The dataset was selected as steam consumption per minute from January 2 to January 9, 2021, and the first 70% was used as the training set, the second 20% as the test set, and the last 10% as the validation set.</p>
<p>In this paper, the feature variable is steam consumption, so the number of neuron nodes is set to 1. The number of hidden layers <italic>N</italic> is adjusted experimentally: single-layer, two-layer and three-layer LSTM neural networks were built, and the optimal number of layers (three), which gave the best experimental results, was selected. The number of neurons in the hidden layer was set to 32 by comparing the results for different values. The number of neurons in the output layer is determined by the target variable and is set to 1.</p>
<p>After training, the number of single training samples batch_size was set to 16; the time step was 10; the number of iterations epochs was 140; the dropout to prevent overfitting was 0.2; the loss function was the mean square error; the learning rate was 0.0001; the activation function was the relu function; and the Adam optimizer was used.</p>
<p>The trained model was used to predict future steam consumption. The predicted values obtained based on ARIMA are shown in <xref ref-type="fig" rid="F8">Figure 8</xref>. It can be seen that the predicted trend is relatively smooth, with steam consumption at around 0.65 tons per minute, and future trends can be predicted better.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>ARIMA prediction results.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g008.tif"/>
</fig>
<p>The prediction results based on LSTM are shown in <xref ref-type="fig" rid="F9">Figures 9</xref>, <xref ref-type="fig" rid="F10">10</xref>, <xref ref-type="fig" rid="F11">11</xref>, from which it can be seen that the predicted values of the LSTM model are very close to the measured values and the fit is good.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>LSTM prediction results.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g009.tif"/>
</fig>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>LSTM prediction results of March 3.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g010.tif"/>
</fig>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>LSTM prediction results of March 24.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g011.tif"/>
</fig>
<p>A comparison of the evaluation indicators of the models is shown in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Evaluation indexes of two models.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Model</th>
<th align="left">MAE</th>
<th align="left">MSE</th>
<th align="left">RMSE</th>
<th align="left">MAPE</th>
<th align="left">R2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">LSTM</td>
<td align="left">0.0059</td>
<td align="left">0.0386</td>
<td align="left">0.0062</td>
<td align="left">0.0125</td>
<td align="left">0.9928</td>
</tr>
<tr>
<td align="left">ARIMA</td>
<td align="left">0.0255</td>
<td align="left">0.0016</td>
<td align="left">0.0401</td>
<td align="left">0.0050</td>
<td align="left">0.6910</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>From the table, it can be seen that the coefficients of determination for the models are: <inline-formula id="inf10">
<mml:math id="m22">
<mml:mrow>
<mml:msubsup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>T</mml:mi>
<mml:mi>M</mml:mi>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.9928</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf11">
<mml:math id="m23">
<mml:mrow>
<mml:msubsup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>I</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.6910</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, where the coefficient of determination of the LSTM model is closer to 1, and the model fit better with the measured values. And the error evaluation index of LSTM model is lower than that of ARIMA, indicating that the accuracy of the LSTM model is higher.</p>
</sec>
<sec id="s3-3-2">
<title>3.3.2 Hourly steam consumption forecast</title>
<p>Steam usage from January to September 2021 was used as the training data. The dataset was downsampled at an hourly sampling frequency, giving a total of 6546 rows and 2 columns. The 6546 rows correspond to 6546&#xa0;h; the first column is the average steam usage at the current time point, and the second column is the actual hourly average steam consumption at the next time point, which is used as the label set in this experiment. The first 6378&#xa0;h of data are used as the training data set, and the last 168&#xa0;h (the last 7&#xa0;days) as the test data set. <italic>x_train</italic> is the feature data of the first 6378 rows of the feature set, and <italic>y_train</italic> is the data of the first 6378 rows of the label set. <italic>x_test</italic> is the feature data of the last 168 rows of the feature set, and <italic>y_test</italic> is the data of the last 168 rows of the label set. Test is the sample data collected for the last 168 rows of the label set.</p>
<p>After experiments, the specific parameters are set as follows: DropOut is 20%, i.e., 20% of the network nodes are randomly dropped; the learning rate is set to 0.005; the model selects the &#x201c;Adam&#x201d; optimizer to process the learning rate; and &#x201c;Mean_squared_error&#x201d; is selected as the loss function. According to the experimental effect, the training batch is set to 16 and the number of iterations is set to 100.</p>
<p>The trained model is used to predict the hourly steam usage for a particular day in the following period. The test values were selected from the hourly steam usage data for September 23&#x2013;26, 2021. The predicted results are shown in <xref ref-type="fig" rid="F12">Figures 12</xref>, <xref ref-type="fig" rid="F13">13</xref>, <xref ref-type="fig" rid="F14">14</xref>, <xref ref-type="fig" rid="F15">15</xref>.</p>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>LSTM prediction results of September 23.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g012.tif"/>
</fig>
<fig id="F13" position="float">
<label>FIGURE 13</label>
<caption>
<p>LSTM prediction results of September 24.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g013.tif"/>
</fig>
<fig id="F14" position="float">
<label>FIGURE 14</label>
<caption>
<p>LSTM prediction results of September 25.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g014.tif"/>
</fig>
<fig id="F15" position="float">
<label>FIGURE 15</label>
<caption>
<p>LSTM prediction results of September 26.</p>
</caption>
<graphic xlink:href="fenvs-11-1187201-g015.tif"/>
</fig>
<p>As seen from the results, the trend of the predicted values is consistent with the measured values. This further proves that LSTM can better predict the trend of data changes over longer time periods.</p>
<p>Experiments have shown that ARIMA tends to predict more accurate results for data with a clear trend in the series, while LSTM tends to do better on unstable time series with more stationary components. LSTM shows better performance in predicting unstable time series.</p>
</sec>
</sec>
</sec>
<sec id="s4">
<title>4 Summary</title>
<p>Cogeneration plants provide a centralized heat supply method that improves energy efficiency and reduces carbon emissions. In order to provide companies with accurate heat demand to guide production, accurate analysis and forecasting are required. Due to the strong internal correlation, long lag time and non-linear characteristics of heat load, it is difficult for traditional forecasting methods to capture the trend of data changes. Therefore, this paper takes the industrial steam consumption of a paper manufacturer as an example to study the characteristics of heat load consumption and explore a suitable heat load prediction model. The steam consumption prediction models are established based on ARIMA model and LSTM neural network, respectively.</p>
<p>The prediction was performed in minutes and hours, respectively. The results show that ARIMA tends to predict more accurate results on the data when there is a clear trend in the series, while LSTM tends to do better on unstable time series with more stationary components. Its prediction performance is significantly improved compared with traditional machine learning methods, and as the data volume increases, the method shows good robustness as well as timely and accurate prediction results. The LSTM neural network has a greater advantage in this steam consumption load prediction, and can meet the needs of heat load prediction. Thus, it can achieve energy saving and emission reduction, improve efficiency, and improve the service quality of heat supply.</p>
<p>In this paper, only steam consumption data has been selected as features. Future research could improve the selection of features, take into account factors such as flow rate, pressure and temperature, explore the influence of environmental factors on steam consumption, select a more comprehensive set of influencing factors, and improve the deep learning algorithm, thus improving the accuracy of steam consumption prediction.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The data analyzed in this study is subject to the following licenses/restrictions: The data set involves details of industrial production and is not convenient for public disclosure. Other researchers can contact me by email if necessary. Requests to access these datasets should be directed to <email>37049@qzc.edu.cn</email>.</p>
</sec>
<sec id="s6">
<title>Author contributions</title>
<p>Conceptualization, MY; methodology, MY; software, KF; validation, XX and KF; formal analysis, XX; investigation, XX; resources, HC; data curation, HC; writing&#x2014;original draft preparation, ZZ; writing&#x2014;review and editing, ZX; visualization, ZX; supervision, KF; project administration, KF; funding acquisition, MY. All authors listed have made a substantial, direct, and intellectual contribution to the work and approved it for publication.</p>
</sec>
<sec id="s7">
<title>Funding</title>
<p>This research was funded by Zhejiang Provincial Natural Science Foundation of China under grant number LGF20F030003. The Quzhou City Science and Technology Project under grant no. 2022K162. The Zhejiang Provincial Market Supervision Administration Scientific Research Plan Project under grant no. 20200130.</p>
</sec>
<sec sec-type="COI-statement" id="s8">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s9">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Al-Shammari</surname>
<given-names>E. T.</given-names>
</name>
<name>
<surname>Keivani</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Shamshirband</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Mostafaeipour</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Yee</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Petkovi&#x107;</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Prediction of heat load in district heating systems by Support Vector Machine with Firefly searching algorithm</article-title>. <source>Energy</source> <volume>95</volume>, <fpage>266</fpage>&#x2013;<lpage>273</lpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2015.11.079</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Beyca</surname>
<given-names>O. F.</given-names>
</name>
<name>
<surname>Ervural</surname>
<given-names>B. C.</given-names>
</name>
<name>
<surname>Tatoglu</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ozuyar</surname>
<given-names>P. G.</given-names>
</name>
<name>
<surname>Zaim</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Using machine learning tools for forecasting natural gas consumption in the province of Istanbul</article-title>. <source>Energy Econ.</source> <volume>80</volume>, <fpage>937</fpage>&#x2013;<lpage>949</lpage>. <pub-id pub-id-type="doi">10.1016/j.eneco.2019.03.006</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>1991</year>). &#x201c;<article-title>An expert system for short-term load forecasting</article-title>,&#x201d; in <conf-name>1991 International Conference on Advances in Power System Control, Operation and Management, APSCOM-91</conf-name>, <conf-loc>Hong Kong</conf-loc>, <conf-date>November 5-8, 1991</conf-date> (<publisher-name>IET</publisher-name>), <fpage>330</fpage>&#x2013;<lpage>334</lpage>.</citation>
</ref>
<ref id="B4">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Dong</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). <source>Short-term forecasting of highway capacity through wavelet transform and dynamic neural time series: A stochastic analysis (No. 15-5048)</source>, <conf-name>TRB 94th annual meeting compendium of papers</conf-name>, <conf-loc>Washington, DC</conf-loc>. <comment>
<ext-link ext-link-type="uri" xlink:href="http://trid.org/view/1339004">http://trid.org/view/1339004</ext-link>
</comment>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Du</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Fei</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Malicious data deception attacks against power systems: A new case and its detection method</article-title>. <source>Trans. Inst. Meas. Control</source> <volume>41</volume> (<issue>6</issue>), <fpage>1590</fpage>&#x2013;<lpage>1599</lpage>. <pub-id pub-id-type="doi">10.1177/0142331217740622</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Duan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Bashir</surname>
<given-names>A. K.</given-names>
</name>
<name>
<surname>Alshehri</surname>
<given-names>M. D.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>P.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>A web knowledge-driven multimodal retrieval method in computational social systems: Unsupervised and robust graph convolutional hashing</article-title>. <source>IEEE Trans. Comput. Soc. Syst.</source>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1109/tcss.2022.3216621</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ervural</surname>
<given-names>B. C.</given-names>
</name>
<name>
<surname>Beyca</surname>
<given-names>O. F.</given-names>
</name>
<name>
<surname>Zaim</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Model estimation of ARMA using genetic algorithms: A case study of forecasting natural gas consumption</article-title>. <source>Procedia-Social Behav. Sci.</source> <volume>235</volume>, <fpage>537</fpage>&#x2013;<lpage>545</lpage>. <pub-id pub-id-type="doi">10.1016/j.sbspro.2016.11.066</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Miao</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Detection of weak electromagnetic interference attacks based on fingerprint in IIoT systems</article-title>. <source>Future Gener. Comput. Syst.</source> <volume>126</volume>, <fpage>295</fpage>&#x2013;<lpage>304</lpage>. <pub-id pub-id-type="doi">10.1016/j.future.2021.08.020</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Geysen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>De Somer</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Johansson</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Brage</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Vanhoudt</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Operational thermal load forecasting in district heating networks using machine learning and expert advice</article-title>. <source>Energy Build.</source> <volume>162</volume>, <fpage>144</fpage>&#x2013;<lpage>153</lpage>. <pub-id pub-id-type="doi">10.1016/j.enbuild.2017.12.042</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Greff</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Srivastava</surname>
<given-names>R. K.</given-names>
</name>
<name>
<surname>Koutn&#xed;k</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Steunebrink</surname>
<given-names>B. R.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Lstm: A search space odyssey</article-title>. <source>IEEE Trans. neural Netw. Learn. Syst.</source> <volume>28</volume> (<issue>10</issue>), <fpage>2222</fpage>&#x2013;<lpage>2232</lpage>. <pub-id pub-id-type="doi">10.1109/tnnls.2016.2582924</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>DNS rebinding threat modeling and security analysis for local area network of maritime transportation systems</article-title>. <source>IEEE Trans. Intelligent Transp. Syst.</source>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1109/tits.2021.3135197</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hochreiter</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Long short-term memory</article-title>. <source>Neural Comput.</source> <volume>9</volume> (<issue>8</issue>), <fpage>1735</fpage>&#x2013;<lpage>1780</lpage>. <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jovi&#x107;</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Adaptive neuro-fuzzy prediction of flow pattern and gas hold-up in bubble column reactors</article-title>. <source>Eng. Comput.</source> <volume>37</volume>, <fpage>1723</fpage>&#x2013;<lpage>1734</lpage>. <pub-id pub-id-type="doi">10.1007/s00366-019-00905-y</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kumbinarasaiah</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Raghunatha</surname>
<given-names>K. R.</given-names>
</name>
<name>
<surname>Preetham</surname>
<given-names>M. P.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Applications of Bernoulli wavelet collocation method in the analysis of Jeffery&#x2013;Hamel flow and heat transfer in Eyring&#x2013;Powell fluid</article-title>. <source>J. Therm. Analysis Calorim.</source> <volume>148</volume> (<issue>3</issue>), <fpage>1173</fpage>&#x2013;<lpage>1189</lpage>. <pub-id pub-id-type="doi">10.1007/s10973-022-11706-9</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kuzishchin</surname>
<given-names>V. F.</given-names>
</name>
<name>
<surname>Ismatkhodzhaev</surname>
<given-names>S. K.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>All-mode automatic temperature control system for superheated industrial steam boiler during buffer consumption of gaseous production waste</article-title>. <source>J. Phys. Conf. Ser.</source> <volume>1683</volume> (<issue>4</issue>), <fpage>042023</fpage>. <pub-id pub-id-type="doi">10.1088/1742-6596/1683/4/042023</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>L&#xe4;ngkvist</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Karlsson</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Loutfi</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>A review of unsupervised feature learning and deep learning for time-series modeling</article-title>. <source>Pattern Recognit. Lett.</source> <volume>42</volume>, <fpage>11</fpage>&#x2013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2014.01.008</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A novel multichannel long short-term memory method with time series for soil temperature modeling</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>182026</fpage>&#x2013;<lpage>182043</lpage>. <pub-id pub-id-type="doi">10.1109/access.2020.3028995</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>L&#xfc;</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Investigation on the performance evaluation of gas-fired combi-boilers with factor analysis and cluster analysis</article-title>. <source>SN Appl. Sci.</source> <volume>2</volume>, <fpage>1132</fpage>&#x2013;<lpage>1210</lpage>. <pub-id pub-id-type="doi">10.1007/s42452-020-2931-9</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mao</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A fast calibration algorithm for Non-Dispersive Infrared single channel carbon dioxide sensor based on deep learning</article-title>. <source>Comput. Commun.</source> <volume>179</volume>, <fpage>175</fpage>&#x2013;<lpage>182</lpage>. <pub-id pub-id-type="doi">10.1016/j.comcom.2021.08.003</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Muzaffar</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Afshari</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Short-term load forecasts using LSTM networks</article-title>. <source>Energy Procedia</source> <volume>158</volume>, <fpage>2922</fpage>&#x2013;<lpage>2927</lpage>. <pub-id pub-id-type="doi">10.1016/j.egypro.2019.01.952</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Short-term power load forecasting based on LSTM recurrent neural network</article-title>. <source>Power Eng. Technol.</source> <volume>40</volume>, <fpage>175</fpage>&#x2013;<lpage>180</lpage>. <pub-id pub-id-type="doi">10.12158/j.2096-3203.2021.01.025</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Poto&#x10d;nik</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Soldo</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>&#x160;imunovi&#x107;</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>&#x160;ari&#x107;</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Jeromen</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Govekar</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Comparison of static and adaptive models for short-term residential natural gas forecasting in Croatia</article-title>. <source>Appl. energy</source> <volume>129</volume>, <fpage>94</fpage>&#x2013;<lpage>103</lpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2014.04.102</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qing</surname>
<given-names>Q. D.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>H. M.</given-names>
</name>
<name>
<surname>Xiang</surname>
<given-names>L. L.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Holidays short term load forecasting using fuzzy linear regression method</article-title>. <source>Power Demand Side Manag.</source></citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Razzak</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Zafar</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Imran</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Randomized nonlinear one-class support vector machines with bounded loss function to detect of outliers for large scale IoT data</article-title>. <source>Future Gener. Comput. Syst.</source> <volume>112</volume>, <fpage>715</fpage>&#x2013;<lpage>723</lpage>. <pub-id pub-id-type="doi">10.1016/j.future.2020.05.045</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Yao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Bhardwaj</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A privacy-preserving social computing framework for health management using federated learning</article-title>. <source>IEEE Trans. Comput. Soc. Syst.</source>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1109/tcss.2022.3222682</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wan</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Parallel LSTM-based regional integrated energy system multienergy source-load information interactive energy prediction</article-title>. <source>Complexity</source> <volume>2019</volume>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1155/2019/7414318</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Forecasting energy demand in China and India: Using single-linear, hybrid-linear, and non-linear time series forecast techniques</article-title>. <source>Energy</source> <volume>161</volume>, <fpage>821</fpage>&#x2013;<lpage>831</lpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2018.07.168</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Forecasting China&#x27;s oil consumption: A comparison of novel nonlinear-dynamic grey model (GM), linear GM, nonlinear GM and metabolism GM</article-title>. <source>Energy</source> <volume>183</volume> (<issue>15</issue>), <fpage>160</fpage>&#x2013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2019.06.139</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Tian</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Microcontroller unit chip temperature fingerprint informed machine learning for IIoT intrusion detection</article-title>. <source>IEEE Trans. Industrial Inf.</source> <volume>19</volume> (<issue>2</issue>), <fpage>2219</fpage>&#x2013;<lpage>2227</lpage>. <pub-id pub-id-type="doi">10.1109/tii.2022.3195287</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Deep-learning-based weak electromagnetic intrusion detection method for zero touch networks on industrial IoT</article-title>. <source>IEEE Netw.</source> <volume>36</volume> (<issue>6</issue>), <fpage>236</fpage>&#x2013;<lpage>242</lpage>. <pub-id pub-id-type="doi">10.1109/mnet.001.2100754</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Kumar</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Gong</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Kong</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>W.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Realizing the potential of the internet of things for smart tourism with 5G and AI</article-title>. <source>IEEE Netw.</source> <volume>34</volume> (<issue>6</issue>), <fpage>295</fpage>&#x2013;<lpage>301</lpage>. <pub-id pub-id-type="doi">10.1109/mnet.011.2000250</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Ali</surname>
<given-names>Z. A.</given-names>
</name>
<name>
<surname>Kwon</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wong</surname>
<given-names>B. M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Predicting complex erosion profiles in steam distribution headers with convolutional and recurrent neural networks</article-title>. <source>Industrial Eng. Chem. Res.</source> <volume>61</volume> (<issue>24</issue>), <fpage>8520</fpage>&#x2013;<lpage>8529</lpage>. <pub-id pub-id-type="doi">10.1021/acs.iecr.1c04712</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>A short-term load forecasting model of natural gas based on optimized genetic algorithm and improved BP neural network</article-title>. <source>Appl. Energy</source> <volume>134</volume>, <fpage>102</fpage>&#x2013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2014.07.104</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhuang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>W. Z.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>F. W.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Cross-scale recurrent neural network based on Zoneout and its application in short-term power load forecasting</article-title>. <source>Comput. Sci.</source> <volume>47</volume> (<issue>9</issue>), <fpage>105</fpage>&#x2013;<lpage>109</lpage>.</citation>
</ref>
</ref-list>
</back>
</article>