<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Energy Res.</journal-id>
<journal-title>Frontiers in Energy Research</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Energy Res.</abbrev-journal-title>
<issn pub-type="epub">2296-598X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1227979</article-id>
<article-id pub-id-type="doi">10.3389/fenrg.2023.1227979</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Energy Research</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning time pattern attention mechanism-based short-term load forecasting method</article-title>
<alt-title alt-title-type="left-running-head">Liao et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fenrg.2023.1227979">10.3389/fenrg.2023.1227979</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Liao</surname>
<given-names>Wei</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ruan</surname>
<given-names>Jiaqi</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1858969/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Xie</surname>
<given-names>Yinghua</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Qingwei</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Jing</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Ruoyu</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhao</surname>
<given-names>Junhua</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/568588/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Shenzhen Power Supply Co., Ltd.</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>School of Science and Engineering</institution>, <institution>The Chinese University of Hong Kong</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Shenzhen Institute of Artificial Intelligence and Robotics for Society</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1969821/overview">Zhengmao Li</ext-link>, Nanyang Technological University, Singapore</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1101809/overview">Guibin Wang</ext-link>, Shenzhen University, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1882153/overview">Jing Qiu</ext-link>, The University of Sydney, Australia</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2327158/overview">Bing Luo</ext-link>, Duke Kunshan University, China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Jiaqi Ruan, <email>jiaqiruan@link.cuhk.edu.cn</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>31</day>
<month>07</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>11</volume>
<elocation-id>1227979</elocation-id>
<history>
<date date-type="received">
<day>24</day>
<month>05</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>14</day>
<month>07</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Liao, Ruan, Xie, Wang, Li, Wang and Zhao.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Liao, Ruan, Xie, Wang, Li, Wang and Zhao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Accurate load forecasting is crucial to improve the stability and cost-efficiency of smart grid operations. However, how to integrate multiple significant factors for enhancing load forecasting performance is insufficiently investigated in previous studies. To fill the gap, this study proposes a novel hybrid deep learning model for short-term load forecasting. First, the long short-term memory network is utilized to capture patterns from historical load data. Second, a time pattern attention (TPA) mechanism is incorporated to improve feature extraction and learning capabilities. By discerning valuable features and eliminating irrelevant ones, the TPA mechanism enhances the learning process. Third, fully-connected layers are employed to integrate external factors such as climatic conditions, economic indicators, and temporal aspects. This comprehensive approach facilitates a deeper understanding of the impact of these factors on load profiles, leading to the development of a highly accurate load forecasting model. Rigorous experimental evaluations demonstrate the superior performance of the proposed approach in comparison to existing state-of-the-art load forecasting methodologies.</p>
</abstract>
<kwd-group>
<kwd>load forecasting</kwd>
<kwd>deep learning</kwd>
<kwd>time pattern attention</kwd>
<kwd>smart grid</kwd>
<kwd>data driven</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Smart Grids</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>With the increasing complexity of electrical infrastructure, the power industry has embraced the emergence of smart grids (<xref ref-type="bibr" rid="B21">Wang et al., 2018</xref>), which integrate conventional power system equipment with advanced intelligent digital communication devices, aiming to enhance system&#x2019;s performance, safety, and reliability (<xref ref-type="bibr" rid="B27">Yu et al., 2014</xref>). The integration of intelligent electronic devices into smart grids enables inter-device communication and real-time data sharing with control centers, resulting in the accumulation of significant amounts of data. Although initially unproductive, this data can be utilized for system assessments, ultimately improving the operational performance of smart grids.</p>
<p>As the energy internet emerges (<xref ref-type="bibr" rid="B22">Wang et al., 2019a</xref>) and energy demands escalate, there is a growing emphasis on energy conservation, leading to an amplified need for load forecasting, particularly in the commercial and industrial sectors. Load forecasting offers several advantages, including the mitigation of supply-demand imbalances and optimization of energy utilization benefits. With the increasing availability of big data, artificial intelligence techniques play a crucial role in load forecasting (<xref ref-type="bibr" rid="B17">Ruan et al., 2023b</xref>).</p>
<p>Short-term load forecasting, which commonly predicts loads within a range from hours to weeks, enables utilities and power plants to adjust generation in response to market demands. Research has shown that a mere 1% reduction in load forecasting errors can lead to an annual operating cost reduction of &#xa3;10 million for a British power company (<xref ref-type="bibr" rid="B4">Gilanifar et al., 2019</xref>). Additionally, accurate load forecasts facilitate the implementation of dynamic pricing structures in the electricity market (<xref ref-type="bibr" rid="B17">Ruan et al., 2023b</xref>). However, due to the intricacies and uncertainties associated with power demands (<xref ref-type="bibr" rid="B14">Ruan et al., 2022a</xref>), load forecasting still faces significant challenges. To address this, recent advancements in data analysis techniques and data collection systems, such as smart meters (<xref ref-type="bibr" rid="B8">Li et al., 2021</xref>), have the potential to greatly enhance load forecasting accuracy. Specifically, machine learning-based load forecasting methods, including autoregressive integrated moving average (ARIMA), multiple linear regression (<xref ref-type="bibr" rid="B27">Yu et al., 2014</xref>), Gaussian process regression (<xref ref-type="bibr" rid="B1">Akorede et al., 2010</xref>), support vector regression (SVR) (<xref ref-type="bibr" rid="B7">Hossain et al., 2019</xref>), artificial neural networks (ANN) (<xref ref-type="bibr" rid="B20">Virote and Neves-Silva, 2012</xref>; <xref ref-type="bibr" rid="B2">Candanedo et al., 2017</xref>), and deep neural networks (DNN) (<xref ref-type="bibr" rid="B13">Menezes et al., 2014</xref>), have gained substantial attention.</p>
<p>Deep learning techniques have proven to be effective in developing highly accurate load forecasting models. For example, the literature (<xref ref-type="bibr" rid="B22">Wang et al., 2019a</xref>) proposed a deep belief network (DBN)-based model for short-term load forecasting, which is able to learn probability distribution so as to determine future load profiles. Other studies recommended the use of self-recurrent wavelet neural networks (SRWNN) for load forecasting in microgrids by introducing a Levenberg-Marquardt learning algorithm to improve the forecast accuracy for highly volatile and non-smooth time series of microgrid electricity load (<xref ref-type="bibr" rid="B3">Chitsaz et al., 2015</xref>), the employment of multi-layer perceptron (MLP) for non-residential building electric load forecasting with analyses of most relevant features (<xref ref-type="bibr" rid="B12">Massana et al., 2015</xref>), and the application of recurrent neural networks (RNN) for short-term load forecasting that can effectively handle time-series data (<xref ref-type="bibr" rid="B24">Wen et al., 2022</xref>). These models utilize historical data in digital formats to predict future electric load variations.</p>
<p>However, the extensive integration of renewable energy sources (<xref ref-type="bibr" rid="B25">Yang et al., 2021</xref>), the widespread adoption of electric vehicles (<xref ref-type="bibr" rid="B5">Hartvigsson et al., 2021</xref>; <xref ref-type="bibr" rid="B26">Yang et al., 2022</xref>), the large-scale deployment of energy storage systems (<xref ref-type="bibr" rid="B28">Zhang et al., 2021</xref>), and emerging cyber threats (<xref ref-type="bibr" rid="B16">Ruan et al., 2023a</xref>) have introduced greater uncertainty and disturbances in short-term load forecasting (<xref ref-type="bibr" rid="B23">Wang et al., 2019b</xref>). To address the limitations of existing models in capturing these dynamic changes, this paper proposes a deep learning-based approach that incorporates a time pattern attention (TPA) mechanism to construct a highly accurate load forecasting model. The contributions of this article can be summarized as follows.<list list-type="simple">
<list-item>
<p>&#x2022; To our knowledge, it is the first study to propose an adaptive short-term load forecasting framework that can accommodate various critical features, thereby facilitating accurate forecasting results.</p>
</list-item>
<list-item>
<p>&#x2022; A specific deep learning-based hybrid model is proposed. It incorporates the long short-term memory (LSTM) network and the TPA mechanism as well as various deep learning techniques that can effectively utilize historical load data and external factors (e.g., climate, economy, and date) to discern dynamic load trends for load forecasting.</p>
</list-item>
<list-item>
<p>&#x2022; Comprehensive experiments are conducted by using Panama data to analyze the proposed model and compare it with alternative state-of-the-art load forecasting models. The results demonstrate the superior performance of the proposed method.</p>
</list-item>
</list>
</p>
<p>The remainder of the paper is organized as follows. <xref ref-type="sec" rid="s2">Section 2</xref> introduces preliminaries of load forecasting, including its importance, features, and challenges. <xref ref-type="sec" rid="s3">Section 3</xref> elaborates on deep learning-based TPA mechanism and the proposed short-term load forecasting model as well as the overall framework. <xref ref-type="sec" rid="s4">Section 4</xref> demonstrates and discusses the case studies on the proposed load forecasting model. Finally, <xref ref-type="sec" rid="s5">Section 5</xref> summarizes the article.</p>
</sec>
<sec id="s2">
<title>2 Preliminaries of load forecasting</title>
<sec id="s2-1">
<title>2.1 Importance of load forecasting</title>
<p>Load forecasting has consistently been a vital concern for the power industry (<xref ref-type="bibr" rid="B10">Li et al., 2023b</xref>), as forecasting data enables power generation and load management departments to bolster their performance and reliability. In addition to economic and environmental considerations, load forecasting serves the following essential functions.<list list-type="simple">
<list-item>
<p>1) Comprehending load profiles allows power companies to devise rational electricity demand plans for customers, make economically prudent decisions, and mitigate risks for the organization.</p>
</list-item>
<list-item>
<p>2) Load forecasting aids power generation enterprises in anticipating potential resource requirements, facilitating the storage of necessary resources, such as fuel, to guarantee an uninterrupted power supply.</p>
</list-item>
<list-item>
<p>3) It assists in projecting the evolution of electricity generation within society and determining the need for future power plants, thus guiding power companies in preparations for constructing additional generating units to accommodate escalating electricity demands.</p>
</list-item>
<list-item>
<p>4) It contributes to the analysis and planning of power system maintenance.</p>
</list-item>
<list-item>
<p>5) By reducing energy production shortages and surpluses, load forecasting helps power companies minimize economic and energy losses.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2-2">
<title>2.2 Load forecasting features</title>
<p>The outcomes of load forecasting techniques are influenced by various factors. To obtain accurate predictions, it is crucial to consider the relevant factors of the dataset and use them appropriately. Numerous variables may affect the load forecasting performance. Here are some factors related to load forecasting.</p>
<sec id="s2-2-1">
<title>2.2.1 Time factors</title>
<p>Due to the deductive nature of electric load over time (<xref ref-type="bibr" rid="B15">Ruan et al., 2022b</xref>), the most critical aspect in forecasting is time. As the available data generated by various devices (such as smart meters, sensors, data servers, and other equipment) is time-series data, the importance of time in forecasting is paramount. Time has different attributes that can be used for prediction, such as &#x201c;day of the week,&#x201d; &#x201c;week of the month,&#x201d; &#x201c;month of the season,&#x201d; and so on (<xref ref-type="bibr" rid="B19">Ruzic et al., 2003</xref>). The selection of time horizon in forecasting is also a key factor (<xref ref-type="bibr" rid="B11">Lusis et al., 2017</xref>). Employing a more extended time range allows for the utilization of additional historical data.</p>
</sec>
<sec id="s2-2-2">
<title>2.2.2 Climate factors</title>
<p>Climate stands as a paramount factor in load forecasting, as it substantially influences both the agricultural sector and household consumption behaviors. Specifically, the usage patterns of various electrical devices, contingent upon weather-related warmth or coldness, can give rise to distinct load profiles. Consequently, load forecasting models may incorporate weather data sourced from the nearest accessible meteorological station, encompassing variables such as temperature, precipitation, humidity, dew point temperature, solar radiation intensity, wind speed, wind chill index (WCI), temperature-humidity index (THI), and other meteorological parameters.</p>
</sec>
<sec id="s2-2-3">
<title>2.2.3 Other factors</title>
<p>Economic determinants, including market stability, electricity price fluctuations, load control, and industrial growth rates, profoundly influence system average load and peak demand (<xref ref-type="bibr" rid="B9">Li et al., 2023a</xref>). Moreover, the physical attributes of structural, housing, or surrounding areas exhibit distinct load characteristics. Load forecasting for edifices and other structures can generally be executed utilizing building attribute parameters, such as the number of rooms and floors, window-to-wall ratio, orientation, window-wall thermal efficiency, fresh air volume, and occupant density.</p>
</sec>
</sec>
<sec id="s2-3">
<title>2.3 Challenges in load forecasting</title>
<p>For a long time, researchers have been dedicated to improving load forecasting techniques. However, when it comes to the specific modeling of load forecasting, there are still some obstacles.</p>
<p>First, weather is a key factor when performing load forecasting. Since the weather cannot be accurately estimated, it is impossible to accurately determine its impact on the load. Sudden weather changes can have significant effects on the expected load characteristics.</p>
<p>Second, the variety of meters utilized by consumers considerably influences load forecasting performance. Consumers employ an array of meters, encompassing smart and conventional meters, each with distinct measurement frequencies. As meter measurement frequencies and customer consumption behavior diverge, employing combined data for load forecasting may result in significant prediction errors.</p>
<p>Third, to further refine load forecasting, a series of other complex factors can be considered, but this adds to the difficulty of accommodating multiple variables, rendering the selection of an appropriate load forecasting model extremely challenging.</p>
<p>Fourth, since power systems may experience faults, power outages, and other intermittent events during dynamic operation, load forecasting models cannot account for such sudden occurrences, which also affect the load forecasting performance.</p>
<p>Fifth, consumer electricity demand is influenced by changes in economic market conditions or tariff changes. Although these economic factors significantly impact load forecasting outcomes, they are often overlooked by existing load forecasting methodologies.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Proposed short-term load forecasting model based on the time pattern attention mechanism</title>
<sec id="s3-1">
<title>3.1 Long short-term memory network</title>
<p>In constructing a load forecasting model, the time dimension emerges as a critical factor influencing forecasting performance. Dynamic patterns can be discerned from historical load time series data. Consequently, employing neural networks adept at handling time series data can effectively extract inherent feature information and augment model accuracy. Long short-term memory (LSTM) networks, a unique variant of recurrent neural networks (RNNs), exhibit a natural advantage in processing sequential data (<xref ref-type="bibr" rid="B6">Hochreiter and Schmidhuber, 1997</xref>), as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The LSTM network manipulates the cell state through internal input gates, output gates, and forget gates, ultimately yielding their hidden state, as demonstrated in the ensuing equations:<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi mathvariant="bold-italic">i</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
<disp-formula id="e2">
<mml:math id="m2">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi mathvariant="bold-italic">o</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi mathvariant="bold-italic">o</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
<disp-formula id="e3">
<mml:math id="m3">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi mathvariant="bold-italic">f</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi mathvariant="bold-italic">f</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
<disp-formula id="e4">
<mml:math id="m4">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:mi mathvariant="italic">tanh</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi mathvariant="bold-italic">c</mml:mi>
</mml:msub>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>
<disp-formula id="e5">
<mml:math id="m5">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:mi mathvariant="italic">tanh</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>where <inline-formula id="inf1">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mi>n</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> represents the input of the LSTM layer at time <inline-formula id="inf2">
<mml:math id="m7">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>; <inline-formula id="inf3">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf4">
<mml:math id="m9">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf5">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf6">
<mml:math id="m11">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, and <inline-formula id="inf7">
<mml:math id="m12">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> denote the input gate state, output gate state, forget gate state, cell state, and hidden layer state at time <inline-formula id="inf8">
<mml:math id="m13">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, respectively; <inline-formula id="inf9">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf10">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf11">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, and <inline-formula id="inf12">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> are all learnable parameter matrices. The symbol <inline-formula id="inf13">
<mml:math id="m18">
<mml:mrow>
<mml:mo>&#x2299;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> denotes element-wise multiplication.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Diagram of the long short-term memory network.</p>
</caption>
<graphic xlink:href="fenrg-11-1227979-g001.tif"/>
</fig>
</sec>
<sec id="s3-2">
<title>3.2 Time pattern attention mechanism</title>
<p>While the LSTM network has exhibited remarkable proficiency in managing time series data, the advent of the attention mechanism facilitates the extraction of pertinent information among features (<xref ref-type="bibr" rid="B18">Ruan et al., 2023c</xref>), thereby augmenting the model&#x2019;s learning capacity and accuracy. Consequently, this article incorporates a TPA mechanism, grounded in the LSTM network, to bolster the load forecasting model&#x2019;s ability to learn from historical load time series data.</p>
<p>First, a one-dimensional convolutional neural network (1-D CNN) layer is used to extract features from the LSTM network&#x2019;s hidden states. Let <inline-formula id="inf14">
<mml:math id="m19">
<mml:mrow>
<mml:mrow>
<mml:mfenced open="{" close="}" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> represent the hidden states of the LSTM layer, where dimension <inline-formula id="inf15">
<mml:math id="m20">
<mml:mrow>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the number of features and dimension <inline-formula id="inf16">
<mml:math id="m21">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the time steps. The hidden states in the past <inline-formula id="inf17">
<mml:math id="m22">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> steps, i.e., <inline-formula id="inf18">
<mml:math id="m23">
<mml:mrow>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="{" close="}" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>, are processed by the one-dimensional convolution operation, as follows:<disp-formula id="e6">
<mml:math id="m24">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>C</mml:mi>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:msub>
<mml:mo>&#xd7;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">C</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>where the convolution output <inline-formula id="inf19">
<mml:math id="m25">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mi>C</mml:mi>
</mml:msup>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is obtained using <inline-formula id="inf20">
<mml:math id="m26">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> convolution kernels <inline-formula id="inf21">
<mml:math id="m27">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="bold-italic">C</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>. The convolution kernels are applied along the row vectors of the hidden state matrix <inline-formula id="inf22">
<mml:math id="m28">
<mml:mrow>
<mml:mi mathvariant="bold-italic">H</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> to compute the convolution, extracting the temporal pattern matrix <inline-formula id="inf23">
<mml:math id="m29">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mi>C</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> within the receptive field of the convolution kernels. <inline-formula id="inf24">
<mml:math id="m30">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>C</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> represents the result value of processing the <inline-formula id="inf25">
<mml:math id="m31">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th row vector of <inline-formula id="inf26">
<mml:math id="m32">
<mml:mrow>
<mml:mi mathvariant="bold-italic">H</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> with the <inline-formula id="inf27">
<mml:math id="m33">
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th convolution kernel.</p>
<p>Subsequently, a scoring mechanism is employed to evaluate the relevance between the hidden state <inline-formula id="inf28">
<mml:math id="m34">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and the row vectors of the convolutional temporal pattern matrix <inline-formula id="inf29">
<mml:math id="m35">
<mml:mrow>
<mml:msup>
<mml:mi>H</mml:mi>
<mml:mi>C</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>, as follows,<disp-formula id="e7">
<mml:math id="m36">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>C</mml:mi>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>C</mml:mi>
</mml:msubsup>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mi>a</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>where <inline-formula id="inf30">
<mml:math id="m37">
<mml:mrow>
<mml:msubsup>
<mml:mi>H</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>C</mml:mi>
</mml:msubsup>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> represents the <inline-formula id="inf31">
<mml:math id="m38">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th row vector of <inline-formula id="inf32">
<mml:math id="m39">
<mml:mrow>
<mml:msup>
<mml:mi>H</mml:mi>
<mml:mi>C</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>; <inline-formula id="inf33">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>a</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is the attention mapping matrix in the scoring mechanism.</p>
<p>By applying the Sigmoid activation function to the scoring mechanism, the attention coefficient <inline-formula id="inf34">
<mml:math id="m41">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b1;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is obtained, which represents the relevance between <inline-formula id="inf35">
<mml:math id="m42">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf36">
<mml:math id="m43">
<mml:mrow>
<mml:msubsup>
<mml:mi>H</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>C</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>, making it easier to compare multivariate associations:<disp-formula id="e8">
<mml:math id="m44">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>C</mml:mi>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
</p>
<p>Based on the obtained attention coefficients, an attention-weighted summation is performed and added to the mapped hidden state, which yields the output of the TPA mechanism:<disp-formula id="e9">
<mml:math id="m45">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mi>h</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi mathvariant="bold-italic">h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mi>v</mml:mi>
</mml:msub>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msubsup>
<mml:mi mathvariant="bold-italic">H</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>C</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi mathvariant="normal">T</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>where both <inline-formula id="inf37">
<mml:math id="m46">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>h</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf38">
<mml:math id="m47">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>v</mml:mi>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> are learnable parameter matrices for the TPA layer, and <inline-formula id="inf39">
<mml:math id="m48">
<mml:mrow>
<mml:msubsup>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi mathvariant="double-struck">R</mml:mi>
<mml:mi>m</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> represents the hidden state after being processed by the LSTM layer and the TPA layer.</p>
</sec>
<sec id="s3-3">
<title>3.3 Time pattern attention mechanism-based short-term load forecasting</title>
<p>An overall model for the TPA-LSTM-based short-term load forecasting considering multi-regional factors is described in <xref ref-type="fig" rid="F2">Figure 2</xref>, encompassing three modules. Module 1 employs TPA-LSTM to learn from historical load data, initially establishing a load baseline. Module 2 assimilates various factors from distinct regions, such as climate and economy, utilizing fully connected layers (FCLs) to learn diverse climate conditions, including temperature, humidity, wind speed, precipitation, and economic factors like market stability, electricity price adjustments, load control, and industrial growth. Module 3 constitutes the date information learning module, which examines the influence of varying seasons and typical days on electric load and integrates the output of Module 2 through a concatenation operation to learn the impact of date information on the electric load across various regions. Ultimately, the three modules enter the fusion layer and yield the final load profile in the form of a fully connected layer.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Framework of the short-term load forecasting model based on TPA-LSTM.</p>
</caption>
<graphic xlink:href="fenrg-11-1227979-g002.tif"/>
</fig>
<p>The determination of hyperparameters can be accomplished through a combined implementation of random search and <inline-formula id="inf40">
<mml:math id="m49">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>-fold cross-validation methodologies. Initially, a predefined search space is established to encompass the range of potential values for each hyperparameter. From this search space, a series of random samples is generated to explore the search space and identify a potentially optimal combination of hyperparameters. Subsequently, in order to enhance the robustness of the load forecasting model, the entire dataset is divided into <inline-formula id="inf41">
<mml:math id="m50">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> subsets. During each iteration of training, the model is trained <inline-formula id="inf42">
<mml:math id="m51">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> times, each time using one subset as the validation set and the remaining subsets as the training set. This process ensures that each subset serves as the validation set exactly once throughout the iterations. Following the completion of <inline-formula id="inf43">
<mml:math id="m52">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> iterations, the model&#x2019;s performance metrics are averaged over the training process. Finally, the set of hyperparameters that yields the best performance is selected for implementation in the load forecasting model.</p>
<p>The employment of the proposed load forecasting model is illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>. The process begins with the careful preparation of the dataset, followed by the construction of the load forecasting model as demonstrated in <xref ref-type="fig" rid="F2">Figure 2</xref>. Subsequently, the hyperparameters of the load forecasting model are selected by employing a combination of random search and <inline-formula id="inf44">
<mml:math id="m53">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>-fold cross-validation techniques. The finalized hyperparameters are then implemented in the load forecasting model, which undergoes training for practical application.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Illustration of employment of the proposed load forecasting model.</p>
</caption>
<graphic xlink:href="fenrg-11-1227979-g003.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="results|discussion" id="s4">
<title>4 Results and discussions</title>
<sec id="s4-1">
<title>4.1 Set up</title>
<p>To evaluate the performance of the proposed load forecasting model, a historical load dataset encompassing all regions in Panama from 2015 to 2020 is employed for simulation. The data has a time granularity of 1 h and includes total load (MWh), temperature (&#xb0;C), relative humidity (%), liquid precipitation (L/m2), wind speed (m/s), school day indicator (0/1), holiday indicator (0/1), and holiday index (integer) for three cities in Panama. The dataset is divided into training, validation, and test sets with a non-overlapping partition ratio of 8:1:1. The whole dataset starts from 3 January 2015, and ends on 27 June 2020. Accordingly, the sample sizes of the training, validation, and test sets are 1596, 199, and 200, respectively.</p>
<p>In addition, three prevalent deep learning models serve as comparative models, as shown in <xref ref-type="table" rid="T1">Table 1</xref>. The MLP model learns climate information, economic factors, and date information to predict the load profile for the next 24 h. Based on the learning of external factors such as climate information and date information, the LSTM and GRU models learn from the historical load profile for the past week to predict the load profile for the next 24 h. Their model structure is similar to that shown in <xref ref-type="fig" rid="F2">Figure 2</xref>, with the only difference being that they use LSTM and GRU instead of TPA-LSTM to process time-series data. The TPA-LSTM model predicts the load profile for the next 24 h by learning the impact of external factors on future loads while simultaneously extracting the features of historical load curves based on the temporal pattern attention mechanism.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Description of model scenarios.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model</th>
<th align="center">Brief description</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">MLP</td>
<td align="left">The model inputs include climate information and date information, without considerations of previous load information, while the model output is a 24-h load profile. It removes Module 1 in <xref ref-type="fig" rid="F2">Figure 2</xref>
</td>
</tr>
<tr>
<td align="center">LSTM</td>
<td align="left">The model inputs include climate information, date information, and the load information from the previous week. The model output is a 24-h load profile. Module 1 in <xref ref-type="fig" rid="F2">Figure 2</xref> is replaced with the LSTM network to process the historical load information</td>
</tr>
<tr>
<td align="center">GRU</td>
<td align="left">The model inputs include climate information, date information, and the load information from the previous week. The model output is a 24-h load profile. Module 1 in <xref ref-type="fig" rid="F2">Figure 2</xref> is replaced with the GRU network to process the historical load information</td>
</tr>
<tr>
<td align="center">TPA-LSTM</td>
<td align="left">The proposed short-term load forecasting model based on the temporal pattern attention mechanism, with its network structure shown in <xref ref-type="fig" rid="F2">Figure 2</xref>
</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4-2">
<title>4.2 Numerical results and discussions</title>
<p>During the model training process, standard deviation normalization transformation is applied to all data features to mitigate the influence of feature units on prediction outcomes. The mathematical expression for this transformation is as follows,<disp-formula id="e10">
<mml:math id="m54">
<mml:mrow>
<mml:msubsup>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(10)</label>
</disp-formula>where <inline-formula id="inf45">
<mml:math id="m55">
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf46">
<mml:math id="m56">
<mml:mrow>
<mml:msubsup>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> represent the original and transformed values of the <inline-formula id="inf47">
<mml:math id="m57">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th sample in the <inline-formula id="inf48">
<mml:math id="m58">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th feature of data, respectively; <inline-formula id="inf49">
<mml:math id="m59">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>x</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf50">
<mml:math id="m60">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> represent the mean and standard deviation of the <inline-formula id="inf51">
<mml:math id="m61">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th feature of data, respectively.</p>
<p>After all data is transformed, the models can be trained, with the mean squared error (MSE) as the training loss function, as follows,<disp-formula id="e11">
<mml:math id="m62">
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(11)</label>
</disp-formula>where <inline-formula id="inf52">
<mml:math id="m63">
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the total number of samples for training; <inline-formula id="inf53">
<mml:math id="m64">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf54">
<mml:math id="m65">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are the true value and the predicted value of the <inline-formula id="inf55">
<mml:math id="m66">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> th sample.</p>
<p>The training process of the four models is shown in <xref ref-type="fig" rid="F4">Figure 4</xref>. It is clear that all models converge at the end of the training.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Comparison of model training processes.</p>
</caption>
<graphic xlink:href="fenrg-11-1227979-g004.tif"/>
</fig>
<p>For visualization, the last week of data in the test set is selected to compare the predictive performance of the four models. These predicted results are denormalized back to the original scale, as shown in <xref ref-type="fig" rid="F5">Figure 5</xref>. It is evident that the MLP model, which only focuses on climate information, economic factors, and date information, cannot accurately predict the load profile. The LSTM and GRU, two special types of RNNs, can fit the load profile to a certain extent based on the extraction of external factors, but still have significant errors. However, the TPA-LSTM model can not only capture the changes in the load itself but also pay attention to the impact of external factors on the load. The attention mechanism can focus on high-value features, thereby accurately predicting the electric load.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Comparison of model forecasting results.</p>
</caption>
<graphic xlink:href="fenrg-11-1227979-g005.tif"/>
</fig>
<p>To comprehensively evaluate the performance of the four models, the mean absolute percentage error (MAPE), mean absolute error (MAE), and root mean squared error (RMSE) are used as indicators to statistically analyze the predictions of the four models on the test set, as follows,<disp-formula id="e12">
<mml:math id="m67">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:mfenced open="|" close="|" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(12)</label>
</disp-formula>
<disp-formula id="e13">
<mml:math id="m68">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:mfenced open="|" close="|" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(13)</label>
</disp-formula>
<disp-formula id="e14">
<mml:math id="m69">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
<label>(14)</label>
</disp-formula>
</p>
<p>The statistical results are shown in <xref ref-type="table" rid="T2">Table 2</xref>. It can be seen that the proposed TPA-LSTM model demonstrates superior predictive performance in all indicators. The reason is that this model not only takes into account the external factors for load variations, but also considers the influence from the historical load profile. More importantly, it employs the TPA mechanism that can identify valuable features and eliminate irrelevant ones to improve the model performance.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Statistics of model performance.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Model</th>
<th align="left">MAPE (%)</th>
<th align="left">MAE (MW)</th>
<th align="left">RMSE (MW)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">MLP</td>
<td align="left">10.54</td>
<td align="left">124.24</td>
<td align="left">156.64</td>
</tr>
<tr>
<td align="left">LSTM</td>
<td align="left">8.56</td>
<td align="left">96.41</td>
<td align="left">148.85</td>
</tr>
<tr>
<td align="left">GRU</td>
<td align="left">8.10</td>
<td align="left">91.30</td>
<td align="left">144.18</td>
</tr>
<tr>
<td align="left">TPA-LSTM</td>
<td align="left">
<bold>4.41</bold>
</td>
<td align="left">
<bold>51.43</bold>
</td>
<td align="left">
<bold>73.73</bold>
</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The bold values indicate the best performance.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>In summary, the model proposed in this article processes historical load data based on the TPA mechanism to establish a baseline for load forecasting. It also uses fully-connected layers to extract the impact of external factors (such as regional climate information, economic factors, and date information) on the load, thereby accurately predicting the load curve. According to the MAPE statistics, the model has an error of only 4.41%, making it suitable for use in actual load forecasting models.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>Accurate load forecasting is crucial for ensuring the stable operation of smart grids. This study introduces a short-term load forecasting approach utilizing the TPA mechanism to fulfill the goal. First, the LSTM network is applied to process historical load time-series data, while the TPA mechanism is incorporated to extract temporal feature correlations, thereby enhancing the model&#x2019;s learning capability. Second, FCLs are employed to analyze external factors such as climate, economy, and dates, investigating their influence on future load patterns and establishing a high-precision forecasting model. Last, the proposed method is simulated and compared through a realistic dataset from Panama. The simulation results demonstrate that the proposed load forecasting approach achieves the lowest errors in terms of MAPE, MAE, and RMSE indicators, displaying the closest alignment with the actual load values. Thus, this method holds significant potential for practical load forecasting applications.</p>
<p>It is worth noting that the proposed load forecasting method still has two unresolved challenges. As a result, future work will focus on the integration of various sampling frequency meters into load forecasting methods, as well as the development of highly robust load forecasting techniques that are able to handle unconventional emergencies.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/Supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s7">
<title>Author contributions</title>
<p>WL and JR proposed the research methods, wrote the code for experiments, and wrote the manuscript. YX and QW conceived the overall structure and framework of the article. JL, RW, and JZ provided constructive discussions and technical support for the manuscript. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="s8">
<title>Funding</title>
<p>This work was supported in part by the National Natural Science Foundation of China under Grant 72171206, 71931003, and 72061147004; in part by the Shenzhen Institute of Artificial Intelligence and Robotics for Society; in part by the Shenzhen Key Lab of Crowd Intelligence Empowered Low-Carbon Energy Network under Grant ZDSYS20220606100601002.</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>Authors WL, YX, QW, JL, and RW were employed by the Shenzhen Power Supply Co., Ltd.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Akorede</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hizam</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Pouresmaeil</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Distributed energy resources and benefits to the environment</article-title>. <source>Renew. Sustain. energy Rev.</source> <volume>14</volume>, <fpage>724</fpage>&#x2013;<lpage>734</lpage>. <pub-id pub-id-type="doi">10.1016/j.rser.2009.10.025</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Candanedo</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Feldheim</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Deramaix</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Data driven prediction models of energy use of appliances in a low-energy house</article-title>. <source>Energy Build.</source> <volume>140</volume>, <fpage>81</fpage>&#x2013;<lpage>97</lpage>. <pub-id pub-id-type="doi">10.1016/j.enbuild.2017.01.083</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chitsaz</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Shaker</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zareipour</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wood</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Amjady</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Short-term electricity load forecasting of buildings in microgrids</article-title>. <source>Energy Build.</source> <volume>99</volume>, <fpage>50</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1016/j.enbuild.2015.04.011</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gilanifar</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Sriram</surname>
<given-names>L. M. K.</given-names>
</name>
<name>
<surname>Ozguven</surname>
<given-names>E. E.</given-names>
</name>
<name>
<surname>Arghandeh</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Multitask Bayesian spatiotemporal Gaussian processes for short-term load forecasting</article-title>. <source>IEEE Trans. Industrial Electron.</source> <volume>67</volume> (<issue>6</issue>), <fpage>5132</fpage>&#x2013;<lpage>5143</lpage>. <pub-id pub-id-type="doi">10.1109/tie.2019.2928275</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hartvigsson</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Jakobsson</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Taljegard</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Odenberger</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Comparison and analysis of GPS measured electric vehicle charging demand: The case of western Sweden and Seattle</article-title>. <source>Front. Energy Res.</source> <volume>9</volume>, <fpage>730242</fpage>. <pub-id pub-id-type="doi">10.3389/fenrg.2021.730242</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hochreiter</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Long short-term memory</article-title>. <source>Neural Comput.</source> <volume>9</volume>, <fpage>1735</fpage>&#x2013;<lpage>1780</lpage>. <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hossain</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Khan</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Un-Noor</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sikander</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sunny</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Application of big data and machine learning in smart grid, and associated security concerns: A review</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>13960</fpage>&#x2013;<lpage>13988</lpage>. <pub-id pub-id-type="doi">10.1109/access.2019.2894819</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zheng</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Stochastic-weighted robust optimization based bilayer operation of a multi-energy building microgrid considering practical thermal loads and battery degradation</article-title>. <source>IEEE Trans. Sustain. Energy</source> <volume>13</volume> (<issue>2</issue>), <fpage>668</fpage>&#x2013;<lpage>682</lpage>. <pub-id pub-id-type="doi">10.1109/tste.2021.3126776</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2023a</year>). <article-title>Distributed tri-layer risk-averse stochastic game approach for energy trading among multi-energy microgrids</article-title>. <source>Appl. Energy</source> <volume>331</volume>, <fpage>120282</fpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2022.120282</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Xiao</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2023b</year>). <article-title>Coordinated preparation and recovery of a post-disaster multi-energy distribution system considering thermal inertia and diverse uncertainties</article-title>. <source>Appl. Energy</source> <volume>336</volume>, <fpage>120736</fpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2023.120736</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lusis</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Khalilpour</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Andrew</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Liebman</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Short-term residential load forecasting: Impact of calendar effects and forecast granularity</article-title>. <source>Appl. Energy</source> <volume>205</volume>, <fpage>654</fpage>&#x2013;<lpage>669</lpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2017.07.114</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Massana</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Pous</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Burgas</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Melendez</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Colomer</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Short-term load forecasting in a non-residential building contrasting models and attributes</article-title>. <source>Energy Build.</source> <volume>92</volume>, <fpage>322</fpage>&#x2013;<lpage>330</lpage>. <pub-id pub-id-type="doi">10.1016/j.enbuild.2015.02.007</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Menezes</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cripps</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Buswell</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wright</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bouchlaghem</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Estimating the energy consumption and power demand of small power equipment in office buildings</article-title>. <source>Energy Build.</source> <volume>75</volume>, <fpage>199</fpage>&#x2013;<lpage>209</lpage>. <pub-id pub-id-type="doi">10.1016/j.enbuild.2014.02.011</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2022a</year>). <article-title>Time-varying price elasticity of demand estimation for demand-side smart dynamic pricing</article-title>. <source>Appl. Energy</source> <volume>322</volume>, <fpage>119520</fpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2022.119520</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2022b</year>). <article-title>An inertia-based data recovery scheme for false data injection attack</article-title>. <source>IEEE Trans. Industrial Inf.</source> <volume>18</volume>, <fpage>7814</fpage>&#x2013;<lpage>7823</lpage>. <pub-id pub-id-type="doi">10.1109/tii.2022.3146859</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wen</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2023a</year>). <article-title>Super-resolution perception assisted spatiotemporal graph deep learning against false data injection attacks in smart grid</article-title>. <source>IEEE Trans. Smart Grid</source>, <fpage>1</fpage>&#x2013;<lpage>17</lpage>. <!-- NOTE(review): original DOI (10.1007/s10734-023-01031-x) pointed to a Springer Higher Education article; corrected to the IEEE TSG DOI — verify against IEEE Xplore --> <pub-id pub-id-type="doi">10.1109/tsg.2023.3241268</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2023b</year>). <article-title>Graph deep learning-based retail dynamic pricing for demand response</article-title>. <source>IEEE Trans. Smart Grid</source>, <fpage>1</fpage>&#x2013;<lpage>17</lpage>. <!-- NOTE(review): this DOI is incorrect — 10.1007/s10734-023-01031-x resolves to a Springer Higher Education article and is duplicated from ref B16; look up the correct IEEE TSG DOI (10.1109/tsg.…) on IEEE Xplore --> <pub-id pub-id-type="doi">10.1007/s10734-023-01031-x</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2023c</year>). <article-title>Assessment of spatiotemporally coordinated cyberattacks on renewable energy forecasting in smart energy system</article-title>. <source>Appl. Energy</source> <volume>347</volume>, <fpage>121470</fpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2023.121470</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruzic</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vuckovic</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Nikolic</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Weather sensitive method for short term load forecasting in electric power utility of Serbia</article-title>. <source>IEEE Trans. Power Syst.</source> <volume>18</volume>, <fpage>1581</fpage>&#x2013;<lpage>1586</lpage>. <pub-id pub-id-type="doi">10.1109/tpwrs.2003.811172</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Virote</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Neves-Silva</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Stochastic models for building energy prediction based on occupant behavior assessment</article-title>. <source>Energy Build.</source> <volume>53</volume>, <fpage>183</fpage>&#x2013;<lpage>193</lpage>. <pub-id pub-id-type="doi">10.1016/j.enbuild.2012.06.001</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Deep learning-based interval state estimation of AC smart grids against sparse cyber attacks</article-title>. <source>IEEE Trans. Industrial Inf.</source> <volume>14</volume>, <fpage>4766</fpage>&#x2013;<lpage>4778</lpage>. <pub-id pub-id-type="doi">10.1109/tii.2018.2804669</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Cao</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2019a</year>). <article-title>Deep learning aided interval state prediction for improving cyber security in energy internet</article-title>. <source>Energy</source> <volume>174</volume>, <fpage>1292</fpage>&#x2013;<lpage>1304</lpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2019.03.009</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ruan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Raza</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2019b</year>). <article-title>Dynamic data injection attack detection of cyber physical power systems with uncertainties</article-title>. <source>IEEE Trans. Industrial Inf.</source> <volume>15</volume>, <fpage>5505</fpage>&#x2013;<lpage>5518</lpage>. <pub-id pub-id-type="doi">10.1109/tii.2019.2902163</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xian</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A novel forward operator-based Bayesian recurrent neural network-based short-term net load demand forecasting considering demand-side renewable energy</article-title>. <source>Front. Energy Res.</source> <volume>10</volume>, <fpage>963657</fpage>. <pub-id pub-id-type="doi">10.3389/fenrg.2022.963657</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Integrated grid, coal-fired power generation retirement and GESS planning towards a low-carbon economy</article-title>. <source>Int. J. Electr. Power Energy Syst.</source> <volume>124</volume>, <fpage>106409</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijepes.2020.106409</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Flexible integrated network planning considering echelon utilization of second-life of used electric vehicle batteries</article-title>. <source>IEEE Trans. Transp. Electrification</source> <volume>8</volume>, <fpage>263</fpage>&#x2013;<lpage>276</lpage>. <pub-id pub-id-type="doi">10.1109/tte.2021.3068121</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Wen</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>L&#xfc;</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Bridging the gap between complex networks and smart grids</article-title>. <source>J. Control Decis.</source> <volume>1</volume>, <fpage>102</fpage>&#x2013;<lpage>114</lpage>. <pub-id pub-id-type="doi">10.1080/23307706.2014.885293</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Trading-oriented battery energy storage planning for distribution market</article-title>. <source>Int. J. Electr. Power Energy Syst.</source> <volume>129</volume>, <fpage>106848</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijepes.2021.106848</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>