Updated thesis

This commit is contained in:
Victor Mylle
2024-05-19 22:05:15 +00:00
parent 26807eae22
commit 74729f6b85
44 changed files with 2039 additions and 6013 deletions

View File

@@ -0,0 +1,440 @@
@online{noauthor_elia_nodate,
title = {Elia: de electriciteitsmarkt en -systeem},
url = {https://www.elia.be/nl/elektriciteitsmarkt-en-systeem},
shorttitle = {Elia},
abstract = {Elia deelt de Europese ambitie om een geïntegreerde elektriciteitsmarkt tot stand te brengen en verschillende marktspelers aan te moedigen tot het aanbieden van systeemdiensten.},
urldate = {2023-06-23},
langid = {dutch},
file = {Snapshot:/Users/victormylle/Zotero/storage/7QY94WTW/elektriciteitsmarkt-en-systeem.html:text/html},
}
@article{toubeau_interpretable_2022,
title = {Interpretable Probabilistic Forecasting of Imbalances in Renewable-Dominated Electricity Systems},
volume = {13},
issn = {1949-3029, 1949-3037},
url = {https://ieeexplore.ieee.org/document/9464660/},
doi = {10.1109/TSTE.2021.3092137},
abstract = {High penetration of renewable energy such as wind power and photovoltaic ({PV}) requires large amounts of flexibility to balance their inherent variability. Making an accurate prediction of the future power system imbalance is an efficient approach to reduce these balancing costs. However, the imbalance is affected not only by renewables but also by complex market dynamics and technology constraints, for which the dependence structure is unknown. Therefore, this paper introduces a new architecture of sequence-to-sequence recurrent neural networks to efficiently process time-based information in an interpretable fashion. To that end, the selection of relevant variables is internalized into the model, which provides insights on the relative importance of individual inputs, while bypassing the cumbersome need for data preprocessing. Then, the model is further enriched with an attention mechanism that is tailored to focus on the relevant contextual information, which is useful to better understand the underlying dynamics such as seasonal patterns. Outcomes show that adding modules to generate explainable forecasts makes the model more efficient and robust, thus leading to enhanced performance.},
pages = {1267--1277},
number = {2},
journaltitle = {{IEEE} Transactions on Sustainable Energy},
shortjournal = {{IEEE} Trans. Sustain. Energy},
	author = {Toubeau, Jean-François and Bottieau, Jérémie and Wang, Yi and Vallée, François},
urldate = {2023-09-28},
date = {2022-04},
langid = {english},
file = {Toubeau et al. - 2022 - Interpretable Probabilistic Forecasting of Imbalan.pdf:/Users/victormylle/Zotero/storage/WA7DZBXX/Toubeau et al. - 2022 - Interpretable Probabilistic Forecasting of Imbalan.pdf:application/pdf},
}
@article{bond-taylor_deep_2022,
title = {Deep Generative Modelling: A Comparative Review of {VAEs}, {GANs}, Normalizing Flows, Energy-Based and Autoregressive Models},
volume = {44},
issn = {0162-8828, 2160-9292, 1939-3539},
url = {https://ieeexplore.ieee.org/document/9555209/},
doi = {10.1109/TPAMI.2021.3116668},
shorttitle = {Deep Generative Modelling},
abstract = {Deep generative models are a class of techniques that train deep neural networks to model the distribution of training samples. Research has fragmented into various interconnected approaches, each of which make trade-offs including run-time, diversity, and architectural restrictions. In particular, this compendium covers energy-based models, variational autoencoders, generative adversarial networks, autoregressive models, normalizing flows, in addition to numerous hybrid approaches. These techniques are compared and contrasted, explaining the premises behind each and how they are interrelated, while reviewing current state-of-the-art advances and implementations.},
pages = {7327--7347},
number = {11},
journaltitle = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
shortjournal = {{IEEE} Trans. Pattern Anal. Mach. Intell.},
author = {Bond-Taylor, Sam and Leach, Adam and Long, Yang and Willcocks, Chris G.},
urldate = {2023-10-11},
date = {2022-11-01},
langid = {english},
file = {Bond-Taylor et al. - 2022 - Deep Generative Modelling A Comparative Review of.pdf:/Users/victormylle/Zotero/storage/UNAST9UC/Bond-Taylor et al. - 2022 - Deep Generative Modelling A Comparative Review of.pdf:application/pdf},
}
@article{lecun_tutorial_nodate,
title = {A Tutorial on Energy-Based Learning},
abstract = {Energy-Based Models ({EBMs}) capture dependencies between variables by associating a scalar energy to each configuration of the variables. Inference consists in clamping the value of observed variables and finding configurations of the remaining variables that minimize the energy. Learning consists in finding an energy function in which observed configurations of the variables are given lower energies than unobserved ones. The {EBM} approach provides a common theoretical framework for many learning models, including traditional discriminative and generative approaches, as well as graph-transformer networks, conditional random fields, maximum margin Markov networks, and several manifold learning methods.},
	author = {{LeCun}, Yann and Chopra, Sumit and Hadsell, Raia and Ranzato, Marc'Aurelio and Huang, Fu Jie},
langid = {english},
file = {LeCun et al. - A Tutorial on Energy-Based Learning.pdf:/Users/victormylle/Zotero/storage/8932975Z/LeCun et al. - A Tutorial on Energy-Based Learning.pdf:application/pdf},
}
@article{gatta_neural_2022,
title = {Neural networks generative models for time series},
volume = {34},
issn = {1319-1578},
url = {https://www.sciencedirect.com/science/article/pii/S1319157822002361},
doi = {10.1016/j.jksuci.2022.07.010},
abstract = {Nowadays, time series are a widely-exploited methodology to describe phenomena belonging to different fields. In fact, electrical consumption can be explained, from a data analysis perspective, with a time series, as for healthcare, financial index, air pollution or parking occupancy rate. Applying time series to different areas of interest has contributed to the exponential rise in interest by both practitioners and academics. On the other side, especially regarding static data, a new trend is acquiring even more relevance in the data analysis community, namely neural network generative approaches. Generative approaches aim to generate new, fake samples given a dataset of real data by implicitly learning the probability distribution underlining data. In this way, several tasks can be addressed, such as data augmentation, class imbalance, anomaly detection or privacy. However, even if this topic is relatively well-established in the literature related to static data regarding time series, the debate is still open. This paper contributes to this debate by comparing four neural network-based generative approaches for time series belonging to the state-of-the-art methodologies in literature. The comparison has been carried out on five public and private datasets and on different time granularities, with a total number of 13 experimental scenario. Our work aims to provide a wide overview of the performances of the compared methodologies when working in different conditions like seasonality, strong autoregressive components and long or short sequences.},
pages = {7920--7939},
number = {10},
journaltitle = {Journal of King Saud University - Computer and Information Sciences},
shortjournal = {Journal of King Saud University - Computer and Information Sciences},
author = {Gatta, Federico and Giampaolo, Fabio and Prezioso, Edoardo and Mei, Gang and Cuomo, Salvatore and Piccialli, Francesco},
urldate = {2023-10-11},
date = {2022-11-01},
keywords = {Deep learning, Generative adversarial networks, Healthcare, Industry 4.0, Time series},
file = {Full Text:/Users/victormylle/Zotero/storage/ZU6BCM28/Gatta et al. - 2022 - Neural networks generative models for time series.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/2HSHCJN7/S1319157822002361.html:text/html},
}
@article{dumas_deep_2022,
title = {A deep generative model for probabilistic energy forecasting in power systems: normalizing flows},
volume = {305},
issn = {03062619},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909},
doi = {10.1016/j.apenergy.2021.117871},
shorttitle = {A deep generative model for probabilistic energy forecasting in power systems},
abstract = {Greater direct electrification of end-use sectors with a higher share of renewables is one of the pillars to power a carbon-neutral society by 2050. However, in contrast to conventional power plants, renewable energy is subject to uncertainty raising challenges for their interaction with power systems. Scenario-based probabilistic forecasting models have become a vital tool to equip decision-makers. This paper presents to the power systems forecasting practitioners a recent deep learning technique, the normalizing flows, to produce accurate scenario-based probabilistic forecasts that are crucial to face the new challenges in power systems applications. The strength of this technique is to directly learn the stochastic multivariate distribution of the underlying process by maximizing the likelihood. Through comprehensive empirical evaluations using the open data of the Global Energy Forecasting Competition 2014, we demonstrate that this methodology is competitive with other state-of-the-art deep learning generative models: generative adversarial networks and variational autoencoders. The models producing weather-based wind, solar power, and load scenarios are properly compared in terms of forecast value by considering the case study of an energy retailer and quality using several complementary metrics. The numerical experiments are simple and easily reproducible. Thus, we hope it will encourage other forecasting practitioners to test and use normalizing flows in power system applications such as bidding on electricity markets, scheduling power systems with high renewable energy sources penetration, energy management of virtual power plan or microgrids, and unit commitment.},
pages = {117871},
journaltitle = {Applied Energy},
shortjournal = {Applied Energy},
author = {Dumas, Jonathan and Wehenkel, Antoine and Lanaspeze, Damien and Cornélusse, Bertrand and Sutera, Antonio},
urldate = {2023-10-11},
date = {2022-01},
langid = {english},
file = {Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:/Users/victormylle/Zotero/storage/3CW249QI/Dumas et al. - 2022 - A deep generative model for probabilistic energy f.pdf:application/pdf},
}
@article{lu_scenarios_2022,
title = {Scenarios modelling for forecasting day-ahead electricity prices: Case studies in Australia},
volume = {308},
issn = {0306-2619},
url = {https://www.sciencedirect.com/science/article/pii/S0306261921015555},
doi = {10.1016/j.apenergy.2021.118296},
shorttitle = {Scenarios modelling for forecasting day-ahead electricity prices},
	abstract = {Electricity prices in spot markets are volatile and can be affected by various factors, such as generation and demand, system contingencies, local weather patterns, bidding strategies of market participants, and uncertain renewable energy outputs. Because of these factors, electricity price forecasting is challenging. This paper proposes a scenario modeling approach to improve forecasting accuracy, conditioning time series generative adversarial networks on external factors. After data pre-processing and condition selection, a conditional {TSGAN} or {CTSGAN} is designed to forecast electricity prices. Wasserstein Distance, weights limitation, and {RMSProp} optimizer are used to ensure that the {CTSGAN} training process is stable. By changing the dimensionality of random noise input, the point forecasting model can be transformed into a probabilistic forecasting model. For electricity price point forecasting, the proposed {CTSGAN} model has better accuracy and has better generalization ability than the {TSGAN} and other deep learning methods. For probabilistic forecasting, the proposed {CTSGAN} model can significantly improve the continuously ranked probability score and Winkler score. The effectiveness and superiority of the proposed {CTSGAN} forecasting model are verified by case studies.},
pages = {118296},
journaltitle = {Applied Energy},
shortjournal = {Applied Energy},
author = {Lu, Xin and Qiu, Jing and Lei, Gang and Zhu, Jianguo},
urldate = {2023-10-13},
date = {2022-02-15},
keywords = {Generative adversarial networks, Conditions, Electricity Price, Point forecasting, Probabilistic forecasting},
file = {Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:/Users/victormylle/Zotero/storage/3XL3T253/Lu et al. - 2022 - Scenarios modelling for forecasting day-ahead elec.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/9K2RFGGU/S0306261921015555.html:text/html},
}
@article{gabrielli_data-driven_2022,
title = {Data-driven modeling for long-term electricity price forecasting},
volume = {244},
issn = {03605442},
url = {https://linkinghub.elsevier.com/retrieve/pii/S036054422200010X},
doi = {10.1016/j.energy.2022.123107},
abstract = {Estimating the financial viability of renewable energy investments requires the availability of long-term, finely-resolved electricity prices over the investment lifespan. This entails, however, two major challenges: (i) the combination of extensive time horizons and fine time resolutions, and (ii) the prediction of out-of-sample electricity prices in future energy and market scenarios, or shifts in pricing regime, that were not observed in the past. This paper tackles such challenges by proposing a data-driven model for the long-term prediction of electricity market prices that is based on Fourier analysis. The electricity price is decomposed into components leading to its base evolution, which are described through the amplitudes of the main frequencies of the Fourier series, and components leading to high price volatility, which are described by the residual frequencies. The former are predicted via a regression model that uses as input annual values of relevant energy and market quantities, such as electricity generation, prices and demands. The proposed method shows capable of (i) predicting the most relevant dynamics of the electricity price; (ii) generalization by capturing the market mechanisms of previously unseen electricity markets. These findings support the relevance and validity of data-driven, finely-resolved, long-term predictions and highlight the potential for hybrid data-driven and market-based models.},
pages = {123107},
journaltitle = {Energy},
shortjournal = {Energy},
author = {Gabrielli, Paolo and Wüthrich, Moritz and Blume, Steffen and Sansavini, Giovanni},
urldate = {2023-10-15},
date = {2022-04},
langid = {english},
file = {Gabrielli et al. - 2022 - Data-driven modeling for long-term electricity pri.pdf:/Users/victormylle/Zotero/storage/YHDVP399/Gabrielli et al. - 2022 - Data-driven modeling for long-term electricity pri.pdf:application/pdf},
}
@misc{kollovieh_predict_2023,
title = {Predict, Refine, Synthesize: Self-Guiding Diffusion Models for Probabilistic Time Series Forecasting},
url = {http://arxiv.org/abs/2307.11494},
shorttitle = {Predict, Refine, Synthesize},
abstract = {Diffusion models have achieved state-of-the-art performance in generative modeling tasks across various domains. Prior works on time series diffusion models have primarily focused on developing conditional models tailored to specific forecasting or imputation tasks. In this work, we explore the potential of task-agnostic, unconditional diffusion models for several time series applications. We propose {TSDiff}, an unconditionally trained diffusion model for time series. Our proposed self-guidance mechanism enables conditioning {TSDiff} for downstream tasks during inference, without requiring auxiliary networks or altering the training procedure. We demonstrate the effectiveness of our method on three different time series tasks: forecasting, refinement, and synthetic data generation. First, we show that {TSDiff} is competitive with several task-specific conditional forecasting methods (predict). Second, we leverage the learned implicit probability density of {TSDiff} to iteratively refine the predictions of base forecasters with reduced computational overhead over reverse diffusion (refine). Notably, the generative performance of the model remains intact -- downstream forecasters trained on synthetic samples from {TSDiff} outperform forecasters that are trained on samples from other state-of-the-art generative time series models, occasionally even outperforming models trained on real data (synthesize).},
number = {{arXiv}:2307.11494},
publisher = {{arXiv}},
author = {Kollovieh, Marcel and Ansari, Abdul Fatir and Bohlke-Schneider, Michael and Zschiegner, Jasper and Wang, Hao and Wang, Yuyang},
urldate = {2023-10-15},
date = {2023-07-21},
eprinttype = {arxiv},
eprint = {2307.11494 [cs, stat]},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning, {TODO}},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/PBVHEPD9/2307.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QIBWKG57/Kollovieh et al. - 2023 - Predict, Refine, Synthesize Self-Guiding Diffusio.pdf:application/pdf},
}
@misc{rasul_autoregressive_2021,
title = {Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting},
url = {http://arxiv.org/abs/2101.12072},
abstract = {In this work, we propose {\textbackslash}texttt\{{TimeGrad}\}, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.},
number = {{arXiv}:2101.12072},
publisher = {{arXiv}},
author = {Rasul, Kashif and Seward, Calvin and Schuster, Ingmar and Vollgraf, Roland},
urldate = {2023-10-15},
date = {2021-02-02},
eprinttype = {arxiv},
eprint = {2101.12072 [cs]},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/8LIRWZ4G/2101.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QPPFJVR5/Rasul et al. - 2021 - Autoregressive Denoising Diffusion Models for Mult.pdf:application/pdf},
}
@article{cramer_normalizing_2022,
title = {Normalizing flow-based day-ahead wind power scenario generation for profitable and reliable delivery commitments by wind farm operators},
volume = {166},
issn = {0098-1354},
url = {https://www.sciencedirect.com/science/article/pii/S0098135422002617},
doi = {10.1016/j.compchemeng.2022.107923},
	abstract = {We present a specialized scenario generation method that utilizes forecast information to generate scenarios for day-ahead scheduling problems. In particular, we use normalizing flows to generate wind power scenarios by sampling from a conditional distribution that uses wind speed forecasts to tailor the scenarios to a specific day. We apply the generated scenarios in a stochastic day-ahead bidding problem of a wind electricity producer and analyze whether the scenarios yield profitable decisions. Compared to Gaussian copulas and Wasserstein-generative adversarial networks, the normalizing flow successfully narrows the range of scenarios around the daily trends while maintaining a diverse variety of possible realizations. In the stochastic day-ahead bidding problem, the conditional scenarios from all methods lead to significantly more stable profitable results compared to an unconditional selection of historical scenarios. The normalizing flow consistently obtains the highest profits, even for small sets of scenarios.},
pages = {107923},
journaltitle = {Computers \& Chemical Engineering},
shortjournal = {Computers \& Chemical Engineering},
author = {Cramer, Eike and Paeleke, Leonard and Mitsos, Alexander and Dahmen, Manuel},
urldate = {2023-10-18},
date = {2022-10-01},
keywords = {Scenario generation, Stability, Stochastic programming, Wind power},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/PT76E9DL/S0098135422002617.html:text/html;Submitted Version:/Users/victormylle/Zotero/storage/M9KFSG3M/Cramer et al. - 2022 - Normalizing flow-based day-ahead wind power scenar.pdf:application/pdf},
}
@inproceedings{zhang_diffusion_2021,
title = {Diffusion Normalizing Flow},
volume = {34},
url = {https://proceedings.neurips.cc/paper/2021/hash/876f1f9954de0aa402d91bb988d12cd4-Abstract.html},
	abstract = {We present a novel generative modeling method called diffusion normalizing flow based on stochastic differential equations ({SDEs}). The algorithm consists of two neural {SDEs}: a forward {SDE} that gradually adds noise to the data to transform the data into Gaussian random noise, and a backward {SDE} that gradually removes the noise to sample from the data distribution. By jointly training the two neural {SDEs} to minimize a common cost function that quantifies the difference between the two, the backward {SDE} converges to a diffusion process that starts with a Gaussian distribution and ends with the desired data distribution. Our method is closely related to normalizing flow and diffusion probabilistic models, and can be viewed as a combination of the two. Compared with normalizing flow, diffusion normalizing flow is able to learn distributions with sharp boundaries. Compared with diffusion probabilistic models, diffusion normalizing flow requires fewer discretization steps and thus has better sampling efficiency. Our algorithm demonstrates competitive performance in both high-dimension data density estimation and image generation tasks.},
pages = {16280--16291},
booktitle = {Advances in Neural Information Processing Systems},
publisher = {Curran Associates, Inc.},
author = {Zhang, Qinsheng and Chen, Yongxin},
urldate = {2023-10-18},
date = {2021},
keywords = {{TODO}},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/U45EUFZU/Zhang and Chen - 2021 - Diffusion Normalizing Flow.pdf:application/pdf},
}
@misc{rezende_variational_2016,
title = {Variational Inference with Normalizing Flows},
url = {http://arxiv.org/abs/1505.05770},
abstract = {The choice of approximate posterior distribution is one of the core problems in variational inference. Most applications of variational inference employ simple families of posterior approximations in order to allow for efficient inference, focusing on mean-field or other simple structured approximations. This restriction has a significant impact on the quality of inferences made using variational methods. We introduce a new approach for specifying flexible, arbitrarily complex and scalable approximate posterior distributions. Our approximations are distributions constructed through a normalizing flow, whereby a simple initial density is transformed into a more complex one by applying a sequence of invertible transformations until a desired level of complexity is attained. We use this view of normalizing flows to develop categories of finite and infinitesimal flows and provide a unified view of approaches for constructing rich posterior approximations. We demonstrate that the theoretical advantages of having posteriors that better match the true posterior, combined with the scalability of amortized variational approaches, provides a clear improvement in performance and applicability of variational inference.},
number = {{arXiv}:1505.05770},
publisher = {{arXiv}},
author = {Rezende, Danilo Jimenez and Mohamed, Shakir},
urldate = {2023-10-18},
date = {2016-06-14},
eprinttype = {arxiv},
eprint = {1505.05770 [cs, stat]},
note = {version: 6},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning, Statistics - Computation, Statistics - Methodology},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/2J7MPVV5/1505.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/GQWIFAAN/Rezende and Mohamed - 2016 - Variational Inference with Normalizing Flows.pdf:application/pdf},
}
@article{sweidan_probabilistic_nodate,
title = {Probabilistic Prediction in scikit-learn},
abstract = {Adding confidence measures to predictive models should increase the trustworthiness, but only if the models are well-calibrated. Historically, some algorithms like logistic regression, but also neural networks, have been considered to produce well-calibrated probability estimates off-the-shelf. Other techniques, like decision trees and Naive Bayes, on the other hand, are infamous for being significantly overconfident in their probabilistic predictions. In this paper, a large experimental study is conducted to investigate how well-calibrated models produced by a number of algorithms in the scikit-learn library are out-of-the-box, but also if either the built-in calibration techniques Platt scaling and isotonic regression, or Venn-Abers, can be used to improve the calibration. The results show that of the seven algorithms evaluated, the only one obtaining well-calibrated models without the external calibration is logistic regression. All other algorithms, i.e., decision trees, adaboost, gradient boosting, {kNN}, naive Bayes and random forest benefit from using any of the calibration techniques. In particular, decision trees, Naive Bayes and the boosted models are substantially improved using external calibration. From a practitioners perspective, the obvious recommendation becomes to incorporate calibration when using probabilistic prediction. Comparing the different calibration techniques, Platt scaling and {VennAbers} generally outperform isotonic regression, on these rather small datasets. Finally, the unique ability of Venn-Abers to output not only well-calibrated probability estimates, but also the confidence in these estimates is demonstrated.},
author = {Sweidan, Dirar and Johansson, Ulf},
langid = {english},
file = {Sweidan and Johansson - Probabilistic Prediction in scikit-learn.pdf:/Users/victormylle/Zotero/storage/8LDMB83T/Sweidan and Johansson - Probabilistic Prediction in scikit-learn.pdf:application/pdf},
}
@article{baskan_scenario-based_2023,
title = {A Scenario-Based Model Comparison for Short-Term Day-Ahead Electricity Prices in Times of Economic and Political Tension},
volume = {16},
issn = {1999-4893},
url = {https://www.mdpi.com/1999-4893/16/4/177},
doi = {10.3390/a16040177},
abstract = {In recent years, energy prices have become increasingly volatile, making it more challenging to predict them accurately. This uncertain market trend behavior makes it harder for market participants, e.g., power plant dispatchers, to make reliable decisions. Machine learning ({ML}) has recently emerged as a powerful artificial intelligence ({AI}) technique to get reliable predictions in particularly volatile and unforeseeable situations. This development makes {ML} models an attractive complement to other approaches that require more extensive human modeling effort and assumptions about market mechanisms. This study investigates the application of machine and deep learning approaches to predict day-ahead electricity prices for a 7-day horizon on the German spot market to give power plants enough time to ramp up or down. A qualitative and quantitative analysis is conducted, assessing model performance concerning the forecast horizon and their robustness depending on the selected hyperparameters. For evaluation purposes, three test scenarios with different characteristics are manually chosen. Various models are trained, optimized, and compared with each other using common performance metrics. This study shows that deep learning models outperform tree-based and statistical models despite or because of the volatile energy prices.},
pages = {177},
number = {4},
journaltitle = {Algorithms},
shortjournal = {Algorithms},
author = {Baskan, Denis E. and Meyer, Daniel and Mieck, Sebastian and Faubel, Leonhard and Klöpper, Benjamin and Strem, Nika and Wagner, Johannes A. and Koltermann, Jan J.},
urldate = {2023-10-22},
date = {2023-03-24},
langid = {english},
file = {Baskan et al. - 2023 - A Scenario-Based Model Comparison for Short-Term D.pdf:/Users/victormylle/Zotero/storage/TU5JX5D4/Baskan et al. - 2023 - A Scenario-Based Model Comparison for Short-Term D.pdf:application/pdf},
}
@misc{narayan_regularization_2021,
title = {Regularization Strategies for Quantile Regression},
url = {http://arxiv.org/abs/2102.05135},
abstract = {We investigate different methods for regularizing quantile regression when predicting either a subset of quantiles or the full inverse {CDF}. We show that minimizing an expected pinball loss over a continuous distribution of quantiles is a good regularizer even when only predicting a specific quantile. For predicting multiple quantiles, we propose achieving the classic goal of non-crossing quantiles by using deep lattice networks that treat the quantile as a monotonic input feature, and we discuss why monotonicity on other features is an apt regularizer for quantile regression. We show that lattice models enable regularizing the predicted distribution to a location-scale family. Lastly, we propose applying rate constraints to improve the calibration of the quantile predictions on specific subsets of interest and improve fairness metrics. We demonstrate our contributions on simulations, benchmark datasets, and real quantile regression problems.},
number = {{arXiv}:2102.05135},
publisher = {{arXiv}},
author = {Narayan, Taman and Wang, Serena and Canini, Kevin and Gupta, Maya},
urldate = {2023-11-14},
date = {2021-02-09},
eprinttype = {arxiv},
eprint = {2102.05135 [cs, stat]},
note = {version: 1},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning, Statistics - Methodology},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/DQZGHBIS/2102.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/W6WTUZQ3/Narayan et al. - 2021 - Regularization Strategies for Quantile Regression.pdf:application/pdf},
}
@misc{chung_beyond_2021,
title = {Beyond Pinball Loss: Quantile Methods for Calibrated Uncertainty Quantification},
url = {http://arxiv.org/abs/2011.09588},
shorttitle = {Beyond Pinball Loss},
abstract = {Among the many ways of quantifying uncertainty in a regression setting, specifying the full quantile function is attractive, as quantiles are amenable to interpretation and evaluation. A model that predicts the true conditional quantiles for each input, at all quantile levels, presents a correct and efficient representation of the underlying uncertainty. To achieve this, many current quantile-based methods focus on optimizing the so-called pinball loss. However, this loss restricts the scope of applicable regression models, limits the ability to target many desirable properties (e.g. calibration, sharpness, centered intervals), and may produce poor conditional quantiles. In this work, we develop new quantile methods that address these shortcomings. In particular, we propose methods that can apply to any class of regression model, allow for selecting a trade-off between calibration and sharpness, optimize for calibration of centered intervals, and produce more accurate conditional quantiles. We provide a thorough experimental evaluation of our methods, which includes a high dimensional uncertainty quantification task in nuclear fusion.},
number = {{arXiv}:2011.09588},
publisher = {{arXiv}},
author = {Chung, Youngseog and Neiswanger, Willie and Char, Ian and Schneider, Jeff},
urldate = {2023-12-14},
date = {2021-12-09},
eprinttype = {arxiv},
eprint = {2011.09588 [cs, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/WWFHI3UN/2011.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/SHMRZ3Q7/Chung et al. - 2021 - Beyond Pinball Loss Quantile Methods for Calibrat.pdf:application/pdf},
}
@online{noauthor_liberalised_nodate,
title = {The liberalised electricity market includes many parties who all have to work together and at the same time try to make a profit. An overview of the most...},
url = {https://www.next-kraftwerke.be/en/knowledge-hub/players-in-the-belgian-power-market/},
abstract = {The liberalised electricity market includes many parties who all have to work together and at the same time try to make a profit. An overview of the most...},
urldate = {2024-03-20},
file = {Snapshot:/Users/victormylle/Zotero/storage/M9XWVY6F/players-in-the-belgian-power-market.html:text/html},
}
@misc{ho_denoising_2020,
title = {Denoising Diffusion Probabilistic Models},
url = {http://arxiv.org/abs/2006.11239},
doi = {10.48550/arXiv.2006.11239},
abstract = {We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional {CIFAR}10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art {FID} score of 3.17. On 256x256 {LSUN}, we obtain sample quality similar to {ProgressiveGAN}. Our implementation is available at https://github.com/hojonathanho/diffusion},
number = {{arXiv}:2006.11239},
publisher = {{arXiv}},
author = {Ho, Jonathan and Jain, Ajay and Abbeel, Pieter},
urldate = {2024-04-02},
date = {2020-12-16},
eprinttype = {arxiv},
eprint = {2006.11239 [cs, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/CYMHCMUT/Ho et al. - 2020 - Denoising Diffusion Probabilistic Models.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/CE8R84V5/2006.html:text/html},
}
@inproceedings{dumas_probabilistic_2019,
title = {Probabilistic Forecasting of Imbalance Prices in the Belgian Context},
url = {http://arxiv.org/abs/2106.07361},
doi = {10.1109/EEM.2019.8916375},
abstract = {Forecasting imbalance prices is essential for strategic participation in the short-term energy markets. A novel two-step probabilistic approach is proposed, with a particular focus on the Belgian case. The first step consists of computing the net regulation volume state transition probabilities. It is modeled as a matrix computed using historical data. This matrix is then used to infer the imbalance prices since the net regulation volume can be related to the level of reserves activated and the corresponding marginal prices for each activation level are published by the Belgian Transmission System Operator one day before electricity delivery. This approach is compared to a deterministic model, a multi-layer perceptron, and a widely used probabilistic technique, Gaussian Processes.},
pages = {1--7},
booktitle = {2019 16th International Conference on the European Energy Market ({EEM})},
author = {Dumas, Jonathan and Boukas, Ioannis and de Villena, Miguel Manuel and Mathieu, Sébastien and Cornélusse, Bertrand},
urldate = {2024-04-17},
date = {2019-09},
eprinttype = {arxiv},
eprint = {2106.07361 [cs, eess, q-fin]},
keywords = {Computer Science - Machine Learning, Electrical Engineering and Systems Science - Signal Processing, Quantitative Finance - Statistical Finance},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/3N56FPYP/2106.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/958MBH5M/Dumas et al. - 2019 - Probabilistic Forecasting of Imbalance Prices in t.pdf:application/pdf},
}
@article{gunduz_transfer_2023,
title = {Transfer learning for electricity price forecasting},
volume = {34},
issn = {2352-4677},
url = {https://www.sciencedirect.com/science/article/pii/S2352467723000048},
doi = {10.1016/j.segan.2023.100996},
abstract = {Electricity price forecasting is an essential task in all the deregulated markets of the world. The accurate prediction of day-ahead electricity prices is an active research field and available data from various markets can be used as input for forecasting. A collection of models have been proposed for this task, but the fundamental question on how to use the available big data is often neglected. In this paper, we propose to use transfer learning as a tool for utilizing information from other electricity price markets for forecasting. We pre-train a neural network model on source markets and finally do a fine-tuning for the target market. Moreover, we test different ways to use the rich input data from various electricity price markets to forecast 24 steps ahead in hourly frequency. Our experiments on four different day-ahead markets indicate that transfer learning improves the electricity price forecasting performance in a statistically significant manner. Furthermore, we compare our results with state-of-the-art methods in a rolling window scheme to demonstrate the performance of the transfer learning approach. Our method improves the performance of the state-of-the-art algorithms by 7\% for the French market and 3\% for the German market.},
pages = {100996},
journaltitle = {Sustainable Energy, Grids and Networks},
shortjournal = {Sustainable Energy, Grids and Networks},
author = {Gunduz, Salih and Ugurlu, Umut and Oksuz, Ilkay},
urldate = {2024-04-17},
date = {2023-06-01},
keywords = {Artificial neural networks, Electricity price forecasting, Market integration, Transfer learning},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/BWI5FHS4/S2352467723000048.html:text/html;Submitted Version:/Users/victormylle/Zotero/storage/62FHBWJ8/Gunduz et al. - 2023 - Transfer learning for electricity price forecastin.pdf:application/pdf},
}
@article{lago_forecasting_2018,
title = {Forecasting spot electricity prices: Deep learning approaches and empirical comparison of traditional algorithms},
volume = {221},
issn = {0306-2619},
url = {https://www.sciencedirect.com/science/article/pii/S030626191830196X},
doi = {10.1016/j.apenergy.2018.02.069},
shorttitle = {Forecasting spot electricity prices},
abstract = {In this paper, a novel modeling framework for forecasting electricity prices is proposed. While many predictive models have been already proposed to perform this task, the area of deep learning algorithms remains yet unexplored. To fill this scientific gap, we propose four different deep learning models for predicting electricity prices and we show how they lead to improvements in predictive accuracy. In addition, we also consider that, despite the large number of proposed methods for predicting electricity prices, an extensive benchmark is still missing. To tackle that, we compare and analyze the accuracy of 27 common approaches for electricity price forecasting. Based on the benchmark results, we show how the proposed deep learning models outperform the state-of-the-art methods and obtain results that are statistically significant. Finally, using the same results, we also show that: (i) machine learning methods yield, in general, a better accuracy than statistical models; (ii) moving average terms do not improve the predictive accuracy; (iii) hybrid models do not outperform their simpler counterparts.},
pages = {386--405},
journaltitle = {Applied Energy},
shortjournal = {Applied Energy},
author = {Lago, Jesus and De Ridder, Fjo and De Schutter, Bart},
urldate = {2024-04-17},
date = {2018-07-01},
keywords = {Deep learning, Electricity price forecasting, Benchmark study},
file = {Full Text:/Users/victormylle/Zotero/storage/SZAAF5RK/Lago et al. - 2018 - Forecasting spot electricity prices Deep learning.pdf:application/pdf;ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/5JH9JLSM/S030626191830196X.html:text/html},
}
@article{weron_electricity_2014,
title = {Electricity price forecasting: A review of the state-of-the-art with a look into the future},
volume = {30},
issn = {0169-2070},
url = {https://www.sciencedirect.com/science/article/pii/S0169207014001083},
doi = {10.1016/j.ijforecast.2014.08.008},
shorttitle = {Electricity price forecasting},
abstract = {A variety of methods and ideas have been tried for electricity price forecasting ({EPF}) over the last 15 years, with varying degrees of success. This review article aims to explain the complexity of available solutions, their strengths and weaknesses, and the opportunities and threats that the forecasting tools offer or that may be encountered. The paper also looks ahead and speculates on the directions {EPF} will or should take in the next decade or so. In particular, it postulates the need for objective comparative {EPF} studies involving (i) the same datasets, (ii) the same robust error evaluation procedures, and (iii) statistical testing of the significance of one model's outperformance of another.},
pages = {1030--1081},
number = {4},
journaltitle = {International Journal of Forecasting},
shortjournal = {International Journal of Forecasting},
author = {Weron, Rafał},
urldate = {2024-05-02},
date = {2014-10-01},
keywords = {Electricity price forecasting, Autoregression, Day-ahead market, Factor model, Forecast combination, Neural network, Probabilistic forecast, Seasonality},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/DDGF263F/S0169207014001083.html:text/html},
}
@article{poggi_electricity_2023,
title = {Electricity Price Forecasting via Statistical and Deep Learning Approaches: The German Case},
volume = {3},
rights = {http://creativecommons.org/licenses/by/3.0/},
issn = {2673-9909},
url = {https://www.mdpi.com/2673-9909/3/2/18},
doi = {10.3390/appliedmath3020018},
shorttitle = {Electricity Price Forecasting via Statistical and Deep Learning Approaches},
abstract = {Our research involves analyzing the latest models used for electricity price forecasting, which include both traditional inferential statistical methods and newer deep learning techniques. Through our analysis of historical data and the use of multiple weekday dummies, we have proposed an innovative solution for forecasting electricity spot prices. This solution involves breaking down the spot price series into two components: a seasonal trend component and a stochastic component. By utilizing this approach, we are able to provide highly accurate predictions for all considered time frames.},
pages = {316--342},
number = {2},
journaltitle = {{AppliedMath}},
author = {Poggi, Aurora and Di Persio, Luca and Ehrhardt, Matthias},
urldate = {2024-05-02},
date = {2023-06},
langid = {english},
note = {Number: 2
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {deep learning, autoregressive, electricity price forecasting, machine learning, neural network, statistical method, univariate model},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/3IR29RU3/Poggi et al. - 2023 - Electricity Price Forecasting via Statistical and .pdf:application/pdf},
}
@online{noauthor_welcome_nodate,
title = {Welcome — Elia Open Data Portal},
url = {https://opendata.elia.be/pages/home/},
urldate = {2024-05-18},
file = {Welcome — Elia Open Data Portal:/Users/victormylle/Zotero/storage/SYR9PM3Z/home.html:text/html},
}
@online{noauthor_imbalance_nodate,
title = {Imbalance prices per quarter-hour (Historical data)},
url = {https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime},
abstract = {System imbalance prices applied if an imbalance is found between injections and offtakes in a balance responsible party's ({BRP}'s) balance area. When imbalance prices are published on a quarter-hourly basis, the published prices have not yet been validated and can therefore only be used as an indication of the imbalance price. Only after the published prices have been validated can they be used for invoicing purposes. The records for month M are validated after the 15th of month M+1. Contains the historical data and is refreshed daily. This dataset contains data until 21/05/2024 (before {MARI} local go-live).},
urldate = {2024-05-18},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/PZI6PTQ2/information.html:text/html},
}
@online{noauthor_measured_nodate,
title = {Measured and forecasted total load on the Belgian grid (Historical data)},
url = {https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime},
abstract = {Measured and upscaled, most recent, day-ahead and week-ahead forecasts for total load on the Belgian grid.},
urldate = {2024-05-18},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/8857IXIQ/table.html:text/html},
}
@online{noauthor_measured_nodate-1,
title = {Measured and forecasted total load on the Belgian grid (Historical data)},
url = {https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime},
abstract = {Measured and upscaled, most recent, day-ahead and week-ahead forecasts for total load on the Belgian grid.},
urldate = {2024-05-18},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/88FLT7BA/table.html:text/html},
}
@online{noauthor_photovoltaic_nodate,
title = {Photovoltaic power production estimation and forecast on Belgian grid (Historical)},
url = {https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime},
abstract = {Measured and upscaled photovoltaic power generation on the Belgian grid. Please note that the measured and forecast values are in {MW}; it is the user's responsibility to interpret the values as such.},
urldate = {2024-05-18},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/7VB5YHYE/table.html:text/html},
}
@online{noauthor_wind_nodate,
title = {Wind power production estimation and forecast on Belgian grid (Historical)},
url = {https://opendata.elia.be/explore/dataset/ods031/information/},
abstract = {Measured and upscaled wind power generation on the Belgian grid. Please note that the measured and forecast values are in {MW}; it is the user's responsibility to interpret the values as such.},
urldate = {2024-05-18},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/UTJUH5VQ/information.html:text/html},
}
@online{noauthor_intraday_nodate,
title = {Intraday implicit net position (Belgium's balance)},
url = {https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime},
abstract = {Net sum of intraday nominations of the implicit capacity allocated for energy exchanges for Belgium.},
urldate = {2024-05-18},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/XJ7KBDWG/information.html:text/html},
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 40 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

After

Width:  |  Height:  |  Size: 32 KiB

View File

@@ -188,22 +188,6 @@
file = {Full Text PDF:/Users/victormylle/Zotero/storage/U45EUFZU/Zhang and Chen - 2021 - Diffusion Normalizing Flow.pdf:application/pdf},
}
@misc{rezende_variational_2016,
title = {Variational Inference with Normalizing Flows},
url = {http://arxiv.org/abs/1505.05770},
abstract = {The choice of approximate posterior distribution is one of the core problems in variational inference. Most applications of variational inference employ simple families of posterior approximations in order to allow for efficient inference, focusing on mean-field or other simple structured approximations. This restriction has a significant impact on the quality of inferences made using variational methods. We introduce a new approach for specifying flexible, arbitrarily complex and scalable approximate posterior distributions. Our approximations are distributions constructed through a normalizing flow, whereby a simple initial density is transformed into a more complex one by applying a sequence of invertible transformations until a desired level of complexity is attained. We use this view of normalizing flows to develop categories of finite and infinitesimal flows and provide a unified view of approaches for constructing rich posterior approximations. We demonstrate that the theoretical advantages of having posteriors that better match the true posterior, combined with the scalability of amortized variational approaches, provides a clear improvement in performance and applicability of variational inference.},
number = {{arXiv}:1505.05770},
publisher = {{arXiv}},
author = {Rezende, Danilo Jimenez and Mohamed, Shakir},
urldate = {2023-10-18},
date = {2016-06-14},
eprinttype = {arxiv},
eprint = {1505.05770 [cs, stat]},
note = {version: 6},
keywords = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning, Statistics - Computation, Statistics - Methodology},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/2J7MPVV5/1505.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/GQWIFAAN/Rezende and Mohamed - 2016 - Variational Inference with Normalizing Flows.pdf:application/pdf},
}
@article{sweidan_probabilistic_nodate,
title = {Probabilistic Prediction in scikit-learn},
abstract = {Adding confidence measures to predictive models should increase the trustworthiness, but only if the models are well-calibrated. Historically, some algorithms like logistic regression, but also neural networks, have been considered to produce well-calibrated probability estimates off-the-shelf. Other techniques, like decision trees and Naive Bayes, on the other hand, are infamous for being significantly overconfident in their probabilistic predictions. In this paper, a large experimental study is conducted to investigate how well-calibrated models produced by a number of algorithms in the scikit-learn library are out-of-the-box, but also if either the built-in calibration techniques Platt scaling and isotonic regression, or Venn-Abers, can be used to improve the calibration. The results show that of the seven algorithms evaluated, the only one obtaining well-calibrated models without the external calibration is logistic regression. All other algorithms, i.e., decision trees, adaboost, gradient boosting, {kNN}, naive Bayes and random forest benefit from using any of the calibration techniques. In particular, decision trees, Naive Bayes and the boosted models are substantially improved using external calibration. From a practitioner's perspective, the obvious recommendation becomes to incorporate calibration when using probabilistic prediction. Comparing the different calibration techniques, Platt scaling and Venn-Abers generally outperform isotonic regression, on these rather small datasets. Finally, the unique ability of Venn-Abers to output not only well-calibrated probability estimates, but also the confidence in these estimates is demonstrated.},
@@ -438,3 +422,248 @@ Publisher: Multidisciplinary Digital Publishing Institute},
langid = {british},
file = {Snapshot:/Users/victormylle/Zotero/storage/XJ7KBDWG/information.html:text/html},
}
@misc{narajewski_probabilistic_2022,
title = {Probabilistic forecasting of German electricity imbalance prices},
url = {http://arxiv.org/abs/2205.11439},
abstract = {The exponential growth of renewable energy capacity has brought much uncertainty to electricity prices and to electricity generation. To address this challenge, the energy exchanges have been developing further trading possibilities, especially the intraday and balancing markets. For an energy trader participating in both markets, the forecasting of imbalance prices is of particular interest. Therefore, in this manuscript we conduct a very short-term probabilistic forecasting of imbalance prices, contributing to the scarce literature in this novel subject. The forecasting is performed 30 minutes before the delivery, so that the trader might still choose the trading place. The distribution of the imbalance prices is modelled and forecasted using methods well-known in the electricity price forecasting literature: lasso with bootstrap, gamlss, and probabilistic neural networks. The methods are compared with a naive benchmark in a meaningful rolling window study. The results provide evidence of the efficiency between the intraday and balancing markets as the sophisticated methods do not substantially overperform the intraday continuous price index. On the other hand, they significantly improve the empirical coverage. The analysis was conducted on the German market, however it could be easily applied to any other market of similar structure.},
number = {{arXiv}:2205.11439},
publisher = {{arXiv}},
author = {Narajewski, Michał},
urldate = {2024-05-19},
date = {2022-05-23},
langid = {english},
eprinttype = {arxiv},
eprint = {2205.11439 [econ, q-fin, stat]},
keywords = {Economics - Econometrics, Quantitative Finance - Statistical Finance, Statistics - Machine Learning},
file = {Narajewski - 2022 - Probabilistic forecasting of German electricity im.pdf:/Users/victormylle/Zotero/storage/3D2WFRCD/Narajewski - 2022 - Probabilistic forecasting of German electricity im.pdf:application/pdf},
}
@online{noauthor_geliberaliseerde_nodate,
title = {De geliberaliseerde elektriciteitsmarkt omvat vele partijen die allen samen moeten werken en tegelijkertijd proberen winst te maken. Hieronder volgt een...},
url = {https://www.next-kraftwerke.be/nl/weten/spelers-energiemarkt},
abstract = {De geliberaliseerde elektriciteitsmarkt omvat vele partijen die allen samen moeten werken en tegelijkertijd proberen winst te maken. Hieronder volgt een...},
urldate = {2024-05-19},
langid = {dutch},
file = {Snapshot:/Users/victormylle/Zotero/storage/ZC2QU3PI/spelers-energiemarkt.html:text/html},
}
@article{baetens_imbalance_nodate,
title = {{IMBALANCE} {PRICE} {PREDICTION} {FOR} {THE} {IMPLICIT} {DEMAND} {RESPONSE} {POTENTIAL} {EVALUATION} {OF} {AN} {ELECTRODE} {BOILER}},
abstract = {Increasing Renewable Energy Sources ({RES}) penetration in the electricity grid increases the electricity market price volatility. This mechanism could be economically exploited by electrifying the heat demand in industry. An electrode boiler could assist the existing gas-fired boiler in steam production, decreasing the overall {CO}2 intensity of the produced steam. In this work, the predictability of the Net Regulation Volume is shown and used to estimate the imbalance price for a current quarter-hour. The electrode boiler is steered based on the predicted imbalance price, making use of the price volatility and single imbalance pricing mechanism as used in Belgium.},
author = {Baetens, Jens},
langid = {english},
file = {Baetens - IMBALANCE PRICE PREDICTION FOR THE IMPLICIT DEMAND.pdf:/Users/victormylle/Zotero/storage/EZ3MXHGN/Baetens - IMBALANCE PRICE PREDICTION FOR THE IMPLICIT DEMAND.pdf:application/pdf},
}
@article{rintamaki_does_2017,
title = {Does renewable energy generation decrease the volatility of electricity prices? An analysis of Denmark and Germany},
volume = {62},
issn = {0140-9883},
url = {https://www.sciencedirect.com/science/article/pii/S0140988317300063},
doi = {10.1016/j.eneco.2016.12.019},
shorttitle = {Does renewable energy generation decrease the volatility of electricity prices?},
abstract = {Although variable renewable energy ({VRE}) technologies with zero marginal costs decrease electricity prices, the literature is inconclusive about how the resulting shift in the supply curves impacts price volatility. Because the flexibility to respond to high peak and low off-peak prices is crucial for demand-response applications and may compensate for the losses of conventional generators caused by lower average prices, there is a need to understand how the penetration of {VRE} affects volatility. In this paper, we build distributed lag models with Danish and German data to estimate the impact of {VRE} generation on electricity price volatility. We find that in Denmark wind power decreases the daily volatility of prices by flattening the hourly price profile, but in Germany it increases the volatility because it has a stronger impact on off-peak prices. Our analysis suggests that access to flexible generation capacity and wind power generation patterns contribute to these differing impacts. Meanwhile, solar power decreases price volatility in Germany. By contrast, the weekly volatility of prices increases in both areas due to the intermittency of {VRE}. Thus, policy measures for facilitating the integration of {VRE} should be tailored to such region-specific patterns.},
pages = {270--282},
journaltitle = {Energy Economics},
shortjournal = {Energy Economics},
author = {Rintamäki, Tuomas and Siddiqui, Afzal S. and Salo, Ahti},
urldate = {2024-05-19},
date = {2017-02-01},
keywords = {{EEX}, Electricity price volatility, Nord Pool, Solar power, Time-series model, Wind power},
file = {Submitted Version:/Users/victormylle/Zotero/storage/FN6FC96E/Rintamäki et al. - 2017 - Does renewable energy generation decrease the vola.pdf:application/pdf},
}
@report{commission_for_electricity_and_gas_regulation_creg_study_2023,
location = {Brussels, Belgium},
title = {Study on the Functioning and Price Evolution of the Belgian Wholesale Electricity Market - Monitoring Report 2022},
url = {https://www.creg.be/sites/default/files/assets/Publications/Studies/F2537EN.pdf},
number = {F2537},
institution = {Commission for Electricity and Gas Regulation ({CREG})},
author = {{Commission for Electricity and Gas Regulation (CREG)}},
date = {2023-06},
}
@online{noauthor_role_nodate,
title = {Role of {BRP}},
url = {https://www.elia.be/en/electricity-market-and-system/role-of-brp},
urldate = {2024-05-19},
langid = {english},
file = {Snapshot:/Users/victormylle/Zotero/storage/RLRZAFBM/role-of-brp.html:text/html},
}
@online{noauthor_fcr_nodate,
title = {{FCR}},
url = {https://www.elia.be/en/electricity-market-and-system/system-services/keeping-the-balance/fcr},
urldate = {2024-05-19},
langid = {english},
}
@online{noauthor_afrr_nodate,
title = {{aFRR}},
url = {https://www.elia.be/en/electricity-market-and-system/system-services/keeping-the-balance/afrr},
urldate = {2024-05-19},
langid = {english},
}
@online{noauthor_mfrr_nodate,
title = {{mFRR}},
url = {https://www.elia.be/en/electricity-market-and-system/system-services/keeping-the-balance/mfrr},
urldate = {2024-05-19},
langid = {english},
file = {Snapshot:/Users/victormylle/Zotero/storage/P92IN76K/mfrr.html:text/html},
}
@misc{elia_tariffs_2022,
title = {Tariffs for Maintaining and Restoring the Residual Balance of Individual Access Responsible Parties},
url = {https://www.elia.be/-/media/project/elia/elia-site/customers/tarrifs-and-invoicing/tariffs-and-invoicing/en/grille-tarifaire-desequilibre-2022-en-v20220214s.pdf},
author = {{Elia}},
date = {2022-02},
}
@article{hagfors_modeling_2016,
title = {Modeling the {UK} electricity price distributions using quantile regression},
volume = {102},
issn = {0360-5442},
url = {https://www.sciencedirect.com/science/article/pii/S0360544216300688},
doi = {10.1016/j.energy.2016.02.025},
abstract = {In this paper we develop fundamental quantile regression models for the {UK} electricity price in each trading period. Intraday properties of price risk, as represented by the predictive distribution rather than expected values, have previously not been fully analyzed. The sample covers half hourly data from 2005 to 2012. From our analysis we are able to show how the sensitivity towards different fundamental factors changes across quantiles and time of day. In the {UK} the supply of electricity is to a large extent generated from coal and gas plants, thus the price of gas and coal, as well as the carbon emission price, are included as fundamental factors in our model. We also include the electricity price lagged by one day, as well as demand and margin forecasts. We find that the sensitivities vary across the price distribution. Our findings also suggest that the sensitivity to fundamental factors exhibit intraday variation. We find that the sensitivity to gas relative to coal is higher in high quantiles and lower in low quantiles, as well as some indications of market power being exercised during peak hours. We have demonstrated a scenario analysis based on the quantile regression models, showing how changes in the values of the fundamentals influence the electricity price distribution.},
pages = {231--243},
journaltitle = {Energy},
shortjournal = {Energy},
author = {Hagfors, Lars Ivar and Bunn, Derek and Kristoffersen, Eline and Staver, Tiril Toftdahl and Westgaard, Sjur},
urldate = {2024-05-19},
date = {2016-05-01},
keywords = {Electricity markets, Prices, Quantile regression, Risk},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/M3768XYM/S0360544216300688.html:text/html;Submitted Version:/Users/victormylle/Zotero/storage/X4EUFX4U/Hagfors et al. - 2016 - Modeling the UK electricity price distributions us.pdf:application/pdf},
}
@article{koenker_regression_1978,
title = {Regression Quantiles},
volume = {46},
issn = {0012-9682},
url = {https://www.jstor.org/stable/1913643},
doi = {10.2307/1913643},
	abstract = {A simple minimization problem yielding the ordinary sample quantiles in the location model is shown to generalize naturally to the linear model generating a new class of statistics we term "regression quantiles." The estimator which minimizes the sum of absolute residuals is an important special case. Some equivariance properties and the joint asymptotic distribution of regression quantiles are established. These results permit a natural generalization to the linear model of certain well-known robust estimators of location. Estimators are suggested, which have comparable efficiency to least squares for Gaussian linear models while substantially out-performing the least-squares estimator over a wide class of non-Gaussian error distributions.},
pages = {33--50},
number = {1},
journaltitle = {Econometrica},
author = {Koenker, Roger and Bassett, Gilbert},
urldate = {2024-05-19},
date = {1978},
note = {Publisher: [Wiley, Econometric Society]},
file = {JSTOR Full Text PDF:/Users/victormylle/Zotero/storage/QK6JQA54/Koenker and Bassett - 1978 - Regression Quantiles.pdf:application/pdf},
}
@misc{dhariwal_diffusion_2021,
title = {Diffusion Models Beat {GANs} on Image Synthesis},
url = {http://arxiv.org/abs/2105.05233},
doi = {10.48550/arXiv.2105.05233},
abstract = {We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. We achieve an {FID} of 2.97 on {ImageNet} 128\${\textbackslash}times\$128, 4.59 on {ImageNet} 256\${\textbackslash}times\$256, and 7.72 on {ImageNet} 512\${\textbackslash}times\$512, and we match {BigGAN}-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving {FID} to 3.94 on {ImageNet} 256\${\textbackslash}times\$256 and 3.85 on {ImageNet} 512\${\textbackslash}times\$512. We release our code at https://github.com/openai/guided-diffusion},
number = {{arXiv}:2105.05233},
publisher = {{arXiv}},
author = {Dhariwal, Prafulla and Nichol, Alex},
urldate = {2024-05-19},
date = {2021-06-01},
eprinttype = {arxiv},
eprint = {2105.05233 [cs, stat]},
note = {version: 4},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/YRPKR9RL/Dhariwal and Nichol - 2021 - Diffusion Models Beat GANs on Image Synthesis.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/5W4HS765/2105.html:text/html},
}
@misc{ho_classifier-free_2022,
title = {Classifier-Free Diffusion Guidance},
url = {http://arxiv.org/abs/2207.12598},
doi = {10.48550/arXiv.2207.12598},
abstract = {Classifier guidance is a recently introduced method to trade off mode coverage and sample fidelity in conditional diffusion models post training, in the same spirit as low temperature sampling or truncation in other types of generative models. Classifier guidance combines the score estimate of a diffusion model with the gradient of an image classifier and thereby requires training an image classifier separate from the diffusion model. It also raises the question of whether guidance can be performed without a classifier. We show that guidance can be indeed performed by a pure generative model without such a classifier: in what we call classifier-free guidance, we jointly train a conditional and an unconditional diffusion model, and we combine the resulting conditional and unconditional score estimates to attain a trade-off between sample quality and diversity similar to that obtained using classifier guidance.},
number = {{arXiv}:2207.12598},
publisher = {{arXiv}},
author = {Ho, Jonathan and Salimans, Tim},
urldate = {2024-05-19},
date = {2022-07-25},
eprinttype = {arxiv},
eprint = {2207.12598 [cs]},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/8AB5GKED/Ho and Salimans - 2022 - Classifier-Free Diffusion Guidance.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/JDFPZBZW/2207.html:text/html},
}
@misc{goodfellow_generative_2014,
title = {Generative Adversarial Networks},
url = {http://arxiv.org/abs/1406.2661},
doi = {10.48550/arXiv.1406.2661},
abstract = {We propose a new framework for estimating generative models via an adversarial process, in which we simultaneously train two models: a generative model G that captures the data distribution, and a discriminative model D that estimates the probability that a sample came from the training data rather than G. The training procedure for G is to maximize the probability of D making a mistake. This framework corresponds to a minimax two-player game. In the space of arbitrary functions G and D, a unique solution exists, with G recovering the training data distribution and D equal to 1/2 everywhere. In the case where G and D are defined by multilayer perceptrons, the entire system can be trained with backpropagation. There is no need for any Markov chains or unrolled approximate inference networks during either training or generation of samples. Experiments demonstrate the potential of the framework through qualitative and quantitative evaluation of the generated samples.},
number = {{arXiv}:1406.2661},
publisher = {{arXiv}},
author = {Goodfellow, Ian J. and Pouget-Abadie, Jean and Mirza, Mehdi and Xu, Bing and Warde-Farley, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua},
urldate = {2024-05-19},
date = {2014-06-10},
eprinttype = {arxiv},
eprint = {1406.2661 [cs, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/VSATPZJ9/Goodfellow et al. - 2014 - Generative Adversarial Networks.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/3BQ8BDKU/1406.html:text/html},
}
@inproceedings{rezende_variational_2015,
location = {Lille, France},
title = {Variational Inference with Normalizing Flows},
volume = {37},
url = {https://proceedings.mlr.press/v37/rezende15.html},
series = {Proceedings of Machine Learning Research},
abstract = {The choice of the approximate posterior distribution is one of the core problems in variational inference. Most applications of variational inference employ simple families of posterior approximations in order to allow for efficient inference, focusing on mean-field or other simple structured approximations. This restriction has a significant impact on the quality of inferences made using variational methods. We introduce a new approach for specifying flexible, arbitrarily complex and scalable approximate posterior distributions. Our approximations are distributions constructed through a normalizing flow, whereby a simple initial density is transformed into a more complex one by applying a sequence of invertible transformations until a desired level of complexity is attained. We use this view of normalizing flows to develop categories of finite and infinitesimal flows and provide a unified view of approaches for constructing rich posterior approximations. We demonstrate that the theoretical advantages of having posteriors that better match the true posterior, combined with the scalability of amortized variational approaches, provides a clear improvement in performance and applicability of variational inference.},
pages = {1530--1538},
booktitle = {Proceedings of the 32nd International Conference on Machine Learning},
publisher = {{PMLR}},
author = {Rezende, Danilo and Mohamed, Shakir},
editor = {Bach, Francis and Blei, David},
date = {2015-07-07},
}
@misc{sohl-dickstein_deep_2015,
title = {Deep Unsupervised Learning using Nonequilibrium Thermodynamics},
url = {http://arxiv.org/abs/1503.03585},
doi = {10.48550/arXiv.1503.03585},
abstract = {A central problem in machine learning involves modeling complex data-sets using highly flexible families of probability distributions in which learning, sampling, inference, and evaluation are still analytically or computationally tractable. Here, we develop an approach that simultaneously achieves both flexibility and tractability. The essential idea, inspired by non-equilibrium statistical physics, is to systematically and slowly destroy structure in a data distribution through an iterative forward diffusion process. We then learn a reverse diffusion process that restores structure in data, yielding a highly flexible and tractable generative model of the data. This approach allows us to rapidly learn, sample from, and evaluate probabilities in deep generative models with thousands of layers or time steps, as well as to compute conditional and posterior probabilities under the learned model. We additionally release an open source reference implementation of the algorithm.},
number = {{arXiv}:1503.03585},
publisher = {{arXiv}},
author = {Sohl-Dickstein, Jascha and Weiss, Eric A. and Maheswaranathan, Niru and Ganguli, Surya},
urldate = {2024-05-19},
date = {2015-11-18},
eprinttype = {arxiv},
eprint = {1503.03585 [cond-mat, q-bio, stat]},
keywords = {Computer Science - Machine Learning, Condensed Matter - Disordered Systems and Neural Networks, Quantitative Biology - Neurons and Cognition, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/YUMKKECP/Sohl-Dickstein et al. - 2015 - Deep Unsupervised Learning using Nonequilibrium Th.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/F96F2JL6/1503.html:text/html},
}
@misc{kingma_auto-encoding_2022,
title = {Auto-Encoding Variational Bayes},
url = {http://arxiv.org/abs/1312.6114},
doi = {10.48550/arXiv.1312.6114},
abstract = {How can we perform efficient inference and learning in directed probabilistic models, in the presence of continuous latent variables with intractable posterior distributions, and large datasets? We introduce a stochastic variational inference and learning algorithm that scales to large datasets and, under some mild differentiability conditions, even works in the intractable case. Our contributions are two-fold. First, we show that a reparameterization of the variational lower bound yields a lower bound estimator that can be straightforwardly optimized using standard stochastic gradient methods. Second, we show that for i.i.d. datasets with continuous latent variables per datapoint, posterior inference can be made especially efficient by fitting an approximate inference model (also called a recognition model) to the intractable posterior using the proposed lower bound estimator. Theoretical advantages are reflected in experimental results.},
number = {{arXiv}:1312.6114},
publisher = {{arXiv}},
author = {Kingma, Diederik P. and Welling, Max},
urldate = {2024-05-19},
date = {2022-12-10},
eprinttype = {arxiv},
eprint = {1312.6114 [cs, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/W2KMR8B9/Kingma and Welling - 2022 - Auto-Encoding Variational Bayes.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/6M4IJ4B8/1312.html:text/html},
}
@article{gneiting_strictly_2007,
title = {Strictly Proper Scoring Rules, Prediction, and Estimation},
volume = {102},
issn = {0162-1459, 1537-274X},
url = {http://www.tandfonline.com/doi/abs/10.1198/016214506000001437},
doi = {10.1198/016214506000001437},
pages = {359--378},
number = {477},
journaltitle = {Journal of the American Statistical Association},
shortjournal = {Journal of the American Statistical Association},
author = {Gneiting, Tilmann and Raftery, Adrian E},
urldate = {2024-05-19},
date = {2007-03},
langid = {english},
file = {Gneiting and Raftery - 2007 - Strictly Proper Scoring Rules, Prediction, and Est.pdf:/Users/victormylle/Zotero/storage/UTDSA82K/Gneiting and Raftery - 2007 - Strictly Proper Scoring Rules, Prediction, and Est.pdf:application/pdf},
}

View File

@@ -1,12 +1,12 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {section}{\numberline {A}Appendix}{51}{appendix.A}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Comparison of the autoregressive models with the diffusion model\relax }}{51}{figure.caption.33}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{20}{51}{Comparison of the autoregressive models with the diffusion model\relax }{figure.caption.33}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Comparison of the non-autoregressive models with the diffusion model\relax }}{52}{figure.caption.34}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{21}{52}{Comparison of the non-autoregressive models with the diffusion model\relax }{figure.caption.34}{}}
\@writefile{toc}{\contentsline {section}{\numberline {A}Appendix}{57}{appendix.A}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Comparison of the autoregressive models with the diffusion model\relax }}{57}{figure.caption.35}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{21}{57}{Comparison of the autoregressive models with the diffusion model\relax }{figure.caption.35}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {22}{\ignorespaces Comparison of the non-autoregressive models with the diffusion model\relax }}{58}{figure.caption.36}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{22}{58}{Comparison of the non-autoregressive models with the diffusion model\relax }{figure.caption.36}{}}
\@setckpt{sections/appendix}{
\setcounter{page}{53}
\setcounter{page}{59}
\setcounter{equation}{8}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
@@ -20,161 +20,38 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{21}
\setcounter{table}{13}
\setcounter{figure}{22}
\setcounter{table}{14}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
\setcounter{caption@flags}{6}
\setcounter{continuedfloat}{0}
\setcounter{subfigure}{4}
\setcounter{subtable}{0}
\setcounter{tabx@nest}{0}
\setcounter{listtotal}{0}
\setcounter{listcount}{0}
\setcounter{liststart}{0}
\setcounter{liststop}{0}
\setcounter{citecount}{0}
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{24}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
\setcounter{minitems}{1}
\setcounter{citecounter}{0}
\setcounter{maxcitecounter}{0}
\setcounter{savedcitecounter}{0}
\setcounter{uniquelist}{0}
\setcounter{uniquename}{0}
\setcounter{refsection}{0}
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
\setcounter{highnamepenalty}{50}
\setcounter{lownamepenalty}{25}
\setcounter{maxparens}{3}
\setcounter{parenlevel}{0}
\setcounter{blx@maxsection}{0}
\setcounter{mincomprange}{10}
\setcounter{maxcomprange}{100000}
\setcounter{mincompwidth}{1}
\setcounter{afterword}{0}
\setcounter{savedafterword}{0}
\setcounter{annotator}{0}
\setcounter{savedannotator}{0}
\setcounter{author}{0}
\setcounter{savedauthor}{0}
\setcounter{bookauthor}{0}
\setcounter{savedbookauthor}{0}
\setcounter{commentator}{0}
\setcounter{savedcommentator}{0}
\setcounter{editor}{0}
\setcounter{savededitor}{0}
\setcounter{editora}{0}
\setcounter{savededitora}{0}
\setcounter{editorb}{0}
\setcounter{savededitorb}{0}
\setcounter{editorc}{0}
\setcounter{savededitorc}{0}
\setcounter{foreword}{0}
\setcounter{savedforeword}{0}
\setcounter{holder}{0}
\setcounter{savedholder}{0}
\setcounter{introduction}{0}
\setcounter{savedintroduction}{0}
\setcounter{namea}{0}
\setcounter{savednamea}{0}
\setcounter{nameb}{0}
\setcounter{savednameb}{0}
\setcounter{namec}{0}
\setcounter{savednamec}{0}
\setcounter{translator}{0}
\setcounter{savedtranslator}{0}
\setcounter{shortauthor}{0}
\setcounter{savedshortauthor}{0}
\setcounter{shorteditor}{0}
\setcounter{savedshorteditor}{0}
\setcounter{narrator}{0}
\setcounter{savednarrator}{0}
\setcounter{execproducer}{0}
\setcounter{savedexecproducer}{0}
\setcounter{execdirector}{0}
\setcounter{savedexecdirector}{0}
\setcounter{with}{0}
\setcounter{savedwith}{0}
\setcounter{labelname}{0}
\setcounter{savedlabelname}{0}
\setcounter{institution}{0}
\setcounter{savedinstitution}{0}
\setcounter{lista}{0}
\setcounter{savedlista}{0}
\setcounter{listb}{0}
\setcounter{savedlistb}{0}
\setcounter{listc}{0}
\setcounter{savedlistc}{0}
\setcounter{listd}{0}
\setcounter{savedlistd}{0}
\setcounter{liste}{0}
\setcounter{savedliste}{0}
\setcounter{listf}{0}
\setcounter{savedlistf}{0}
\setcounter{location}{0}
\setcounter{savedlocation}{0}
\setcounter{organization}{0}
\setcounter{savedorganization}{0}
\setcounter{origlocation}{0}
\setcounter{savedoriglocation}{0}
\setcounter{origpublisher}{0}
\setcounter{savedorigpublisher}{0}
\setcounter{publisher}{0}
\setcounter{savedpublisher}{0}
\setcounter{language}{0}
\setcounter{savedlanguage}{0}
\setcounter{origlanguage}{0}
\setcounter{savedoriglanguage}{0}
\setcounter{citation}{0}
\setcounter{savedcitation}{0}
\setcounter{pageref}{0}
\setcounter{savedpageref}{0}
\setcounter{textcitecount}{0}
\setcounter{textcitetotal}{0}
\setcounter{textcitemaxnames}{0}
\setcounter{biburlbigbreakpenalty}{100}
\setcounter{biburlbreakpenalty}{200}
\setcounter{biburlnumpenalty}{0}
\setcounter{biburlucpenalty}{0}
\setcounter{biburllcpenalty}{0}
\setcounter{smartand}{1}
\setcounter{bbx:relatedcount}{0}
\setcounter{bbx:relatedtotal}{0}
\setcounter{NAT@ctr}{31}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{33}
\setcounter{bookmark@seq@number}{34}
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{1}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{1}
\setcounter{g@acro@CTSGAN@int}{1}
\setcounter{g@acro@GAN@int}{0}
\setcounter{g@acro@CTSGAN@int}{0}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{1}
\setcounter{g@acro@GP@int}{1}
\setcounter{g@acro@MLP@int}{0}
\setcounter{g@acro@GP@int}{0}
\setcounter{g@acro@MSE@int}{4}
\setcounter{g@acro@MAE@int}{4}
\setcounter{g@acro@CRPS@int}{3}
\setcounter{g@acro@TSPA@int}{1}
\setcounter{g@acro@PLF@int}{1}
\setcounter{g@acro@CRPS@int}{2}
\setcounter{g@acro@TSPA@int}{0}
\setcounter{g@acro@PLF@int}{0}
\setcounter{g@acro@NRV@int}{12}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}
\setcounter{g@acro@TSO@int}{2}
\setcounter{g@acro@TSO@int}{3}
\setcounter{g@acro@DSO@int}{0}
\setcounter{g@acro@BRP@int}{1}
\setcounter{g@acro@BSP@int}{1}

View File

@@ -1,43 +1,62 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {section}{\numberline {2}Electricity market}{3}{section.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Overview of the most important parties in the electricity market\relax }}{3}{table.caption.1}\protected@file@percent }
\citation{noauthor_geliberaliseerde_nodate}
\citation{noauthor_role_nodate}
\@writefile{toc}{\contentsline {section}{\numberline {2}Electricity market}{4}{section.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Overview of the most important parties in the electricity market\relax }}{4}{table.caption.1}\protected@file@percent }
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{tab:parties}{{1}{3}{Overview of the most important parties in the electricity market\relax }{table.caption.1}{}}
\ACRO{recordpage}{BRP}{4}{1}{3}
\ACRO{recordpage}{TSO}{5}{1}{4}
\ACRO{recordpage}{FCR}{6}{1}{5}
\ACRO{recordpage}{BSP}{6}{1}{5}
\ACRO{recordpage}{aFRR}{6}{1}{5}
\ACRO{recordpage}{mFRR}{6}{1}{5}
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Prices paid by the BRPs\relax }}{6}{table.caption.2}\protected@file@percent }
\newlabel{tab:imbalance_price}{{2}{6}{Prices paid by the BRPs\relax }{table.caption.2}{}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Generative modeling}{7}{section.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Quantile Regression}{7}{subsection.3.1}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Example of a cumulative distribution function and some quantiles. The quantiles are the values below which a certain proportion of observations fall.\relax }}{8}{figure.caption.3}\protected@file@percent }
\newlabel{fig:quantile_example}{{1}{8}{Example of a cumulative distribution function and some quantiles. The quantiles are the values below which a certain proportion of observations fall.\relax }{figure.caption.3}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }}{9}{figure.caption.4}\protected@file@percent }
\newlabel{fig:quantile_regression_example}{{2}{9}{Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }{figure.caption.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Autoregressive vs Non-Autoregressive models}{10}{subsection.3.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Model Types}{11}{subsection.3.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.1}Linear Model}{11}{subsubsection.3.3.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.2}Non-Linear Model}{12}{subsubsection.3.3.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.3}Recurrent Neural Network (RNN)}{12}{subsubsection.3.3.3}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces RNN model input and output visualization\relax }}{13}{figure.caption.5}\protected@file@percent }
\newlabel{fig:rnn_model_visualization}{{3}{13}{RNN model input and output visualization\relax }{figure.caption.5}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Diffusion models}{13}{subsection.3.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.1}Overview}{13}{subsubsection.3.4.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.2}Applications}{14}{subsubsection.3.4.2}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }}{14}{figure.caption.6}\protected@file@percent }
\newlabel{fig:diffusion_example}{{4}{14}{Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }{figure.caption.6}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{14}{subsubsection.3.4.3}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Diffusion process}}{16}{figure.caption.7}\protected@file@percent }
\newlabel{fig:diffusion_process}{{5}{16}{Diffusion process}{figure.caption.7}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Evaluation}{16}{subsection.3.5}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Visualization of the CRPS metric\relax }}{17}{figure.caption.8}\protected@file@percent }
\newlabel{fig:crps_visualization}{{6}{17}{Visualization of the CRPS metric\relax }{figure.caption.8}{}}
\newlabel{tab:parties}{{1}{4}{Overview of the most important parties in the electricity market\relax }{table.caption.1}{}}
\ACRO{recordpage}{BRP}{5}{1}{4}
\ACRO{recordpage}{TSO}{6}{1}{5}
\citation{noauthor_fcr_nodate}
\citation{noauthor_afrr_nodate}
\citation{noauthor_mfrr_nodate}
\ACRO{recordpage}{FCR}{7}{1}{6}
\ACRO{recordpage}{BSP}{7}{1}{6}
\ACRO{recordpage}{aFRR}{7}{1}{6}
\ACRO{recordpage}{mFRR}{7}{1}{6}
\citation{elia_tariffs_2022}
\citation{elia_tariffs_2022}
\citation{elia_tariffs_2022}
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Prices paid by the BRPs \cite {elia_tariffs_2022}\relax }}{7}{table.caption.2}\protected@file@percent }
\newlabel{tab:imbalance_price}{{2}{7}{Prices paid by the BRPs \cite {elia_tariffs_2022}\relax }{table.caption.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Example of a bid ladder. The bid ladder shows the total price for the activation of a certain volume of energy.\relax }}{8}{figure.caption.3}\protected@file@percent }
\newlabel{fig:bid_ladder}{{1}{8}{Example of a bid ladder. The bid ladder shows the total price for the activation of a certain volume of energy.\relax }{figure.caption.3}{}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Generative modeling}{8}{section.3}\protected@file@percent }
\citation{goodfellow_generative_2014}
\citation{kingma_auto-encoding_2022}
\citation{rezende_variational_2015}
\citation{sohl-dickstein_deep_2015}
\citation{koenker_regression_1978}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Quantile Regression}{9}{subsection.3.1}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of a cumulative distribution function and some quantiles. The quantiles are the values below which a certain proportion of observations fall.\relax }}{10}{figure.caption.4}\protected@file@percent }
\newlabel{fig:quantile_example}{{2}{10}{Example of a cumulative distribution function and some quantiles. The quantiles are the values below which a certain proportion of observations fall.\relax }{figure.caption.4}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }}{11}{figure.caption.5}\protected@file@percent }
\newlabel{fig:quantile_regression_example}{{3}{11}{Example of quantile regression output for one-quarter of the NRV, showing interpolated values for quantiles at 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These quantiles are used to reconstruct the cumulative distribution function.\relax }{figure.caption.5}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Autoregressive vs Non-Autoregressive models}{12}{subsection.3.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Model Types}{13}{subsection.3.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.1}Linear Model}{13}{subsubsection.3.3.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.2}Non-Linear Model}{14}{subsubsection.3.3.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.3}Recurrent Neural Network (RNN)}{14}{subsubsection.3.3.3}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces RNN model input and output visualization\relax }}{15}{figure.caption.6}\protected@file@percent }
\newlabel{fig:rnn_model_visualization}{{4}{15}{RNN model input and output visualization\relax }{figure.caption.6}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Diffusion models}{15}{subsection.3.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.1}Overview}{15}{subsubsection.3.4.1}\protected@file@percent }
\citation{ho_denoising_2020}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.2}Applications}{16}{subsubsection.3.4.2}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }}{16}{figure.caption.7}\protected@file@percent }
\newlabel{fig:diffusion_example}{{5}{16}{Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }{figure.caption.7}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{16}{subsubsection.3.4.3}\protected@file@percent }
\citation{ho_denoising_2020}
\citation{ho_denoising_2020}
\citation{gneiting_strictly_2007}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Diffusion process}}{18}{figure.caption.8}\protected@file@percent }
\newlabel{fig:diffusion_process}{{6}{18}{Diffusion process}{figure.caption.8}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Evaluation}{18}{subsection.3.5}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Visualization of the CRPS metric\relax }}{19}{figure.caption.9}\protected@file@percent }
\newlabel{fig:crps_visualization}{{7}{19}{Visualization of the CRPS metric\relax }{figure.caption.9}{}}
\@setckpt{sections/background}{
\setcounter{page}{18}
\setcounter{page}{20}
\setcounter{equation}{7}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
@@ -51,7 +70,7 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{6}
\setcounter{figure}{7}
\setcounter{table}{2}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -59,130 +78,7 @@
\setcounter{continuedfloat}{0}
\setcounter{subfigure}{0}
\setcounter{subtable}{0}
\setcounter{tabx@nest}{0}
\setcounter{listtotal}{0}
\setcounter{listcount}{0}
\setcounter{liststart}{0}
\setcounter{liststop}{0}
\setcounter{citecount}{0}
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{1}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
\setcounter{minitems}{1}
\setcounter{citecounter}{0}
\setcounter{maxcitecounter}{0}
\setcounter{savedcitecounter}{0}
\setcounter{uniquelist}{0}
\setcounter{uniquename}{0}
\setcounter{refsection}{0}
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
\setcounter{highnamepenalty}{50}
\setcounter{lownamepenalty}{25}
\setcounter{maxparens}{3}
\setcounter{parenlevel}{0}
\setcounter{blx@maxsection}{0}
\setcounter{mincomprange}{10}
\setcounter{maxcomprange}{100000}
\setcounter{mincompwidth}{1}
\setcounter{afterword}{0}
\setcounter{savedafterword}{0}
\setcounter{annotator}{0}
\setcounter{savedannotator}{0}
\setcounter{author}{0}
\setcounter{savedauthor}{0}
\setcounter{bookauthor}{0}
\setcounter{savedbookauthor}{0}
\setcounter{commentator}{0}
\setcounter{savedcommentator}{0}
\setcounter{editor}{0}
\setcounter{savededitor}{0}
\setcounter{editora}{0}
\setcounter{savededitora}{0}
\setcounter{editorb}{0}
\setcounter{savededitorb}{0}
\setcounter{editorc}{0}
\setcounter{savededitorc}{0}
\setcounter{foreword}{0}
\setcounter{savedforeword}{0}
\setcounter{holder}{0}
\setcounter{savedholder}{0}
\setcounter{introduction}{0}
\setcounter{savedintroduction}{0}
\setcounter{namea}{0}
\setcounter{savednamea}{0}
\setcounter{nameb}{0}
\setcounter{savednameb}{0}
\setcounter{namec}{0}
\setcounter{savednamec}{0}
\setcounter{translator}{0}
\setcounter{savedtranslator}{0}
\setcounter{shortauthor}{0}
\setcounter{savedshortauthor}{0}
\setcounter{shorteditor}{0}
\setcounter{savedshorteditor}{0}
\setcounter{narrator}{0}
\setcounter{savednarrator}{0}
\setcounter{execproducer}{0}
\setcounter{savedexecproducer}{0}
\setcounter{execdirector}{0}
\setcounter{savedexecdirector}{0}
\setcounter{with}{0}
\setcounter{savedwith}{0}
\setcounter{labelname}{0}
\setcounter{savedlabelname}{0}
\setcounter{institution}{0}
\setcounter{savedinstitution}{0}
\setcounter{lista}{0}
\setcounter{savedlista}{0}
\setcounter{listb}{0}
\setcounter{savedlistb}{0}
\setcounter{listc}{0}
\setcounter{savedlistc}{0}
\setcounter{listd}{0}
\setcounter{savedlistd}{0}
\setcounter{liste}{0}
\setcounter{savedliste}{0}
\setcounter{listf}{0}
\setcounter{savedlistf}{0}
\setcounter{location}{0}
\setcounter{savedlocation}{0}
\setcounter{organization}{0}
\setcounter{savedorganization}{0}
\setcounter{origlocation}{0}
\setcounter{savedoriglocation}{0}
\setcounter{origpublisher}{0}
\setcounter{savedorigpublisher}{0}
\setcounter{publisher}{0}
\setcounter{savedpublisher}{0}
\setcounter{language}{0}
\setcounter{savedlanguage}{0}
\setcounter{origlanguage}{0}
\setcounter{savedoriglanguage}{0}
\setcounter{citation}{0}
\setcounter{savedcitation}{0}
\setcounter{pageref}{0}
\setcounter{savedpageref}{0}
\setcounter{textcitecount}{0}
\setcounter{textcitetotal}{0}
\setcounter{textcitemaxnames}{0}
\setcounter{biburlbigbreakpenalty}{100}
\setcounter{biburlbreakpenalty}{200}
\setcounter{biburlnumpenalty}{0}
\setcounter{biburlucpenalty}{0}
\setcounter{biburllcpenalty}{0}
\setcounter{smartand}{1}
\setcounter{bbx:relatedcount}{0}
\setcounter{bbx:relatedtotal}{0}
\setcounter{NAT@ctr}{0}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
@@ -205,7 +101,7 @@
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}
\setcounter{g@acro@TSO@int}{2}
\setcounter{g@acro@TSO@int}{3}
\setcounter{g@acro@DSO@int}{0}
\setcounter{g@acro@BRP@int}{1}
\setcounter{g@acro@BSP@int}{1}

View File

@@ -1,5 +1,5 @@
\section{Electricity market}
The electricity market consists of many different parties who all work together and want to make a profit in the end. An overview of the most important parties can be found in Table \ref{tab:parties}. Each of them has a different role in the market.
The electricity market consists of many different parties who all work together and want to make a profit in the end. An overview of the most important parties can be found in Table \ref{tab:parties} \cite{noauthor_geliberaliseerde_nodate}.
% table
\begin{table}[h]
@@ -28,7 +28,7 @@ The electricity market consists of many different parties who all work together
The most important aspect of the electricity market is that the grid needs to be balanced at all times. This means that the amount of electricity consumed and generated must be equal at all times. If this is not the case, the grid can become unstable, which can lead to blackouts and disrupt equipment. One company is responsible for keeping the grid balanced. This company is called the Transmission System Operator (TSO). In Belgium, this party is Elia. The TSO keeps the grid balanced by activating reserves when needed. These reserves, however, are expensive and need to be paid by the market participants. The price paid for the activation of these reserves is called the imbalance price.
At every access point of the grid, there is a designated \acf{BRP}. This party may be a producer, major consumer, energy supplier or trader. The BRP must take all reasonable measures to maintain the balance between injections, offtakes and commercial power trades within its portfolio. Each day, the BRP submits a daily balance schedule for the next day to the TSO. This schedule contains the expected physical injections and offtakes from the grid as well as the commercial power trades with other BRPs or other countries. These schedules are forecasts and are not always 100\% accurate. A lot of factors can influence the production and consumption of electricity like the weather, the economy, the time of day etc. The BRP must take all reasonable measures to be balanced on a quarter-hourly basis. This can be done by day-ahead or intra-day trading with other BRPs. If the BRP is not balanced for a certain quarter, it will need to pay the imbalance price for the deviation. The imbalance of a BRP is the quarter-hourly difference between total injections and offtakes from the grid.
At every access point of the grid, there is a designated \acf{BRP}. This party may be a producer, major consumer, energy supplier or trader. The BRP must take all reasonable measures to maintain the balance between injections, offtakes and commercial power trades within its portfolio. Each day, the BRP submits a daily balance schedule for the next day to the TSO. This schedule contains the expected physical injections and offtakes from the grid as well as the commercial power trades with other BRPs or other countries. These schedules are forecasts and are not always 100\% accurate. A lot of factors can influence the production and consumption of electricity like the weather, the economy, the time of day etc. The BRP must take all reasonable measures to be balanced on a quarter-hourly basis. This can be done by day-ahead or intra-day trading with other BRPs. If the BRP is not balanced for a certain quarter, it will need to pay the imbalance price for the deviation. The imbalance of a BRP is the quarter-hourly difference between total injections and offtakes from the grid. \cite{noauthor_role_nodate}
The imbalance price, which is a crucial factor in the management of electricity grids, is set by the Transmission System Operator (TSO). This price is calculated based on the total imbalance within the grid. The net regulation volume (NRV) plays a key role in this process. The NRV represents the amount of energy that Elia, the TSO for Belgium, utilizes to ensure the stability and balance of the electricity grid within the Elia control area.
@@ -41,13 +41,13 @@ The Transmission System Operator (TSO) can activate reserves to maintain grid st
Elia, the \acf{TSO} in Belgium, maintains grid stability by activating three types of reserves, each designed to address specific conditions of imbalance. These reserves are crucial for ensuring that the electricity supply continuously meets the demand, thereby maintaining the frequency within the required operational limits. The reserves include:
1) \textbf{ \acf{FCR}} \\
FCR is a reserve that responds automatically to frequency deviations in the grid. The reserve responds automatically in seconds and provides a proportional response to the frequency deviation. Elia must provide a minimal share of this volume within the Belgian control area. This type of volume can also be offered by the \acsp{BSP}.
FCR is a reserve that responds automatically to frequency deviations in the grid. The reserve responds automatically in seconds and provides a proportional response to the frequency deviation. Elia must provide a minimal share of this volume within the Belgian control area. This type of volume can also be offered by the \acsp{BSP}. \cite{noauthor_fcr_nodate}
2) \textbf{ \acf{aFRR}} \\
aFRR is the second reserve that Elia can activate to restore the frequency to 50Hz. The aFRR is activated when the FCR is not sufficient to restore the frequency. Every 4 seconds, Elia sends a set-point to the BSPs. The BSPs use this set-point to adjust their production or consumption. The BSPs have a 7.5-minute window to activate the full requested energy volume. This reserve can also be offered by the BSPs.
aFRR is the second reserve that Elia can activate to restore the frequency to 50Hz. The aFRR is activated when the FCR is not sufficient to restore the frequency. Every 4 seconds, Elia sends a set point to the BSPs. The BSPs use this set point to adjust their production or consumption. The BSPs have a 7.5-minute window to activate the full requested energy volume. This reserve can also be offered by the BSPs. \cite{noauthor_afrr_nodate}
3) \textbf{ \acf{mFRR}} \\
Sometimes the FCR and aFRR are not enough to restore the imbalance between generation and consumption. Elia activates the mFRR manually and the requested energy volume is to be activated in 15 minutes. This reserve is the slowest and is used when the other reserves are not sufficient. This reserve can also be offered by the BSPs.
Sometimes the FCR and aFRR are not enough to restore the imbalance between generation and consumption. Elia activates the mFRR manually and the requested energy volume is to be activated in 15 minutes. This reserve is the slowest and is used when the other reserves are not sufficient. This reserve can also be offered by the BSPs. \cite{noauthor_mfrr_nodate}
The order in which the reserves are activated is FCR, aFRR, and mFRR. The reserves are activated in this order because of the response time of the reserves. The FCR is the fastest reserve and can respond automatically in seconds. The aFRR is the second reserve and can respond in 7.5 minutes. The mFRR is the slowest reserve and can respond in 15 minutes. The reserves are activated in this order to ensure that the grid remains stable and that the frequency remains within the required operational limits.
@@ -58,7 +58,7 @@ Elia selects the bids based on the order of activation and then the price. The h
\item \textbf{Positive SI + Positive BRP Imbalance }\\
This means that the BRP injects more energy into the grid than it takes out. The BRP has a positive imbalance. The System Imbalance is also positive which means that the grid has a surplus of injections. The BRP will need to pay Elia for the surplus injections. The price paid by the BRP is the Marginal price of downward activation (MDP) minus an extra parameter \(\alpha\).
\item \textbf{Positive SI + Negative BRP Imbalance }\\
The BRP takes more energy out of the grid than it injects. The BRP has a negative imbalance. The System Imbalance is positive which means that the grid has a surplus of injections. Elia will need to downward activate reserves to balance the grid. Elia needs to pay the BRP for the surplus of offtakes. The price paid by Elia is the Marginal price of downward activation (MIP) minus an extra parameter \(\alpha\).
The BRP takes more energy out of the grid than it injects. The BRP has a negative imbalance. The System Imbalance is positive which means that the grid has a surplus of injections. Elia will need to downward activate reserves to balance the grid. Elia needs to pay the BRP for the surplus of offtakes. The price paid by Elia is the Marginal price of downward activation (MDP) minus an extra parameter \(\alpha\).
\item \textbf{Negative SI + Positive BRP Imbalance }\\
The BRP injects more energy into the grid than it takes out. The BRP has a positive imbalance. The System Imbalance is negative which means that the grid has a deficit of injections. Elia will need to upward activate reserves to balance the grid. Elia needs to pay the BRP for the surplus of injections. The price paid by Elia is the Marginal price of upward activation (MIP) plus an extra parameter \(\alpha\).
\item \textbf{Negative SI + Negative BRP Imbalance }\\
@@ -78,19 +78,25 @@ Elia selects the bids based on the order of activation and then the price. The h
\textbf{Negative} & MDP - \(\alpha\) & MIP + \(\alpha\) \\
\hline
\end{tabular}
\caption{Prices paid by the BRPs}
\caption{Prices paid by the BRPs \cite{elia_tariffs_2022}}
\label{tab:imbalance_price}
\end{table}
The imbalance price calculation includes the following variables: \\
- MDP: Marginal price of downward activation \\
- MIP: Marginal price of upward activation \\
- \(\alpha\): Extra parameter dependent on System Imbalance \\
\\
- \(\alpha\): Extra parameter dependent on System Imbalance
TODO: Add more information about the imbalance price calculation, alpha?
The formulas used to calculate the imbalance price can change. Elia publishes the tariffs and formulas used to calculate the imbalance price \cite{elia_tariffs_2022}.
Given the bids of the BSPs for a certain quarter or day and knowing System Imbalance, the imbalance price can be reconstructed using the calculation provided by Elia. During this thesis, the system imbalance is assumed to be almost the same as the Net Regulation Volume. This is a simplification but it is a good approximation. The goal of this thesis is to model the Net Regulation Volume which can then be used to reconstruct the imbalance price and to make decisions on when to buy or sell electricity.
Given the bids of the BSPs for a certain quarter or day and knowing System Imbalance, the imbalance price can be reconstructed using the calculation provided by Elia. During this thesis, the system imbalance is assumed to be almost the same as the Net Regulation Volume. This is a simplification but it is a good approximation. The goal of this thesis is to model the Net Regulation Volume which can then be used to reconstruct the imbalance price and to make decisions on when to buy or sell electricity. To reconstruct the imbalance price from the NRV value for a certain quarter, the bids of the BSPs are needed. These bids can be transformed into a bid ladder. This bid ladder aggregates the bids of the BSPs and shows the total price for the activation of a certain volume of energy. This way, the highest marginal prices can easily be determined for the activation of a certain volume of energy. A bid ladder is shown in Figure \ref{fig:bid_ladder}.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{images/bid_ladder.png}
\caption{Example of a bid ladder. The bid ladder shows the total price for the activation of a certain volume of energy.}
\label{fig:bid_ladder}
\end{figure}
\section{Generative modeling}
Forecasting the imbalance price is a difficult task. The price is influenced by many different factors like the weather, time of day, ... but also by the formulas used by the TSO to calculate the imbalance price. The formulas can change which results in a different imbalance price distribution. This makes it hard to train a model to forecast the imbalance price using historical data. Another method to forecast the imbalance price is to forecast the Net Regulation Volume (NRV) and then use the formulas provided by the TSO to calculate the imbalance price. This way, the model does not need to learn the imbalance price distribution but only the NRV distribution.
@@ -103,16 +109,16 @@ In this thesis, generative modeling can be used to model the NRV of the Belgian
There exist many different types of generative models. Some of the most popular ones are:
\begin{itemize}
\item Generative Adversarial Networks (GANs)
\item Variational Autoencoders (VAEs)
\item Normalizing Flows
\item Diffusion models
\item Generative Adversarial Networks (GANs) \cite{goodfellow_generative_2014}
\item Variational Autoencoders (VAEs) \cite{kingma_auto-encoding_2022}
\item Normalizing Flows \cite{rezende_variational_2015}
\item Diffusion models \cite{sohl-dickstein_deep_2015}
\end{itemize}
\subsection{Quantile Regression}
Any feedforward neural network can also be used to output distributions for the target values. For example, if the distribution is assumed to be normal, the model can output the mean and the variance of the target value. This way, the model can output a distribution for the target value instead of a single forecast value. The outputted distribution allows for multiple samples to be drawn from the distribution. This can be used to generate multiple full-day generations of the NRV.
This method requires that the distributions of the target values be known in advance, or at least assumed. However, it is common for these distributions to be unknown. Fortunately, there is an alternative approach that can estimate the distribution of the target values without prior knowledge of the distribution. This technique is known as quantile regression.
This method requires that the distributions of the target values be known in advance, or at least assumed. However, it is common for these distributions to be unknown. Fortunately, there is an alternative approach that can estimate the distribution of the target values without prior knowledge of the distribution. This technique is known as quantile regression, introduced by Koenker and Bassett \cite{koenker_regression_1978}.
Quantile regression is a method that uses feedforward neural networks to estimate multiple quantiles of the target values. A quantile is a statistical value of a random variable below which a certain proportion of observations fall. For example, the 25th quantile is the value below which 25\% of the observations fall. By estimating multiple quantiles using quantile regression, the distribution of the target values can be reconstructed. For each quarter of the day, the quantiles of the NRV are estimated by the model and used to reconstruct the distributions of the NRV. For each quarter of the day, a distribution can be reconstructed and samples can be drawn from this distribution. This way, multiple full-day generations of the NRV can be generated.
@@ -123,10 +129,8 @@ Quantile regression is a method that uses feedforward neural networks to estimat
\label{fig:quantile_example}
\end{figure}
The model outputs quantiles that can be used to reconstruct the cumulative distribution function of a target NRV value. This distribution can then be used to sample the NRV value for a quarter. An example of the output of a quantile regression model is shown in figure \ref{fig:quantile_regression_example}. The output values of the different quantiles are plotted and interpolated to get the cumulative distribution function. In this thesis, the quantiles used are 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These are chosen to get a good approximation of the cumulative distribution function. More quantiles at the tails of the distribution are used because the edges of the distribution are more important for the imbalance price calculation.
% TODO: edges important?
The model outputs quantiles that can be used to reconstruct the cumulative distribution function of a target NRV value. This distribution can then be used to sample the NRV value for a quarter. An example of the output of a quantile regression model is shown in figure \ref{fig:quantile_regression_example}. The output values of the different quantiles are plotted and interpolated to get the cumulative distribution function. In this thesis, the quantiles used are 1\%, 5\%, 10\%, 15\%, 30\%, 40\%, 50\%, 60\%, 70\%, 85\%, 90\%, 95\%, and 99\%. These are chosen to get a good approximation of the cumulative distribution function. More quantiles at the tails of the distribution are used because the edges of the distribution are important. They capture extreme outcomes, which are crucial for risk management, decision-making under uncertainty, and ensuring model robustness and accuracy.
TODO: figure goes under 0, maybe use other values or other interpolation? + inverse the values to real values
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{images/quantile_regression/reconstructed_cdf.png}
@@ -244,7 +248,7 @@ The input feature structure is designed to provide the model with a comprehensiv
Diffusion models are a type of probabilistic model designed to generate high-quality, diverse samples from complex data distributions. The way this type of model is trained is unique. The model is trained to reverse an iterative noise process that is applied to the data. This process is called the diffusion process. The model denoises the data in each iteration. During the training, the model learns to reverse the diffusion process. A training sample is transformed into a noise sample by applying the diffusion process. The model is then trained to recover the original sample from the noise sample. The model is trained to maximize the likelihood of the data given the noise. By doing this, the model learns to generate samples from the data distribution. Starting from the noise, the model can generate samples that look like the data. The model can also be conditioned on additional information to generate samples that follow other distributions.
\subsubsection{Applications}
Diffusion models gained popularity in the field of computer vision. They are used for inpainting, super-resolution, image generation, image editing etc. The paper introducing "Denoising Diffusion Probabilistic Models" (DDPM) \parencite{ho_denoising_2020} showed that diffusion models can achieve state-of-the-art results in image generation. This type of model was then applied to other fields like text generation, audio generation etc. The most popular application of diffusion models is still image generation. Many different models and products exist that make use of diffusion models to generate images. Some examples are DALL·E, Stable Diffusion, Midjourney, etc. These models can generate or edit images based on a given text description.
Diffusion models gained popularity in the field of computer vision. They are used for inpainting, super-resolution, image generation, image editing etc. The paper introducing "Denoising Diffusion Probabilistic Models" (DDPM) by \citet{ho_denoising_2020} showed that diffusion models can achieve state-of-the-art results in image generation. This type of model was then applied to other fields like text generation, audio generation etc. The most popular application of diffusion models is still image generation. Many different models and products exist that make use of diffusion models to generate images. Some examples are DALL·E, Stable Diffusion, Midjourney, etc. These models can generate or edit images based on a given text description.
This method can also be applied to other fields like audio generation, text generation etc. In this thesis, diffusion models are explored to model time series data conditioned on additional information. A small example of the diffusion process is shown in Figure \ref{fig:diffusion_example}. An image of a cat is generated by starting from noise and iteratively denoising the image.
@@ -276,7 +280,6 @@ The generation process is quite different in comparison to other models. For exa
In the reverse process, each step aims to undo the diffusion by estimating what the previous, less noisy state might have been. This is done using a series of conditional Gaussian distributions $p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t)$. For each of these Gaussians, a neural network with parameters $\theta$ is used to estimate the mean $\mu_{\theta}(\mathbf{x}_t, t)$ and the covariance $\Sigma_{\theta}(\mathbf{x}_t, t)$ of the distribution. The joint distribution $p_{\theta}(\mathbf{x}_{0:T})$ is then the product of the marginal distribution of the last timestep $p(\mathbf{x}_T)$ and the conditional distributions $p_{\theta}(\mathbf{x}_{t-1}|\mathbf{x}_t)$ for each timestep.
\item \textbf{Training} \\
TODO: explain better! \\
The model training is done by optimizing the variational bound of the negative log-likelihood. This is also called the evidence lower bound (ELBO) in the context of generative models.
\begin{align*}
\log p(x) \geq & \mathbb{E}_q \left[ \log p_{\theta} (x_0 | x_1) | x_1 , x_0 \right] \\
@@ -295,7 +298,7 @@ The diffusion process can be seen in Figure \ref{fig:diffusion_process}. The mod
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{images/diffusion/diffusion_graphical_model.png}
\caption[Diffusion process]{Diffusion process \parencite{ho2020denoising}.}
\caption[Diffusion process]{Diffusion process \cite{ho_denoising_2020}.}
\label{fig:diffusion_process}
\end{figure}
@@ -319,7 +322,7 @@ MSE is more sensitive to outliers than MAE because it squares the error between
MSE = \frac{1}{N} \sum_{i=1}^{N} \frac{1}{96} \sum_{j=1}^{96} (y_{ij} - \hat{y}_{ij})^2
\end{equation}
The MAE and MSE metrics do not compare the distribution of the NRV to the real NRV value but only take into account the sampled values. Evaluating the outputted distribution for the NRV must be done differently. The Continuous Ranked Probability Score (CRPS) can be used to evaluate the distribution to the real NRV value. The CRPS metric is used to evaluate the accuracy of the predicted cumulative distribution function. The CRPS can be seen as a generalization of the MAE for probabilistic forecasts. The formula for the CRPS is:
The MAE and MSE metrics do not compare the distribution of the NRV to the real NRV value but only take into account the sampled values. Evaluating the outputted distribution for the NRV must be done differently. The Continuous Ranked Probability Score (CRPS) \cite{gneiting_strictly_2007} can be used to compare the predicted distribution with the real NRV value. The CRPS metric is used to evaluate the accuracy of the predicted cumulative distribution function. The CRPS can be seen as a generalization of the MAE for probabilistic forecasts. The formula for the CRPS is:
\begin{equation}
CRPS(F, x) = \int_{-\infty}^{\infty} \left(F(y) - \mathbbm{1}(y \geq x)\right)^2 \, dy
@@ -337,10 +340,9 @@ The MAE and MSE metrics do not compare the distribution of the NRV to the real N
The mean CRPS can be calculated over the different days to get a single value. The lower this value, the better the NRV is modeled. The CRPS metric can be visualized as shown in figure \ref{fig:crps_visualization}. The CRPS is the area between the predicted cumulative distribution function and the Heaviside step function centered at the observed value; the smaller this area, the better the NRV is modeled.
TODO: improve visualisation? -> echte NRV + y as cummulative prob
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{images/quantile_regression/crps_visualization.png}
\includegraphics[width=\textwidth]{images/quantile_regression/crps_visualization.png}
\caption{Visualization of the CRPS metric}
\label{fig:crps_visualization}
\end{figure}

View File

@@ -1,12 +1,14 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\citation{commission_for_electricity_and_gas_regulation_creg_study_2023}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{2}{section.1}\protected@file@percent }
\ACRO{recordpage}{TSO}{3}{1}{2}
\ACRO{recordpage}{TSO}{3}{1}{2}
\ACRO{recordpage}{NRV}{3}{1}{2}
\ACRO{recordpage}{NRV}{3}{1}{2}
\ACRO{recordpage}{NRV}{3}{1}{2}
\ACRO{recordpage}{NRV}{4}{1}{3}
\ACRO{recordpage}{NRV}{4}{1}{3}
\@setckpt{sections/introduction}{
\setcounter{page}{3}
\setcounter{page}{4}
\setcounter{equation}{0}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
@@ -28,130 +30,7 @@
\setcounter{continuedfloat}{0}
\setcounter{subfigure}{0}
\setcounter{subtable}{0}
\setcounter{tabx@nest}{0}
\setcounter{listtotal}{0}
\setcounter{listcount}{0}
\setcounter{liststart}{0}
\setcounter{liststop}{0}
\setcounter{citecount}{0}
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{0}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
\setcounter{minitems}{1}
\setcounter{citecounter}{0}
\setcounter{maxcitecounter}{0}
\setcounter{savedcitecounter}{0}
\setcounter{uniquelist}{0}
\setcounter{uniquename}{0}
\setcounter{refsection}{0}
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
\setcounter{highnamepenalty}{50}
\setcounter{lownamepenalty}{25}
\setcounter{maxparens}{3}
\setcounter{parenlevel}{0}
\setcounter{blx@maxsection}{0}
\setcounter{mincomprange}{10}
\setcounter{maxcomprange}{100000}
\setcounter{mincompwidth}{1}
\setcounter{afterword}{0}
\setcounter{savedafterword}{0}
\setcounter{annotator}{0}
\setcounter{savedannotator}{0}
\setcounter{author}{0}
\setcounter{savedauthor}{0}
\setcounter{bookauthor}{0}
\setcounter{savedbookauthor}{0}
\setcounter{commentator}{0}
\setcounter{savedcommentator}{0}
\setcounter{editor}{0}
\setcounter{savededitor}{0}
\setcounter{editora}{0}
\setcounter{savededitora}{0}
\setcounter{editorb}{0}
\setcounter{savededitorb}{0}
\setcounter{editorc}{0}
\setcounter{savededitorc}{0}
\setcounter{foreword}{0}
\setcounter{savedforeword}{0}
\setcounter{holder}{0}
\setcounter{savedholder}{0}
\setcounter{introduction}{0}
\setcounter{savedintroduction}{0}
\setcounter{namea}{0}
\setcounter{savednamea}{0}
\setcounter{nameb}{0}
\setcounter{savednameb}{0}
\setcounter{namec}{0}
\setcounter{savednamec}{0}
\setcounter{translator}{0}
\setcounter{savedtranslator}{0}
\setcounter{shortauthor}{0}
\setcounter{savedshortauthor}{0}
\setcounter{shorteditor}{0}
\setcounter{savedshorteditor}{0}
\setcounter{narrator}{0}
\setcounter{savednarrator}{0}
\setcounter{execproducer}{0}
\setcounter{savedexecproducer}{0}
\setcounter{execdirector}{0}
\setcounter{savedexecdirector}{0}
\setcounter{with}{0}
\setcounter{savedwith}{0}
\setcounter{labelname}{0}
\setcounter{savedlabelname}{0}
\setcounter{institution}{0}
\setcounter{savedinstitution}{0}
\setcounter{lista}{0}
\setcounter{savedlista}{0}
\setcounter{listb}{0}
\setcounter{savedlistb}{0}
\setcounter{listc}{0}
\setcounter{savedlistc}{0}
\setcounter{listd}{0}
\setcounter{savedlistd}{0}
\setcounter{liste}{0}
\setcounter{savedliste}{0}
\setcounter{listf}{0}
\setcounter{savedlistf}{0}
\setcounter{location}{0}
\setcounter{savedlocation}{0}
\setcounter{organization}{0}
\setcounter{savedorganization}{0}
\setcounter{origlocation}{0}
\setcounter{savedoriglocation}{0}
\setcounter{origpublisher}{0}
\setcounter{savedorigpublisher}{0}
\setcounter{publisher}{0}
\setcounter{savedpublisher}{0}
\setcounter{language}{0}
\setcounter{savedlanguage}{0}
\setcounter{origlanguage}{0}
\setcounter{savedoriglanguage}{0}
\setcounter{citation}{0}
\setcounter{savedcitation}{0}
\setcounter{pageref}{0}
\setcounter{savedpageref}{0}
\setcounter{textcitecount}{0}
\setcounter{textcitetotal}{0}
\setcounter{textcitemaxnames}{0}
\setcounter{biburlbigbreakpenalty}{100}
\setcounter{biburlbreakpenalty}{200}
\setcounter{biburlnumpenalty}{0}
\setcounter{biburlucpenalty}{0}
\setcounter{biburllcpenalty}{0}
\setcounter{smartand}{1}
\setcounter{bbx:relatedcount}{0}
\setcounter{bbx:relatedtotal}{0}
\setcounter{NAT@ctr}{0}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
@@ -174,7 +53,7 @@
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}
\setcounter{g@acro@TSO@int}{1}
\setcounter{g@acro@TSO@int}{2}
\setcounter{g@acro@DSO@int}{0}
\setcounter{g@acro@BRP@int}{0}
\setcounter{g@acro@BSP@int}{0}

View File

@@ -1,7 +1,9 @@
\section{Introduction}
The electricity market is a complex system influenced by numerous factors. The rise of renewable energy sources adds to this complexity, introducing greater volatility compared to traditional energy sources. Renewables, with their unpredictable nature, exacerbate the challenge of maintaining a stable balance between supply and demand. This critical balance is managed by the \ac{TSO}, Elia in Belgium, which utilizes reserves to mitigate any potential shortages or surpluses, directly influencing electricity prices.
Market participants with big enough flexible assets (e.g., industrial batteries) can help keep the grid stable. This helps Elia to use fewer of its reserves, which in turn makes the system cheaper for everyone. The market participants are then paid for their services by Elia. The main goal of the market participants is not to help stabilize the grid, but to make a profit. They can do this by buying electricity when it is cheap and selling it when Elia pays a high price for it.
Market participants with big enough flexible assets (e.g., industrial batteries) can help keep the grid stable. This helps Elia to use fewer of its reserves, which in turn makes the system cheaper for everyone. The market participants are then paid for their services by Elia. The main goal of the market participants is not to help stabilize the grid, but to make a profit. They can do this by buying electricity when it is cheap and selling it when Elia pays a high price for it.
The integration of renewable energy sources has significantly increased the complexity and volatility of the electricity market. Unlike traditional energy sources, renewables such as wind and solar power are inherently variable and less predictable. This variability leads to frequent imbalances between electricity supply and demand, necessitating a greater reliance on reserves to stabilize the grid. Consequently, the needed reserves are increasing as the share of renewable energy in the energy mix grows, making it more challenging for the \ac{TSO} to maintain system stability and manage costs. For instance, the proportion of quarter-hours with negative system imbalances has grown, reflecting the increased volatility introduced by renewables. This increase in volatility directly impacts imbalance prices, often causing them to spike during periods of high renewable generation and drop when renewable output is low. \cite{commission_for_electricity_and_gas_regulation_creg_study_2023}
Forecasting the imbalance price is vital for market participants engaged in buying or selling electricity. It enables them to make informed decisions on the optimal times to buy or sell, aiming to maximize their profits. However, current industry practices often rely on simplistic policies, such as adhering to a fixed price for transactions. This approach is not optimal and overlooks the potential benefits of adaptive policies that consider the forecasted imbalance prices.
@@ -9,4 +11,4 @@ The goal of this thesis is to generatively model the Belgian electricity market.
Forecasting the system imbalance will become increasingly important as the share of renewable energy sources continues to grow.
This thesis can be divided into two main parts. The first part focuses on modeling the \ac{NRV} of the Belgian electricity market for the next day. This modeling is conditioned on multiple inputs that can be obtained from data provided by Elia. The second part of the thesis focuses on optimizing a simple policy using the \ac{NRV} generations for the next day. The policy tries to maximize profit by charging and discharging a battery and thereby buying and selling electricity on the market. Multiple models are trained and tested to model the \ac{NRV} and compared to each other based on their profit optimization.
This thesis can be divided into two main parts. The first part focuses on modeling the \ac{NRV} of the Belgian electricity market for the next day. This modeling is conditioned on multiple inputs that can be obtained from data provided by Elia. The second part of the thesis focuses on optimizing a simple policy using the \ac{NRV} generations for the next day. The policy tries to maximize profit by charging and discharging a battery and thereby buying and selling electricity on the market. Multiple models are trained and tested to model the \ac{NRV} and compared to each other based on their profit optimization.

View File

@@ -1,17 +1,20 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {section}{\numberline {5}Literature Study}{20}{section.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Electricity Price Forecasting}{20}{subsection.5.1}\protected@file@percent }
\ACRO{recordpage}{GAN}{21}{1}{20}
\ACRO{recordpage}{CTSGAN}{21}{1}{20}
\ACRO{recordpage}{MLP}{22}{1}{21}
\ACRO{recordpage}{GP}{22}{1}{21}
\ACRO{recordpage}{TSPA}{22}{1}{21}
\ACRO{recordpage}{PLF}{22}{1}{21}
\ACRO{recordpage}{CRPS}{22}{1}{21}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Policies for Battery Optimization}{21}{subsection.5.2}\protected@file@percent }
\citation{weron_electricity_2014}
\citation{poggi_electricity_2023}
\citation{lago_forecasting_2018}
\citation{hagfors_modeling_2016}
\citation{lu_scenarios_2022}
\citation{dumas_deep_2022}
\citation{rasul_autoregressive_2021}
\@writefile{toc}{\contentsline {section}{\numberline {5}Literature Study}{22}{section.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Day-Ahead Electricity Price Forecasting}{22}{subsection.5.1}\protected@file@percent }
\citation{dumas_probabilistic_2019}
\citation{narajewski_probabilistic_2022}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Imbalance Price Forecasting}{23}{subsection.5.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Policies for Battery Optimization}{23}{subsection.5.3}\protected@file@percent }
\@setckpt{sections/literature_study}{
\setcounter{page}{22}
\setcounter{page}{24}
\setcounter{equation}{7}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
@@ -21,11 +24,11 @@
\setcounter{mpfootnote}{0}
\setcounter{part}{0}
\setcounter{section}{5}
\setcounter{subsection}{2}
\setcounter{subsection}{3}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{6}
\setcounter{figure}{7}
\setcounter{table}{2}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -33,153 +36,30 @@
\setcounter{continuedfloat}{0}
\setcounter{subfigure}{0}
\setcounter{subtable}{0}
\setcounter{tabx@nest}{0}
\setcounter{listtotal}{0}
\setcounter{listcount}{0}
\setcounter{liststart}{0}
\setcounter{liststop}{0}
\setcounter{citecount}{0}
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{7}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
\setcounter{minitems}{1}
\setcounter{citecounter}{0}
\setcounter{maxcitecounter}{0}
\setcounter{savedcitecounter}{0}
\setcounter{uniquelist}{0}
\setcounter{uniquename}{0}
\setcounter{refsection}{0}
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
\setcounter{highnamepenalty}{50}
\setcounter{lownamepenalty}{25}
\setcounter{maxparens}{3}
\setcounter{parenlevel}{0}
\setcounter{blx@maxsection}{0}
\setcounter{mincomprange}{10}
\setcounter{maxcomprange}{100000}
\setcounter{mincompwidth}{1}
\setcounter{afterword}{0}
\setcounter{savedafterword}{0}
\setcounter{annotator}{0}
\setcounter{savedannotator}{0}
\setcounter{author}{0}
\setcounter{savedauthor}{0}
\setcounter{bookauthor}{0}
\setcounter{savedbookauthor}{0}
\setcounter{commentator}{0}
\setcounter{savedcommentator}{0}
\setcounter{editor}{0}
\setcounter{savededitor}{0}
\setcounter{editora}{0}
\setcounter{savededitora}{0}
\setcounter{editorb}{0}
\setcounter{savededitorb}{0}
\setcounter{editorc}{0}
\setcounter{savededitorc}{0}
\setcounter{foreword}{0}
\setcounter{savedforeword}{0}
\setcounter{holder}{0}
\setcounter{savedholder}{0}
\setcounter{introduction}{0}
\setcounter{savedintroduction}{0}
\setcounter{namea}{0}
\setcounter{savednamea}{0}
\setcounter{nameb}{0}
\setcounter{savednameb}{0}
\setcounter{namec}{0}
\setcounter{savednamec}{0}
\setcounter{translator}{0}
\setcounter{savedtranslator}{0}
\setcounter{shortauthor}{0}
\setcounter{savedshortauthor}{0}
\setcounter{shorteditor}{0}
\setcounter{savedshorteditor}{0}
\setcounter{narrator}{0}
\setcounter{savednarrator}{0}
\setcounter{execproducer}{0}
\setcounter{savedexecproducer}{0}
\setcounter{execdirector}{0}
\setcounter{savedexecdirector}{0}
\setcounter{with}{0}
\setcounter{savedwith}{0}
\setcounter{labelname}{0}
\setcounter{savedlabelname}{0}
\setcounter{institution}{0}
\setcounter{savedinstitution}{0}
\setcounter{lista}{0}
\setcounter{savedlista}{0}
\setcounter{listb}{0}
\setcounter{savedlistb}{0}
\setcounter{listc}{0}
\setcounter{savedlistc}{0}
\setcounter{listd}{0}
\setcounter{savedlistd}{0}
\setcounter{liste}{0}
\setcounter{savedliste}{0}
\setcounter{listf}{0}
\setcounter{savedlistf}{0}
\setcounter{location}{0}
\setcounter{savedlocation}{0}
\setcounter{organization}{0}
\setcounter{savedorganization}{0}
\setcounter{origlocation}{0}
\setcounter{savedoriglocation}{0}
\setcounter{origpublisher}{0}
\setcounter{savedorigpublisher}{0}
\setcounter{publisher}{0}
\setcounter{savedpublisher}{0}
\setcounter{language}{0}
\setcounter{savedlanguage}{0}
\setcounter{origlanguage}{0}
\setcounter{savedoriglanguage}{0}
\setcounter{citation}{0}
\setcounter{savedcitation}{0}
\setcounter{pageref}{0}
\setcounter{savedpageref}{0}
\setcounter{textcitecount}{0}
\setcounter{textcitetotal}{0}
\setcounter{textcitemaxnames}{0}
\setcounter{biburlbigbreakpenalty}{100}
\setcounter{biburlbreakpenalty}{200}
\setcounter{biburlnumpenalty}{0}
\setcounter{biburlucpenalty}{0}
\setcounter{biburllcpenalty}{0}
\setcounter{smartand}{1}
\setcounter{bbx:relatedcount}{0}
\setcounter{bbx:relatedtotal}{0}
\setcounter{NAT@ctr}{0}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{20}
\setcounter{bookmark@seq@number}{21}
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{0}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{1}
\setcounter{g@acro@CTSGAN@int}{1}
\setcounter{g@acro@GAN@int}{0}
\setcounter{g@acro@CTSGAN@int}{0}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{1}
\setcounter{g@acro@GP@int}{1}
\setcounter{g@acro@MLP@int}{0}
\setcounter{g@acro@GP@int}{0}
\setcounter{g@acro@MSE@int}{0}
\setcounter{g@acro@MAE@int}{0}
\setcounter{g@acro@CRPS@int}{1}
\setcounter{g@acro@TSPA@int}{1}
\setcounter{g@acro@PLF@int}{1}
\setcounter{g@acro@CRPS@int}{0}
\setcounter{g@acro@TSPA@int}{0}
\setcounter{g@acro@PLF@int}{0}
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}
\setcounter{g@acro@TSO@int}{2}
\setcounter{g@acro@TSO@int}{3}
\setcounter{g@acro@DSO@int}{0}
\setcounter{g@acro@BRP@int}{1}
\setcounter{g@acro@BSP@int}{1}

View File

@@ -2,14 +2,28 @@
% - Literatuur forecasting imbalance price
% - Literatuur policies adhv forecasts
\subsection{Day-Ahead Electricity Price Forecasting}
Forecasting electricity prices is crucial for market participants aiming to make informed decisions and optimize their operations and profits. Since the early 2000s, significant research has focused on predicting day-ahead electricity prices. Initial models relied on time series analysis, using methods such as autoregression and moving averages (ARMA, ARIMA). However, these often fell short in capturing the complex and nonlinear patterns of electricity prices.
\subsection{Electricity Price Forecasting}
Forecasting the electricity price is a challenging task that has been researched extensively. Knowing the future electricity price is crucial for market participants to make informed decisions and optimize their operations and profit. Already since the early 2000s, researchers have been trying to predict the electricity price. The first models were based on time series analysis, but with the rise of machine learning, more advanced models have been developed. A rise in publications on this topic can be observed since 2005. This is described in the literature review by \parencite{weron_electricity_2014}. An overview is given of the evolution of the methods used for electricity price forecasting. A significant shift can be observed towards integrating machine learning techniques with traditional statistical methods. The earliest models were based on time series analysis involving methods like autoregression, moving averages and their combinations (ARMA, ARIMA). These methods are not always able to capture the complex patterns in the electricity price. Therefore, researchers started to use more advanced models like neural networks, support vector machines, and random forests. The combination of statistical and machine learning models is more accurate. The statistical models are used to capture the linear patterns, while the machine learning models are used to capture the more complex non-linear patterns. This results in a more accurate and robust model. The more recent paper \parencite{poggi_electricity_2023} compares the performance of statistical and machine learning methods for electricity price forecasting. The authors use ARIMA and SARIMA as statistical methods and XGBoost as a machine learning method. They also compare the performance of Long Short-Term Memory (LSTM) networks for electricity price forecasting.
With the rise of machine learning, more advanced models have emerged. A notable increase in publications since 2005, highlighted by \citet{weron_electricity_2014}, marks a shift towards integrating machine learning with traditional statistical methods. Early models like ARMA and ARIMA focused on linear patterns, but the complexity of price movements led to the adoption of more sophisticated models. Researchers began using neural networks, support vector machines, and random forests to better capture nonlinear aspects of price behavior.
Because forecasting the electricity price is a challenging task with a lot of uncertainty, other generative methods to model the electricity price were researched. Generative modeling is a type of unsupervised learning that can be used to generate new samples from the same distribution as the training data. This can be used to generate new electricity price samples. The authors of \parencite{lu_scenarios_2022} use \acfp{GAN} to generate new electricity price scenarios. They introduce a deep learning framework called \acf{CTSGAN} to generate electricity price scenarios. This enhances the traditional forecasting models by allowing the generation of a diverse set of potential future scenarios. This capability allows the modeling of the uncertainty in the electricity price. The authors show that the CTSGAN model outperforms traditional forecasting models in terms of forecasting accuracy. Other generative models like normalizing flows can also be used to generate new electricity price samples. The authors of \parencite{dumas_deep_2022} use normalizing flows to generate new electricity price samples. They show that normalizing flow models for electricity price forecasting are more accurate in quality than other generative models like GANs and Variational Autoencoders (VAEs). Not a lot of research has been done on using diffusion models for electricity price forecasting. The authors of \parencite{rasul_autoregressive_2021}, however, show that autoregressive diffusion models can be used for time series forecasting and achieve good results. They apply the model on multiple datasets which includes an electricity price dataset. The use of diffusion models for NRV modeling is further explored in this thesis.
Combining statistical and machine learning models has proven more accurate. Statistical models capture linear patterns, while machine learning models identify complex, nonlinear patterns. This hybrid approach results in robust forecasting models. For instance, \citet{poggi_electricity_2023} compare ARIMA and SARIMA with machine learning methods like XGBoost and Long Short-Term Memory (LSTM) networks.
Most research on forecasting for the electricity market focuses on the day-ahead electricity price. Another important aspect of the electricity market, however, is the imbalance price. Not many papers have been published on forecasting the imbalance price. The authors of \parencite{dumas_probabilistic_2019} describe the forecasting of the imbalance price. They use a two-step approach that is also used in this thesis. First, a forecast is made for the Net Regulation Volume (NRV). This forecast is then converted into an imbalance price forecast using data published by the Transmission System Operator (TSO). The authors compare several methods including a deterministic \acf{MLP}, a probabilistic technique using \acfp{GP} and a \acf{TSPA}. The probabilistic techniques are evaluated using the \acf{PLF} and the \acf{CRPS}. The authors show that the two-step probabilistic approach outperforms other approaches on probabilistic error measures but is less accurate at predicting the precise imbalance prices.
\citet{lago_forecasting_2018} introduce a novel deep learning framework for forecasting electricity prices, comparing traditional algorithms with deep learning approaches. Their results show that deep learning models, such as LSTM and GRU networks, significantly improve predictive accuracy over traditional statistical models. This comprehensive benchmarking study highlights the potential of deep learning models to enhance the accuracy and reliability of electricity price forecasts, making them valuable for market participants.
Given the challenges and uncertainties in forecasting electricity prices, researchers have also tried to model the prices. Generative modeling, an unsupervised learning approach, generates new samples mimicking the training data distribution, helping model price uncertainty. The paper by \citet{hagfors_modeling_2016} uses Quantile Regression to model the electricity prices. This is a method that outputs the quantiles of the distribution of the target variable. This can be used to model the uncertainty in electricity prices.
More advanced modeling techniques can also be used. \citet{lu_scenarios_2022} utilize Generative Adversarial Networks (GANs) to generate new electricity price scenarios for the Australian market, introducing a deep learning framework called CTSGAN. This framework enhances traditional models by generating diverse potential future scenarios, improving accuracy.
Other generative models, such as normalizing flows, have also been used to generate new electricity price samples. \citet{dumas_deep_2022} show that normalizing flow models are more accurate than GANs and Variational Autoencoders (VAEs) for electricity price forecasting. While diffusion models are less researched, \citet{rasul_autoregressive_2021} demonstrate that autoregressive diffusion models can effectively forecast time series, including electricity prices. This thesis further explores the use of diffusion models for NRV (Net Regulation Volume) modeling.
\subsection{Imbalance Price Forecasting}
This thesis focuses on forecasting the imbalance price, which, while related to the day-ahead electricity price, is more volatile and harder to predict. Techniques used for day-ahead price forecasting can be applied to imbalance prices, but the latter presents additional challenges due to its greater variability. Most research on electricity market forecasting has centered on day-ahead prices, with fewer studies addressing imbalance prices.
\citet{dumas_probabilistic_2019} provide a notable study on imbalance price forecasting. They use a two-step approach, first forecasting the Net Regulation Volume (NRV) and then converting this forecast into an imbalance price using data from the Transmission System Operator (TSO). This approach leverages various methods, including a deterministic Multilayer Perceptron (MLP), Gaussian Processes (GP), and a Two-Stage Probabilistic Approach (TSPA). Their findings indicate that the two-step probabilistic approach outperforms other methods in terms of probabilistic error measures, although it is less precise in predicting exact imbalance prices.
Another study by \citet{narajewski_probabilistic_2022} examines short-term forecasting of German imbalance prices using methods such as Lasso regression, Generalized Additive Models for Location, Scale, and Shape (GAMLSS), and probabilistic neural networks. Their results show that while these advanced models do not significantly outperform a simple benchmark in terms of point forecasts, they provide more reliable probabilistic forecasts. This is particularly valuable for market participants who need to manage the inherent uncertainty and volatility in imbalance prices.
TODO: more information?
\subsection{Policies for Battery Optimization}
\subsection{Policies for Battery Optimization}

View File

@@ -1,11 +1,11 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {section}{\numberline {4}Policies}{18}{section.4}\protected@file@percent }
\newlabel{sec:policies}{{4}{18}{Policies}{section.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Baselines}{18}{subsection.4.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Policies based on NRV generations}{19}{subsection.4.2}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4}Policies}{20}{section.4}\protected@file@percent }
\newlabel{sec:policies}{{4}{20}{Policies}{section.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Baselines}{20}{subsection.4.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Policies based on NRV generations}{21}{subsection.4.2}\protected@file@percent }
\@setckpt{sections/policies}{
\setcounter{page}{20}
\setcounter{page}{22}
\setcounter{equation}{7}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
@@ -19,7 +19,7 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{6}
\setcounter{figure}{7}
\setcounter{table}{2}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -27,130 +27,7 @@
\setcounter{continuedfloat}{0}
\setcounter{subfigure}{0}
\setcounter{subtable}{0}
\setcounter{tabx@nest}{0}
\setcounter{listtotal}{0}
\setcounter{listcount}{0}
\setcounter{liststart}{0}
\setcounter{liststop}{0}
\setcounter{citecount}{0}
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{1}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
\setcounter{minitems}{1}
\setcounter{citecounter}{0}
\setcounter{maxcitecounter}{0}
\setcounter{savedcitecounter}{0}
\setcounter{uniquelist}{0}
\setcounter{uniquename}{0}
\setcounter{refsection}{0}
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
\setcounter{highnamepenalty}{50}
\setcounter{lownamepenalty}{25}
\setcounter{maxparens}{3}
\setcounter{parenlevel}{0}
\setcounter{blx@maxsection}{0}
\setcounter{mincomprange}{10}
\setcounter{maxcomprange}{100000}
\setcounter{mincompwidth}{1}
\setcounter{afterword}{0}
\setcounter{savedafterword}{0}
\setcounter{annotator}{0}
\setcounter{savedannotator}{0}
\setcounter{author}{0}
\setcounter{savedauthor}{0}
\setcounter{bookauthor}{0}
\setcounter{savedbookauthor}{0}
\setcounter{commentator}{0}
\setcounter{savedcommentator}{0}
\setcounter{editor}{0}
\setcounter{savededitor}{0}
\setcounter{editora}{0}
\setcounter{savededitora}{0}
\setcounter{editorb}{0}
\setcounter{savededitorb}{0}
\setcounter{editorc}{0}
\setcounter{savededitorc}{0}
\setcounter{foreword}{0}
\setcounter{savedforeword}{0}
\setcounter{holder}{0}
\setcounter{savedholder}{0}
\setcounter{introduction}{0}
\setcounter{savedintroduction}{0}
\setcounter{namea}{0}
\setcounter{savednamea}{0}
\setcounter{nameb}{0}
\setcounter{savednameb}{0}
\setcounter{namec}{0}
\setcounter{savednamec}{0}
\setcounter{translator}{0}
\setcounter{savedtranslator}{0}
\setcounter{shortauthor}{0}
\setcounter{savedshortauthor}{0}
\setcounter{shorteditor}{0}
\setcounter{savedshorteditor}{0}
\setcounter{narrator}{0}
\setcounter{savednarrator}{0}
\setcounter{execproducer}{0}
\setcounter{savedexecproducer}{0}
\setcounter{execdirector}{0}
\setcounter{savedexecdirector}{0}
\setcounter{with}{0}
\setcounter{savedwith}{0}
\setcounter{labelname}{0}
\setcounter{savedlabelname}{0}
\setcounter{institution}{0}
\setcounter{savedinstitution}{0}
\setcounter{lista}{0}
\setcounter{savedlista}{0}
\setcounter{listb}{0}
\setcounter{savedlistb}{0}
\setcounter{listc}{0}
\setcounter{savedlistc}{0}
\setcounter{listd}{0}
\setcounter{savedlistd}{0}
\setcounter{liste}{0}
\setcounter{savedliste}{0}
\setcounter{listf}{0}
\setcounter{savedlistf}{0}
\setcounter{location}{0}
\setcounter{savedlocation}{0}
\setcounter{organization}{0}
\setcounter{savedorganization}{0}
\setcounter{origlocation}{0}
\setcounter{savedoriglocation}{0}
\setcounter{origpublisher}{0}
\setcounter{savedorigpublisher}{0}
\setcounter{publisher}{0}
\setcounter{savedpublisher}{0}
\setcounter{language}{0}
\setcounter{savedlanguage}{0}
\setcounter{origlanguage}{0}
\setcounter{savedoriglanguage}{0}
\setcounter{citation}{0}
\setcounter{savedcitation}{0}
\setcounter{pageref}{0}
\setcounter{savedpageref}{0}
\setcounter{textcitecount}{0}
\setcounter{textcitetotal}{0}
\setcounter{textcitemaxnames}{0}
\setcounter{biburlbigbreakpenalty}{100}
\setcounter{biburlbreakpenalty}{200}
\setcounter{biburlnumpenalty}{0}
\setcounter{biburlucpenalty}{0}
\setcounter{biburllcpenalty}{0}
\setcounter{smartand}{1}
\setcounter{bbx:relatedcount}{0}
\setcounter{bbx:relatedtotal}{0}
\setcounter{NAT@ctr}{0}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
@@ -173,7 +50,7 @@
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}
\setcounter{g@acro@TSO@int}{2}
\setcounter{g@acro@TSO@int}{3}
\setcounter{g@acro@DSO@int}{0}
\setcounter{g@acro@BRP@int}{1}
\setcounter{g@acro@BSP@int}{1}

View File

@@ -2,8 +2,8 @@
As discussed in the background information, the imbalance prices are based on the Net Regulation Volume (NRV). This means that the imbalance prices can be reconstructed from the sampled NRV. Multiple baselines and models will be compared that forecast and model the NRV using different metrics. The data utilized in this thesis is provided by Elia. Elia makes a lot of data public and provides it in quarter-hour or minute intervals. The data used in this thesis is on a quarter-hourly basis. This makes the number of input features and output features much more manageable and makes the training more computationally efficient. A full-day sample of the NRV consists of 96 values, one for every quarter-hour. Further research could be done using smaller data intervals to see if this improves the models.
\subsection{Data}
Elia offers a variety of data on their website \cite{elia_open_data}. They provide data for the following categories:
(TODO: Relevant? or too much information?)
Elia offers a variety of data on their website \cite{noauthor_welcome_nodate}. They provide data for the following categories:
\begin{itemize}
\item Balancing
\item Transmission
@@ -15,31 +15,28 @@ Elia offers a variety of data on their website \cite{elia_open_data}. They provi
The data useful to model the NRV is scattered over multiple categories. The data used in this thesis is the following:
% TODO: better citations
\begin{itemize}
\item \textbf{Imbalance prices per quarter-hour (Historical data) } \\
% https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime
This dataset contains the NRV and system imbalance in a quarter-hour interval. The data is available from 01-01-2015 to the present day. The NRV is used as the target variable that needs to be modeled but can also be used as input features. The next day NRV modeling can be conditioned on the real NRV of the previous day. \parencite{noauthor_imbalance_nodate}
This dataset contains the NRV and system imbalance in a quarter-hour interval. The data is available from 01-01-2015 to the present day. The NRV is used as the target variable that needs to be modeled but can also be used as input features. The next day NRV modeling can be conditioned on the real NRV of the previous day. \cite{noauthor_imbalance_nodate}
\item \textbf{Measured and forecasted total load on the Belgian grid (Historical data)} \\
% https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime
Elia publishes what the total load on the Belgian grid is. This data is also provided in a quarter-hour interval. This data consists of the real load for a certain quarter but also the different forecasted loads. There are day-ahead and week-ahead forecasts available. The total load on the Belgian grid can be used as input features for the NRV modeling. The data is also available from 01-01-2015 to the present day. \parencite{noauthor_measured_nodate}
Elia publishes what the total load on the Belgian grid is. This data is also provided in a quarter-hour interval. This data consists of the real load for a certain quarter but also the different forecasted loads. There are day-ahead and week-ahead forecasts available. The total load on the Belgian grid can be used as input features for the NRV modeling. The data is also available from 01-01-2015 to the present day. \cite{noauthor_measured_nodate}
\item \textbf{Photovoltaic power production estimation and forecast on Belgian grid (Historical)} \\
% https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime
The photovoltaic power production is also available in a quarter-hour interval. The production is also forecasted day-ahead and week-ahead. The data is provided for each of the provinces in Belgium. Forecasts are also available for the 3 Belgian regions (Flanders, Wallonia, Brussels) and the total Belgian production. The photovoltaic data has been provided since 01-04-2018 and is available to the present day. \parencite{noauthor_photovoltaic_nodate}
The photovoltaic power production is also available in a quarter-hour interval. The production is also forecasted day-ahead and week-ahead. The data is provided for each of the provinces in Belgium. Forecasts are also available for the 3 Belgian regions (Flanders, Wallonia, Brussels) and the total Belgian production. The photovoltaic data has been provided since 01-04-2018 and is available to the present day. \cite{noauthor_photovoltaic_nodate}
\item \textbf{Wind power production estimation and forecast on Belgian grid (Historical)} \\
% https://opendata.elia.be/explore/dataset/ods031/information/
Just as the photovoltaic power production data, wind power production is available in a quarter-hour interval for each of the provinces and regions in Belgium. This data also includes the real production and the forecasts. An additional column is available that shows if the power is generated offshore or onshore. During this thesis, the offshore and onshore data will be combined. The wind power production data has been provided since 01-01-2015 and is available to the present day. \parencite{noauthor_wind_nodate}
Just as the photovoltaic power production data, wind power production is available in a quarter-hour interval for each of the provinces and regions in Belgium. This data also includes the real production and the forecasts. An additional column is available that shows if the power is generated offshore or onshore. During this thesis, the offshore and onshore data will be combined. The wind power production data has been provided since 01-01-2015 and is available to the present day. \cite{noauthor_wind_nodate}
\item \textbf{Day-ahead implicit net position (Belgium's balance)} \\
% https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime
The day-ahead implicit net position shows the total amount of electricity that will be imported or exported to neighboring countries. The trades are done on the day-ahead market and are thus known in advance. This data is available in a quarter-hour interval and has been provided since 01-11-2020 and is available to the present day. The data before 01-11-2020 is also available but only in hourly intervals. \parencite{noauthor_intraday_nodate}
The day-ahead implicit net position shows the total amount of electricity that will be imported or exported to neighboring countries. The trades are done on the day-ahead market and are thus known in advance. This data is available in a quarter-hour interval and has been provided since 01-11-2020 and is available to the present day. The data before 01-11-2020 is also available but only in hourly intervals. \cite{noauthor_intraday_nodate}
\end{itemize}
The open data can be accessed at: \url{https://opendata.elia.be/pages/home/}
A lot of data is available but only the most relevant data needs to be used. Experiments will be done to identify which data and features improve the NRV modeling. The data will be split into a training and test set. The start of the training dataset depends on which data features are used, but it always ends on 31-12-2022. The test set starts on 01-01-2023 and ends on 12-12-2023. This makes sure enough data is available to train the models and the test set is large enough to evaluate the models. The year 2023 is chosen as the test set because it is the most recent data available when the thesis experiments were conducted. Using data from 2022 in the test set also does not make a lot of sense because the trained models would be used to predict the future; data from 2022 is no longer relevant for evaluating the models. Some data features are missing for certain periods; this is taken into account, and those periods are excluded from the training and test set even if the unavailable feature is not used. This makes sure the data is consistent and results can be compared fairly.
\subsection{Quantile Regression}
@@ -55,7 +52,7 @@ A lot of data is available but only the most relevant data needs to be used. Exp
\newpage
\subsection{Policies for battery optimization}
The goal of this thesis is to model the NRV data and use this to optimize the buying and selling of electricity to make a profit. Different models and methods can be used to model the NRV data which can all result in different results. To evaluate the performance of the models, the generated profit on the test set can be used as a metric. First of all, baselines are needed to be able to compare the models to if adding NRV predictions to the policies improves the profit. The baselines are already discussed in the background section. It is very important to compare the baselines and other policies fairly. The profit depends on the number of charge cycles that are used. The more charge cycles a policy uses, the more profit it will be able to make. Using too many charge cycles is bad for the health of the battery. A penalty parameter can be used to penalize the policy when too many charge cycles are used in a day. To fairly compare the policies with different models and baselines, a maximum number of charge cycles is determined for the test period. The test period starts on 01-01-2023 and ends on (TODO: check the end date). Assuming a maximum of 400 charge cycles can be used in a year, only 293 charge cycles can be used during the test period. The penalty parameter is optimized using a simple gradient descent approach to make sure only 293 charge cycles are used during the test period. The profit is then calculated using the optimized penalty parameter.
The goal of this thesis is to model the NRV data and use this to optimize the buying and selling of electricity to make a profit. Different models and methods can be used to model the NRV data, which can all lead to different results. To evaluate the performance of the models, the generated profit on the test set can be used as a metric. First of all, baselines are needed to determine whether adding NRV predictions to the policies improves the profit. The baselines are already discussed in the background section. It is very important to compare the baselines and other policies fairly. The profit depends on the number of charge cycles that are used. The more charge cycles a policy uses, the more profit it will be able to make. Using too many charge cycles is bad for the health of the battery. A penalty parameter can be used to penalize the policy when too many charge cycles are used in a day. To fairly compare the policies with different models and baselines, a maximum number of charge cycles is determined for the test period. The test period starts on 01-01-2023 and ends on 12-12-2023. In this period, there are only 258 days that can be used. Some days have missing input feature data. These days are thus excluded from the test set. Assuming a maximum of 400 charge cycles can be used in a year, only 283 charge cycles can be used during the test period. The penalty parameter is optimized using a simple gradient descent approach to make sure only 283 charge cycles are used during the test period. The profit is then calculated using the optimized penalty parameter.
To evaluate the policies, a battery of 2 MWh is used with a maximum charge and discharge power of 1 MW. The battery is charged and discharged in quarter-hour intervals at the price of that quarter-hour.

View File

@@ -1,13 +1,12 @@
\subsection{Diffusion}
Another type of model that can be used to generatively model the NRV is the diffusion model. This type of model is very popular for image generation. In the context of images, the diffusion model is trained by iteratively adding noise to a training image until there is only noise left. From this noise, the model tries to reverse the diffusion process to get the original image back. To sample new images using this model, a noise vector is sampled and iteratively denoised by the model. This process results in a new image.
Another type of model that can be used to generatively model the NRV is the diffusion model. This type of model is very popular for image generation. In the context of images, the diffusion model is trained by iteratively adding noise to a training image until there is only noise left. From this noise, the model tries to reverse the diffusion process to get the original image back. To sample new images using this model, a noise vector is sampled and iteratively denoised by the model. This process results in a new image.
This training process can also be used for other data types. An image is just a 2D grid of data points. A time series can be seen as a 1D sequence of data points. The diffusion model can thus be trained on the NRV data to generate new samples for a certain day based on a given input.
Once the diffusion model is trained, it can be used efficiently to generate new samples. The model can generate samples in parallel, which is not possible with autoregressive models. It offers the parallel sample generation of the non-autoregressive models while the quarterly NRV values still depend on each other. A batch of noise vectors can be sampled and passed through the model in one batch to generate the new samples. The generated samples contain the 96 NRV values for the next day without needing to sample every quarter sequentially.
The model is trained in a completely different way than the quantile regression models. A simple implementation of the Denoising Diffusion Probabilistic Model (DDPM) is used to perform the experiments. More complex implementations with more advanced techniques could be used to improve the results. This is out of the scope of this thesis. The goal is to show that more recent generative models can also be used to model the NRV data. These results can then be compared to the quantile regression models to see if the diffusion model can generate better samples.
The model is trained in a completely different way than the quantile regression models. A simple implementation of the Denoising Diffusion Probabilistic Model (DDPM) \cite{ho_denoising_2020} is used to perform the experiments. More complex implementations with more advanced techniques could be used to improve the results. This is out of the scope of this thesis. The goal is to show that more recent generative models can also be used to model the NRV data. These results can then be compared to the quantile regression models to see if the diffusion model can generate better samples.
% TODO: In background information?
First of all, the model architecture needs to be chosen. The model takes multiple inputs which include the noisy NRV time series, the positional encoding of the current denoising step and the conditional input features. The model needs to predict the noise in the current time series. The time series can then be denoised by subtracting the predicted noise in every denoising step. Multiple model architectures can be used as long as the model can predict the noise in the time series. A simple feedforward neural network is used. The neural network consists of multiple linear layers with ReLU activation functions. To predict the noise in a noisy time series, the current denoising step index must also be provided. This integer is then transformed into a vector using sine and cosine functions. The positional encoding is then concatenated with the noisy time series and the conditional input features. This tensor is then passed through the first linear layer and activation function of the neural network. This results in a tensor of the hidden size that was chosen. Before passing this tensor to the next layer, the positional encoding and conditional input features are concatenated again. This process is repeated until the last layer is reached. This provides every layer in the neural network with the necessary information to predict the noise in the time series. The output of the last layer is then the predicted noise in the time series. The model is trained by minimizing the mean squared error between the predicted noise and the real noise in the time series.
Other hyperparameters that need to be chosen are the number of denoising steps, number of layers and hidden size of the neural network. Experiments are performed to get an insight into the influence these parameters have on the model performance. Results are shown in Table \ref{tab:diffusion_results}.
@@ -15,22 +14,20 @@ Other hyperparameters that need to be chosen are the number of denoising steps,
\begin{figure}[h]
\centering
\begin{tikzpicture}
% First row
% Node for Image 1
\node (img1) {\includegraphics[width=0.45\textwidth]{images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg}};
% Node for Image 2 with an arrow from Image 1
\node[right=of img1] (img2) {\includegraphics[width=0.45\textwidth]{images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg}};
\draw[-latex] (img1) -- (img2);
% Second row
% Node for Image 3 below Image 1 with an arrow from Image 2
\node[below=of img1] (img3) {\includegraphics[width=0.45\textwidth]{images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg}};
% Node for Image 4 with an arrow from Image 3
\node[right=of img3] (img4) {\includegraphics[width=0.45\textwidth]{images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg}};
\draw[-latex] (img3) -- (img4);
% Complex arrow from Image 2 to Image 3
% Calculate midpoint for the horizontal segment
\coordinate (Middle) at ($(img2.south)!0.5!(img3.north)$);
\draw[-latex] (img2.south) |- (Middle) -| (img3.north);
\end{tikzpicture}
@@ -80,9 +77,9 @@ In Figure \ref{fig:diffusion_intermediates}, multiple intermediate steps of the
\label{tab:diffusion_results}
\end{table}
In Table \ref{tab:diffusion_results}, the results of the experiments for the diffusion model can be seen. The diffusion model that was used is a simple implementation of the Denoising Diffusion Probabilistic Model (DDPM). The model itself exists of multiple linear layers with ReLU activation functions. The diffusion steps were set to 300 for the experiments. This number was determined by doing a few experiments with more and fewer steps. The model performance did not improve when more steps were used. This parameter could be further optimized together with the other parameters to find the best-performing model. This would take a lot of time and is not the goal of this thesis.
In Table \ref{tab:diffusion_results}, the results of the experiments for the diffusion model can be seen. The diffusion model that was used is a simple implementation of the Denoising Diffusion Probabilistic Model (DDPM) \cite{ho_denoising_2020}. The model itself consists of multiple linear layers with ReLU activation functions. The diffusion steps were set to 300 for the experiments. This number was determined by doing a few experiments with more and fewer steps. The model performance did not improve when more steps were used. This parameter could be further optimized together with the other parameters to find the best-performing model. This would take a lot of time and is not the goal of this thesis.
The first observation that can be made is the higher error metrics when more input features are used. This is counterintuitive because the model has more information to generate the samples. The reason for this behavior is not immediately clear. One reason could be that the model conditioning is not optimal. Now the input features are passed to every layer of the model together with the time series that needs to be denoised. The model could be improved by using a more advanced conditioning mechanism like classifier guidance and classifier-free guidance.
The first observation that can be made is the higher error metrics when more input features are used. This is counterintuitive because the model has more information to generate the samples. The reason for this behavior is not immediately clear. One reason could be that the model conditioning is not optimal. Currently, the input features are passed to every layer of the model together with the time series that needs to be denoised. The model could be improved by using more advanced conditioning mechanisms like classifier guidance \cite{dhariwal_diffusion_2021} and classifier-free guidance \cite{ho_classifier-free_2022}.
\\
\begin{figure}[ht]
\centering

View File

@@ -1,5 +1,4 @@
\subsubsection{Linear Model}
% TODO: explained in section reference?
The simplest model to be trained for the NRV modeling is the linear model. The linear model is trained using the pinball loss function explained in the section above. The outputs of the model are values for the chosen quantiles. The linear model can be trained in an autoregressive and non-autoregressive way. Both methods will be compared to each other. The linear model is trained using the Adam optimizer with a learning rate of 1e-4. Early stopping is used with a patience of 5 epochs. The linear model is evaluated using the mean squared error (MSE), mean absolute error (MAE), and continuous ranked probability score (CRPS). The influence of the input features is also evaluated by training the models with different input feature sets.
There is a big difference in the number of parameters between the autoregressive linear model and the non-autoregressive linear model. The autoregressive model only needs to output the NRV quantiles for one value while the non-autoregressive model needs to output the NRV quantiles for all the quarters of the day. Assuming thirteen quantiles are used, the autoregressive model has 13 output parameters while the non-autoregressive model has 13 * 96 = 1248 output parameters. The total number of parameters for the autoregressive model is 13 * (number of input features + 1) while the total number of parameters for the non-autoregressive model is 13 * 96 * (number of input features + 1). Assuming only the NRV history of the previous day is used as input features, the autoregressive model has 1261 trainable parameters while the non-autoregressive model has 121056 parameters. This is a huge difference in the number of parameters and thus the complexity of the model.
@@ -52,8 +51,6 @@ Providing the autoregressive model with the quarter of the day can be done in mu
The sine and cosine values are then concatenated with the input features. Another method that can be used is adding an embedding layer to the model. The discrete quarter of the day value can then be mapped to a vector. The embedding layer itself is learned during the training process which allows the model to learn patterns between quarters. The length of the embedding vector can be chosen and experimented with. The quarter-of-the-day information is then concatenated with the input features. Other information (eg. day of the week, month, year) can also easily be added to the model using this method by just increasing the size of the embedding layer. The results of the linear model with the quarter information are shown in Table \ref{tab:autoregressive_linear_model_quarter_embedding_baseline_results}.
% TODO: Ask Jonas: Find cleaner way to present this table (remove repetition)
% TODO: Add more time information like day of week, month
\begin{table}[ht]
\centering
\begin{tabular}{@{}lccc@{}}

View File

@@ -3,6 +3,45 @@ The generated full-day samples can be used to improve the profit of the policy.
A low CRPS value does not necessarily mean the policy will generate a high profit. Because of this, the CRPS metric can not be used to evaluate the model during the training phase and use this metric to do early stopping. To fairly evaluate and compare the models, a validation set is split off from the training set. The validation set is used to evaluate the profit of the policy during the training and use this to do early stopping. The last two months of the training set are used as the validation set. This range starts on 01-11-2022 and ends on 31-12-2022. Two months are chosen to make sure enough data is available to have a good estimate of the profit while making sure the validation set is not too large. The policy can be evaluated quite fast on the validation set which is feasible to do during the training after a certain number of epochs.
\begin{table}[ht]
\centering
\begin{adjustbox}{max width=\textwidth}
\begin{tabular}{lccccc}
\toprule
Layers & Test CRPS & Test Profit (€) & Test Charge Cycles & Test Penalty \\
\midrule
\multicolumn{5}{l}{\textbf{Only NRV}} \\
\midrule
Non-Linear (2 - 256) & 86.67 & 190,521.14 & 282.69 & 694.37 \\
Non-Linear (4 - 256) & \textbf{84.64} & 191,305.88 & 283.25 & 904.63 \\
Non-Linear (4 - 512) & 87.77 & 191,374.56 & 282.88 & 1095.56 \\
Non-Linear (8 - 256) & 87.93 & 192,110.72 & 282.56 & 1034.63 \\
Non-Linear (2 - 512) & 87.03 & 190,924.44 & 282.94 & 621.38 \\
Non-Linear (8 - 512) & 100.52 & 195,388.01 & 282.13 & 4153.81 \\
GRU (2 - 256) & 96.66 & \textbf{196,655.36} & 283.81 & 801.44 \\
GRU (4 - 256) & 86.62 & 190,208.00 & 282.31 & 633.87 \\
GRU (2 - 512) & 99.85 & 195,864.99 & 283.06 & 584.72 \\
GRU (4 - 512) & 88.32 & 191,077.33 & 282.19 & 627.13 \\
\midrule
\multicolumn{5}{l}{\textbf{All Features}} \\
\midrule
Non-Linear (2 - 256) & \textbf{79.33} & 190,466.07 & 282.56 & 689.89 \\
Non-Linear (4 - 256) & 80.20 & 192,269.40 & 283.88 & 614.49 \\
Non-Linear (8 - 256) & 84.83 & 192,655.81 & 282.69 & 1029.75 \\
Non-Linear (4 - 512) & 107.99 & \textbf{196,999.03} & 284.88 & 819.43 \\
Non-Linear (8 - 512) & 90.63 & 193,654.29 & 282.69 & 1173.56 \\
GRU (2 - 256) & 94.86 & 190,715.84 & 282.56 & 436.41 \\
GRU (4 - 256) & 92.52 & 189,946.76 & 283.13 & 420.54 \\
GRU (2 - 512) & 87.96 & 191,142.48 & 282.44 & 475.10 \\
GRU (4 - 512) & 90.47 & 193,970.81 & 283.31 & 516.62 \\
\bottomrule
\end{tabular}
\end{adjustbox}
\caption{Comparison of AQR: Non-linear and GRU models using different hyperparameters. Early stopping is done based on the profit using the validation set.}
\label{tab:aqr_model_comparison}
\end{table}
\begin{table}[ht]
\centering
\begin{adjustbox}{max width=\textwidth}
@@ -36,7 +75,7 @@ A low CRPS value does not necessarily mean the policy will generate a high profi
\label{tab:diffusion_policy_comparison}
\end{table}
From the results shown in Table \ref{tab:diffusion_policy_comparison} multiple conclusions can be made. First of all, a lower CRPS metric does not correlate with a higher profit. The CRPS metric captures how well the NRV distribution is modeled. The profit metric captures how well the policy can make a profit using the generated samples. A lower CRPS means a better modeling of the NRV but in the table, it can be seen that the lowest CRPS is achieved by the model with layers 1024 - 1024 and 20 steps. This model also uses all features as input. The model achieves a CRPS of 100.36 with a profit of €215,686.32. The highest profit, however, is achieved by the model with layers 256 - 256 and using 50 steps. This model only uses the NRV as input. This model achieves a CRPS of 139.61 with a profit of €218,170.75. The CRPS here is higher which means a worse modeling of the NRV but the profit is higher.
Table \ref{tab:diffusion_policy_comparison} shows the achieved profit and CRPS for diffusion models with different hyperparameters. Multiple conclusions can be made from this table. First of all, a lower CRPS metric does not correlate with a higher profit. The CRPS metric captures how well the NRV distribution is modeled. The profit metric captures how well the policy can make a profit using the generated samples. A lower CRPS means a better modeling of the NRV but in the table, it can be seen that the lowest CRPS is achieved by the model with layers 1024 - 1024 and 20 steps. This model also uses all features as input. The model achieves a CRPS of 100.36 with a profit of €215,686.32. The highest profit, however, is achieved by the model with layers 256 - 256 and using 50 steps. This model only uses the NRV as input. This model achieves a CRPS of 139.61 with a profit of €218,170.75. The CRPS here is higher which means a worse modeling of the NRV but the profit is higher.
Some examples of the generated samples from the model with the lowest CRPS and the model with the highest profit are shown in Figure \ref{fig:diffusion_policy_comparison_high_low_crps}. A significant difference in the confidence intervals can be observed. The left model clearly shows a better modeling of the NRV compared to the right model. The right model has confidence intervals that range from the minimum to the maximum value of the NRV. There is a high variance in the generated samples. This shows that the policy does not only benefit from good modeling of the NRV but also from a high variance in the generated samples. The model with the highest profit also only uses the NRV as input. This shows that more features do not necessarily result in a higher profit and that better modeling of the NRV does not necessarily improve the achievable profit of the policy.
@@ -82,7 +121,12 @@ A comparison of the baselines and the best-performing models is shown in Table \
\midrule
\multicolumn{4}{l}{\textbf{Models}} \\
\midrule
Diffusion (256 - 256, 50 steps, only NRV) & 218,170.75 & 283.00 & +9.74\% \\
AQR: Non-Linear (4 - 512, All) & 196,999.03 & 284.88 & -0.91\% \\
AQR: GRU (2 - 256, Only NRV) & 196,655.36 & 283.81 & -1.08\% \\
Diffusion (2 - 256, 50 steps, Only NRV) & 218,170.75 & 283.00 & \textbf{+9.74\%} \\
\bottomrule
\end{tabular}
\end{adjustbox}

View File

@@ -1,7 +1,6 @@
\relax
\providecommand\babel@aux[2]{}
\@nameuse{bbl@beforestart}
\abx@aux@refcontext{nyt/global//global/global}
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
@@ -23,157 +22,133 @@
\babel@aux{english}{}
\@input{sections/introduction.aux}
\@input{sections/background.aux}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{ho_denoising_2020}
\abx@aux@segm{0}{0}{ho_denoising_2020}
\abx@aux@page{1}{14}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{ho2020denoising}
\abx@aux@segm{0}{0}{ho2020denoising}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{ho2020denoising}
\abx@aux@segm{0}{0}{ho2020denoising}
\@input{sections/policies.aux}
\@input{sections/literature_study.aux}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{weron_electricity_2014}
\abx@aux@segm{0}{0}{weron_electricity_2014}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{poggi_electricity_2023}
\abx@aux@segm{0}{0}{poggi_electricity_2023}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{lu_scenarios_2022}
\abx@aux@segm{0}{0}{lu_scenarios_2022}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{dumas_deep_2022}
\abx@aux@segm{0}{0}{dumas_deep_2022}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{rasul_autoregressive_2021}
\abx@aux@segm{0}{0}{rasul_autoregressive_2021}
\abx@aux@page{2}{20}
\abx@aux@page{3}{20}
\abx@aux@page{4}{20}
\abx@aux@page{5}{20}
\abx@aux@page{6}{20}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{dumas_probabilistic_2019}
\abx@aux@segm{0}{0}{dumas_probabilistic_2019}
\abx@aux@page{7}{21}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{elia_open_data}
\abx@aux@segm{0}{0}{elia_open_data}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_imbalance_nodate}
\abx@aux@segm{0}{0}{noauthor_imbalance_nodate}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_measured_nodate}
\abx@aux@segm{0}{0}{noauthor_measured_nodate}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_photovoltaic_nodate}
\abx@aux@segm{0}{0}{noauthor_photovoltaic_nodate}
\@writefile{toc}{\contentsline {section}{\numberline {6}Results \& Discussion}{22}{section.6}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Data}{22}{subsection.6.1}\protected@file@percent }
\abx@aux@page{8}{22}
\abx@aux@page{9}{22}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_wind_nodate}
\abx@aux@segm{0}{0}{noauthor_wind_nodate}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_intraday_nodate}
\abx@aux@segm{0}{0}{noauthor_intraday_nodate}
\abx@aux@page{10}{23}
\abx@aux@page{11}{23}
\abx@aux@page{12}{23}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Quantile Regression}{24}{subsection.6.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{24}{subsubsection.6.2.1}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Linear model results\relax }}{24}{table.caption.9}\protected@file@percent }
\newlabel{tab:linear_model_baseline_results}{{3}{24}{Linear model results\relax }{table.caption.9}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Mean and standard deviation of the NRV values over the quarter of the day\relax }}{26}{figure.caption.10}\protected@file@percent }
\newlabel{fig:nrv_mean_std_over_quarter}{{7}{26}{Mean and standard deviation of the NRV values over the quarter of the day\relax }{figure.caption.10}{}}
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Autoregressive linear model results with time features\relax }}{27}{table.caption.11}\protected@file@percent }
\newlabel{tab:autoregressive_linear_model_quarter_embedding_baseline_results}{{4}{27}{Autoregressive linear model results with time features\relax }{table.caption.11}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Comparison of the autoregressive and non-autoregressive linear model samples.\relax }}{27}{figure.caption.12}\protected@file@percent }
\newlabel{fig:linear_model_sample_comparison}{{8}{27}{Comparison of the autoregressive and non-autoregressive linear model samples.\relax }{figure.caption.12}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }}{28}{figure.caption.13}\protected@file@percent }
\newlabel{fig:linear_model_samples_comparison}{{9}{28}{Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }{figure.caption.13}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{29}{figure.caption.14}\protected@file@percent }
\newlabel{fig:linear_model_quantile_over_underestimation}{{10}{29}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.14}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.2}Non-Linear Model}{30}{subsubsection.6.2.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {5}{\ignorespaces Non-linear Quantile Regression Model Architecture\relax }}{30}{table.caption.15}\protected@file@percent }
\newlabel{tab:non_linear_model_architecture}{{5}{30}{Non-linear Quantile Regression Model Architecture\relax }{table.caption.15}{}}
\@writefile{lot}{\contentsline {table}{\numberline {6}{\ignorespaces Non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }}{31}{table.caption.16}\protected@file@percent }
\newlabel{tab:non_linear_model_results}{{6}{31}{Non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.16}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }}{32}{figure.caption.17}\protected@file@percent }
\newlabel{fig:non_linear_model_examples}{{11}{32}{Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }{figure.caption.17}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{33}{figure.caption.18}\protected@file@percent }
\newlabel{fig:non-linear_model_quantile_over_underestimation}{{12}{33}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.18}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.3}GRU Model}{33}{subsubsection.6.2.3}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces GRU Model Architecture\relax }}{34}{table.caption.19}\protected@file@percent }
\newlabel{tab:gru_model_architecture}{{7}{34}{GRU Model Architecture\relax }{table.caption.19}{}}
\@writefile{lot}{\contentsline {table}{\numberline {8}{\ignorespaces Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }}{35}{table.caption.20}\protected@file@percent }
\newlabel{tab:autoregressive_gru_model_results}{{8}{35}{Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.20}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }}{36}{figure.caption.21}\protected@file@percent }
\newlabel{fig:gru_model_sample_comparison}{{13}{36}{Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }{figure.caption.21}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{37}{figure.caption.22}\protected@file@percent }
\newlabel{fig:gru_model_quantile_over_underestimation}{{14}{37}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.22}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.3}Diffusion}{37}{subsection.6.3}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }}{39}{figure.caption.23}\protected@file@percent }
\newlabel{fig:diffusion_intermediates}{{15}{39}{Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }{figure.caption.23}{}}
\@writefile{lot}{\contentsline {table}{\numberline {9}{\ignorespaces Simple diffusion model results.\relax }}{39}{table.caption.24}\protected@file@percent }
\newlabel{tab:diffusion_results}{{9}{39}{Simple diffusion model results.\relax }{table.caption.24}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }}{40}{figure.caption.25}\protected@file@percent }
\newlabel{fig:diffusion_test_set_examples}{{16}{40}{The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }{figure.caption.25}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }}{41}{figure.caption.26}\protected@file@percent }
\newlabel{fig:diffusion_test_set_example_only_nrv_vs_all}{{17}{41}{The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }{figure.caption.26}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.4}Comparison}{41}{subsection.6.4}\protected@file@percent }
\ACRO{recordpage}{MSE}{42}{1}{41}
\ACRO{recordpage}{MAE}{42}{1}{41}
\ACRO{recordpage}{CRPS}{42}{1}{41}
\@writefile{lot}{\contentsline {table}{\numberline {10}{\ignorespaces Comparison of the different models using the \ac {MSE}, \ac {MAE} and \ac {CRPS} metrics. The best-performing models for a certain type are selected based on the \ac {CRPS}.\relax }}{42}{table.caption.27}\protected@file@percent }
\newlabel{tab:model_comparison}{{10}{42}{Comparison of the different models using the \ac {MSE}, \ac {MAE} and \ac {CRPS} metrics. The best-performing models for a certain type are selected based on the \ac {CRPS}.\relax }{table.caption.27}{}}
\ACRO{recordpage}{NAQR}{43}{1}{42}
\ACRO{recordpage}{MSE}{43}{1}{42}
\ACRO{recordpage}{MAE}{43}{1}{42}
\ACRO{recordpage}{CRPS}{43}{1}{42}
\ACRO{recordpage}{MSE}{43}{1}{42}
\ACRO{recordpage}{MAE}{43}{1}{42}
\ACRO{recordpage}{MSE}{43}{1}{42}
\ACRO{recordpage}{MAE}{43}{1}{42}
\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Comparison of the autoregressive linear and GRU model\relax }}{43}{figure.caption.28}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{18}{43}{Comparison of the autoregressive linear and GRU model\relax }{figure.caption.28}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.5}Policies for battery optimization}{44}{subsection.6.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.5.1}Baselines}{44}{subsubsection.6.5.1}\protected@file@percent }
\ACRO{recordpage}{NRV}{45}{1}{44}
\ACRO{recordpage}{NRV}{45}{1}{44}
\ACRO{recordpage}{NRV}{45}{1}{44}
\ACRO{recordpage}{NRV}{46}{1}{45}
\ACRO{recordpage}{NRV}{46}{1}{45}
\ACRO{recordpage}{NRV}{46}{1}{45}
\ACRO{recordpage}{NRV}{46}{1}{45}
\ACRO{recordpage}{NRV}{46}{1}{45}
\ACRO{recordpage}{NRV}{46}{1}{45}
\@writefile{lot}{\contentsline {table}{\numberline {11}{\ignorespaces Results of the baseline policies on the test set. \relax }}{45}{table.caption.29}\protected@file@percent }
\newlabel{tab:fixed_thresholds}{{11}{45}{Results of the baseline policies on the test set. \relax }{table.caption.29}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.5.2}Policy using generated NRV samples}{45}{subsubsection.6.5.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {12}{\ignorespaces Comparison of diffusion models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }}{46}{table.caption.30}\protected@file@percent }
\newlabel{tab:diffusion_policy_comparison}{{12}{46}{Comparison of diffusion models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }{table.caption.30}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }}{47}{figure.caption.31}\protected@file@percent }
\newlabel{fig:diffusion_policy_comparison_high_low_crps}{{19}{47}{Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }{figure.caption.31}{}}
\@writefile{lot}{\contentsline {table}{\numberline {13}{\ignorespaces Comparison of the different models using the CRPS, profit, charge cycles and penalty. The best-performing models for a certain type are selected based on the profit.\relax }}{47}{table.caption.32}\protected@file@percent }
\newlabel{tab:policy_comparison}{{13}{47}{Comparison of the different models using the CRPS, profit, charge cycles and penalty. The best-performing models for a certain type are selected based on the profit.\relax }{table.caption.32}{}}
\@writefile{toc}{\contentsline {section}{\numberline {7}Conclusion}{48}{section.7}\protected@file@percent }
\abx@aux@page{13}{50}
\abx@aux@page{14}{50}
\abx@aux@page{15}{50}
\abx@aux@page{16}{50}
\abx@aux@page{17}{50}
\abx@aux@page{18}{50}
\abx@aux@page{19}{50}
\abx@aux@page{20}{50}
\abx@aux@page{21}{50}
\abx@aux@page{22}{50}
\abx@aux@page{23}{50}
\abx@aux@page{24}{50}
\citation{noauthor_welcome_nodate}
\citation{noauthor_imbalance_nodate}
\citation{noauthor_measured_nodate}
\citation{noauthor_photovoltaic_nodate}
\@writefile{toc}{\contentsline {section}{\numberline {6}Results \& Discussion}{24}{section.6}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Data}{24}{subsection.6.1}\protected@file@percent }
\citation{noauthor_wind_nodate}
\citation{noauthor_intraday_nodate}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Quantile Regression}{25}{subsection.6.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{25}{subsubsection.6.2.1}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Linear model results\relax }}{26}{table.caption.10}\protected@file@percent }
\newlabel{tab:linear_model_baseline_results}{{3}{26}{Linear model results\relax }{table.caption.10}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Mean and standard deviation of the NRV values over the quarter of the day\relax }}{28}{figure.caption.11}\protected@file@percent }
\newlabel{fig:nrv_mean_std_over_quarter}{{8}{28}{Mean and standard deviation of the NRV values over the quarter of the day\relax }{figure.caption.11}{}}
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Autoregressive linear model results with time features\relax }}{28}{table.caption.12}\protected@file@percent }
\newlabel{tab:autoregressive_linear_model_quarter_embedding_baseline_results}{{4}{28}{Autoregressive linear model results with time features\relax }{table.caption.12}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Comparison of the autoregressive and non-autoregressive linear model samples.\relax }}{29}{figure.caption.13}\protected@file@percent }
\newlabel{fig:linear_model_sample_comparison}{{9}{29}{Comparison of the autoregressive and non-autoregressive linear model samples.\relax }{figure.caption.13}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }}{30}{figure.caption.14}\protected@file@percent }
\newlabel{fig:linear_model_samples_comparison}{{10}{30}{Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }{figure.caption.14}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{31}{figure.caption.15}\protected@file@percent }
\newlabel{fig:linear_model_quantile_over_underestimation}{{11}{31}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.15}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.2}Non-Linear Model}{32}{subsubsection.6.2.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {5}{\ignorespaces Non-linear Quantile Regression Model Architecture\relax }}{32}{table.caption.16}\protected@file@percent }
\newlabel{tab:non_linear_model_architecture}{{5}{32}{Non-linear Quantile Regression Model Architecture\relax }{table.caption.16}{}}
\@writefile{lot}{\contentsline {table}{\numberline {6}{\ignorespaces Non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }}{33}{table.caption.17}\protected@file@percent }
\newlabel{tab:non_linear_model_results}{{6}{33}{Non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.17}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }}{34}{figure.caption.18}\protected@file@percent }
\newlabel{fig:non_linear_model_examples}{{12}{34}{Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }{figure.caption.18}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{35}{figure.caption.19}\protected@file@percent }
\newlabel{fig:non-linear_model_quantile_over_underestimation}{{13}{35}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.19}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.3}GRU Model}{35}{subsubsection.6.2.3}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces GRU Model Architecture\relax }}{36}{table.caption.20}\protected@file@percent }
\newlabel{tab:gru_model_architecture}{{7}{36}{GRU Model Architecture\relax }{table.caption.20}{}}
\@writefile{lot}{\contentsline {table}{\numberline {8}{\ignorespaces Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }}{37}{table.caption.21}\protected@file@percent }
\newlabel{tab:autoregressive_gru_model_results}{{8}{37}{Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.21}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }}{38}{figure.caption.22}\protected@file@percent }
\newlabel{fig:gru_model_sample_comparison}{{14}{38}{Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }{figure.caption.22}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{39}{figure.caption.23}\protected@file@percent }
\newlabel{fig:gru_model_quantile_over_underestimation}{{15}{39}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.23}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.3}Diffusion}{39}{subsection.6.3}\protected@file@percent }
\citation{ho_denoising_2020}
\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }}{41}{figure.caption.24}\protected@file@percent }
\newlabel{fig:diffusion_intermediates}{{16}{41}{Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }{figure.caption.24}{}}
\@writefile{lot}{\contentsline {table}{\numberline {9}{\ignorespaces Simple diffusion model results.\relax }}{41}{table.caption.25}\protected@file@percent }
\newlabel{tab:diffusion_results}{{9}{41}{Simple diffusion model results.\relax }{table.caption.25}{}}
\citation{ho_denoising_2020}
\citation{dhariwal_diffusion_2021}
\citation{ho_classifier-free_2022}
\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }}{42}{figure.caption.26}\protected@file@percent }
\newlabel{fig:diffusion_test_set_examples}{{17}{42}{The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }{figure.caption.26}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }}{43}{figure.caption.27}\protected@file@percent }
\newlabel{fig:diffusion_test_set_example_only_nrv_vs_all}{{18}{43}{The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }{figure.caption.27}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.4}Comparison}{43}{subsection.6.4}\protected@file@percent }
\ACRO{recordpage}{MSE}{44}{1}{43}
\ACRO{recordpage}{MAE}{44}{1}{43}
\ACRO{recordpage}{CRPS}{44}{1}{43}
\@writefile{lot}{\contentsline {table}{\numberline {10}{\ignorespaces Comparison of the different models using the \ac {MSE}, \ac {MAE} and \ac {CRPS} metrics. The best-performing models for a certain type are selected based on the \ac {CRPS}.\relax }}{44}{table.caption.28}\protected@file@percent }
\newlabel{tab:model_comparison}{{10}{44}{Comparison of the different models using the \ac {MSE}, \ac {MAE} and \ac {CRPS} metrics. The best-performing models for a certain type are selected based on the \ac {CRPS}.\relax }{table.caption.28}{}}
\ACRO{recordpage}{NAQR}{45}{1}{44}
\ACRO{recordpage}{MSE}{45}{1}{44}
\ACRO{recordpage}{MAE}{45}{1}{44}
\ACRO{recordpage}{CRPS}{45}{1}{44}
\ACRO{recordpage}{MSE}{45}{1}{44}
\ACRO{recordpage}{MAE}{45}{1}{44}
\ACRO{recordpage}{MSE}{45}{1}{44}
\ACRO{recordpage}{MAE}{45}{1}{44}
\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Comparison of the autoregressive linear and GRU model\relax }}{45}{figure.caption.29}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{19}{45}{Comparison of the autoregressive linear and GRU model\relax }{figure.caption.29}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.5}Policies for battery optimization}{46}{subsection.6.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.5.1}Baselines}{46}{subsubsection.6.5.1}\protected@file@percent }
\ACRO{recordpage}{NRV}{47}{1}{46}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\ACRO{recordpage}{NRV}{48}{1}{47}
\@writefile{lot}{\contentsline {table}{\numberline {11}{\ignorespaces Results of the baseline policies on the test set. \relax }}{47}{table.caption.30}\protected@file@percent }
\newlabel{tab:fixed_thresholds}{{11}{47}{Results of the baseline policies on the test set. \relax }{table.caption.30}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.5.2}Policy using generated NRV samples}{47}{subsubsection.6.5.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {12}{\ignorespaces Comparison of AQR: Non-linear and GRU models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }}{48}{table.caption.31}\protected@file@percent }
\newlabel{tab:aqr_model_comparison}{{12}{48}{Comparison of AQR: Non-linear and GRU models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }{table.caption.31}{}}
\@writefile{lot}{\contentsline {table}{\numberline {13}{\ignorespaces Comparison of diffusion models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }}{49}{table.caption.32}\protected@file@percent }
\newlabel{tab:diffusion_policy_comparison}{{13}{49}{Comparison of diffusion models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }{table.caption.32}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }}{50}{figure.caption.33}\protected@file@percent }
\newlabel{fig:diffusion_policy_comparison_high_low_crps}{{20}{50}{Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }{figure.caption.33}{}}
\@writefile{lot}{\contentsline {table}{\numberline {14}{\ignorespaces Comparison of the different models using the CRPS, profit, charge cycles and penalty. The best-performing models for a certain type are selected based on the profit.\relax }}{51}{table.caption.34}\protected@file@percent }
\newlabel{tab:policy_comparison}{{14}{51}{Comparison of the different models using the CRPS, profit, charge cycles and penalty. The best-performing models for a certain type are selected based on the profit.\relax }{table.caption.34}{}}
\@writefile{toc}{\contentsline {section}{\numberline {7}Conclusion}{52}{section.7}\protected@file@percent }
\bibstyle{unsrtnat}
\bibdata{references}
\bibcite{commission_for_electricity_and_gas_regulation_creg_study_2023}{{1}{}{{Commission for Electricity and Gas Regulation (CREG)}}{{}}}
\bibcite{noauthor_geliberaliseerde_nodate}{{2}{}{{noa}}{{}}}
\bibcite{noauthor_role_nodate}{{3}{}{{noa}}{{}}}
\bibcite{noauthor_fcr_nodate}{{4}{}{{noa}}{{}}}
\bibcite{noauthor_afrr_nodate}{{5}{}{{noa}}{{}}}
\bibcite{noauthor_mfrr_nodate}{{6}{}{{noa}}{{}}}
\bibcite{elia_tariffs_2022}{{7}{}{{Elia}}{{}}}
\bibcite{goodfellow_generative_2014}{{8}{}{{Goodfellow et~al.}}{{Goodfellow, Pouget-Abadie, Mirza, Xu, Warde-Farley, Ozair, Courville, and Bengio}}}
\bibcite{kingma_auto-encoding_2022}{{9}{}{{Kingma and Welling}}{{}}}
\bibcite{rezende_variational_2015}{{10}{}{{Rezende and Mohamed}}{{}}}
\bibcite{sohl-dickstein_deep_2015}{{11}{}{{Sohl-Dickstein et~al.}}{{Sohl-Dickstein, Weiss, Maheswaranathan, and Ganguli}}}
\bibcite{koenker_regression_1978}{{12}{}{{Koenker and Bassett}}{{}}}
\bibcite{ho_denoising_2020}{{13}{}{{Ho et~al.}}{{Ho, Jain, and Abbeel}}}
\bibcite{gneiting_strictly_2007}{{14}{}{{Gneiting and Raftery}}{{}}}
\bibcite{weron_electricity_2014}{{15}{}{{Weron}}{{}}}
\bibcite{poggi_electricity_2023}{{16}{}{{Poggi et~al.}}{{Poggi, Di~Persio, and Ehrhardt}}}
\bibcite{lago_forecasting_2018}{{17}{}{{Lago et~al.}}{{Lago, De~Ridder, and De~Schutter}}}
\bibcite{hagfors_modeling_2016}{{18}{}{{Hagfors et~al.}}{{Hagfors, Bunn, Kristoffersen, Staver, and Westgaard}}}
\bibcite{lu_scenarios_2022}{{19}{}{{Lu et~al.}}{{Lu, Qiu, Lei, and Zhu}}}
\bibcite{dumas_deep_2022}{{20}{}{{Dumas et~al.}}{{Dumas, Wehenkel, Lanaspeze, Cornélusse, and Sutera}}}
\bibcite{rasul_autoregressive_2021}{{21}{}{{Rasul et~al.}}{{Rasul, Seward, Schuster, and Vollgraf}}}
\bibcite{dumas_probabilistic_2019}{{22}{}{{Dumas et~al.}}{{Dumas, Boukas, de~Villena, Mathieu, and Cornélusse}}}
\bibcite{narajewski_probabilistic_2022}{{23}{}{{Narajewski}}{{}}}
\bibcite{noauthor_welcome_nodate}{{24}{}{{noa}}{{}}}
\bibcite{noauthor_imbalance_nodate}{{25}{}{{noa}}{{}}}
\bibcite{noauthor_measured_nodate}{{26}{}{{noa}}{{}}}
\bibcite{noauthor_photovoltaic_nodate}{{27}{}{{noa}}{{}}}
\bibcite{noauthor_wind_nodate}{{28}{}{{noa}}{{}}}
\bibcite{noauthor_intraday_nodate}{{29}{}{{noa}}{{}}}
\bibcite{dhariwal_diffusion_2021}{{30}{}{{Dhariwal and Nichol}}{{}}}
\bibcite{ho_classifier-free_2022}{{31}{}{{Ho and Salimans}}{{}}}
\@input{sections/appendix.aux}
\ACRO{total-barriers}{1}
\ACRO{usage}{QR=={0}}
@@ -181,20 +156,20 @@
\ACRO{usage}{NAQR=={1}}
\ACRO{usage}{GRU=={0}}
\ACRO{usage}{LSTM=={0}}
\ACRO{usage}{GAN=={1}}
\ACRO{usage}{CTSGAN=={1}}
\ACRO{usage}{GAN=={0}}
\ACRO{usage}{CTSGAN=={0}}
\ACRO{usage}{VAE=={0}}
\ACRO{usage}{MLP=={1}}
\ACRO{usage}{GP=={1}}
\ACRO{usage}{MLP=={0}}
\ACRO{usage}{GP=={0}}
\ACRO{usage}{MSE=={4}}
\ACRO{usage}{MAE=={4}}
\ACRO{usage}{CRPS=={3}}
\ACRO{usage}{TSPA=={1}}
\ACRO{usage}{PLF=={1}}
\ACRO{usage}{CRPS=={2}}
\ACRO{usage}{TSPA=={0}}
\ACRO{usage}{PLF=={0}}
\ACRO{usage}{NRV=={12}}
\ACRO{usage}{PV=={0}}
\ACRO{usage}{NP=={0}}
\ACRO{usage}{TSO=={2}}
\ACRO{usage}{TSO=={3}}
\ACRO{usage}{DSO=={0}}
\ACRO{usage}{BRP=={1}}
\ACRO{usage}{BSP=={1}}
@@ -203,34 +178,15 @@
\ACRO{usage}{aFRR=={1}}
\ACRO{usage}{mFRR=={1}}
\ACRO{usage}{MW=={0}}
\ACRO{pages}{BRP=={4@1@3}}
\ACRO{pages}{TSO=={3@1@2|5@1@4}}
\ACRO{pages}{FCR=={6@1@5}}
\ACRO{pages}{BSP=={6@1@5}}
\ACRO{pages}{aFRR=={6@1@5}}
\ACRO{pages}{mFRR=={6@1@5}}
\ACRO{pages}{GAN=={21@1@20}}
\ACRO{pages}{CTSGAN=={21@1@20}}
\ACRO{pages}{MLP=={22@1@21}}
\ACRO{pages}{GP=={22@1@21}}
\ACRO{pages}{TSPA=={22@1@21}}
\ACRO{pages}{PLF=={22@1@21}}
\ACRO{pages}{NAQR=={43@1@42}}
\ACRO{pages}{CRPS=={22@1@21|42@1@41|43@1@42}}
\ACRO{pages}{MSE=={42@1@41|43@1@42}}
\ACRO{pages}{MAE=={42@1@41|43@1@42}}
\ACRO{pages}{NRV=={3@1@2|45@1@44|46@1@45}}
\abx@aux@read@bbl@mdfivesum{E3B4F6289F5EA7AEDA0AEA967029BC23}
\abx@aux@defaultrefcontext{0}{dumas_probabilistic_2019}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{dumas_deep_2022}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{ho_denoising_2020}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_imbalance_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_intraday_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{lu_scenarios_2022}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_measured_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_photovoltaic_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{poggi_electricity_2023}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{rasul_autoregressive_2021}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{weron_electricity_2014}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_wind_nodate}{nyt/global//global/global}
\gdef \@abspage@last{53}
\ACRO{pages}{BRP=={5@1@4}}
\ACRO{pages}{TSO=={3@1@2|6@1@5}}
\ACRO{pages}{FCR=={7@1@6}}
\ACRO{pages}{BSP=={7@1@6}}
\ACRO{pages}{aFRR=={7@1@6}}
\ACRO{pages}{mFRR=={7@1@6}}
\ACRO{pages}{NAQR=={45@1@44}}
\ACRO{pages}{CRPS=={44@1@43|45@1@44}}
\ACRO{pages}{MSE=={44@1@43|45@1@44}}
\ACRO{pages}{MAE=={44@1@43|45@1@44}}
\ACRO{pages}{NRV=={3@1@2|4@1@3|47@1@46|48@1@47}}
\gdef \@abspage@last{59}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,22 +1,123 @@
[0] Config.pm:307> INFO - This is Biber 2.19
[0] Config.pm:310> INFO - Logfile is 'verslag.blg'
[38] biber-darwin:340> INFO - === Sat May 18, 2024, 23:55:25
[48] Biber.pm:419> INFO - Reading 'verslag.bcf'
[100] Biber.pm:979> INFO - Found 14 citekeys in bib section 0
[111] Biber.pm:4419> INFO - Processing section 0
[116] Biber.pm:4610> INFO - Looking for bibtex file './references.bib' for section 0
[119] bibtex.pm:1713> INFO - LaTeX decoding ...
[146] bibtex.pm:1519> INFO - Found BibTeX data source './references.bib'
[198] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable'
[198] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized'
[198] Biber.pm:4239> INFO - Sorting list 'nyt/apasortcite//global/global' of type 'entry' with template 'nyt' and locale 'en-US'
[198] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US'
[211] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable'
[211] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized'
[211] Biber.pm:4239> INFO - Sorting list 'nyt/global//global/global' of type 'entry' with template 'nyt' and locale 'en-US'
[211] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US'
[221] bbl.pm:660> INFO - Writing 'verslag.bbl' with encoding 'UTF-8'
[230] bbl.pm:763> INFO - Output to verslag.bbl
[230] Biber.pm:131> WARN - I didn't find a database entry for 'ho2020denoising' (section 0)
[230] Biber.pm:131> WARN - I didn't find a database entry for 'elia_open_data' (section 0)
[230] Biber.pm:133> INFO - WARNINGS: 2
This is BibTeX, Version 0.99d (TeX Live 2023)
Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
The top-level auxiliary file: verslag.aux
A level-1 auxiliary file: sections/introduction.aux
A level-1 auxiliary file: sections/background.aux
A level-1 auxiliary file: sections/policies.aux
A level-1 auxiliary file: sections/literature_study.aux
The style file: unsrtnat.bst
A level-1 auxiliary file: sections/appendix.aux
Database file #1: references.bib
Warning--entry type for "noauthor_welcome_nodate" isn't style-file defined
--line 365 of file references.bib
Warning--entry type for "noauthor_imbalance_nodate" isn't style-file defined
--line 372 of file references.bib
Warning--entry type for "noauthor_measured_nodate" isn't style-file defined
--line 381 of file references.bib
Warning--entry type for "noauthor_photovoltaic_nodate" isn't style-file defined
--line 399 of file references.bib
Warning--entry type for "noauthor_wind_nodate" isn't style-file defined
--line 408 of file references.bib
Warning--entry type for "noauthor_intraday_nodate" isn't style-file defined
--line 417 of file references.bib
Warning--entry type for "noauthor_geliberaliseerde_nodate" isn't style-file defined
--line 442 of file references.bib
Warning--entry type for "commission_for_electricity_and_gas_regulation_creg_study_2023" isn't style-file defined
--line 477 of file references.bib
Warning--entry type for "noauthor_role_nodate" isn't style-file defined
--line 487 of file references.bib
Warning--entry type for "noauthor_fcr_nodate" isn't style-file defined
--line 495 of file references.bib
Warning--entry type for "noauthor_afrr_nodate" isn't style-file defined
--line 502 of file references.bib
Warning--entry type for "noauthor_mfrr_nodate" isn't style-file defined
--line 509 of file references.bib
Warning--empty year in commission_for_electricity_and_gas_regulation_creg_study_2023
Warning--empty year in noauthor_geliberaliseerde_nodate
Warning--empty year in noauthor_role_nodate
Warning--empty year in noauthor_fcr_nodate
Warning--empty year in noauthor_afrr_nodate
Warning--empty year in noauthor_mfrr_nodate
Warning--empty year in elia_tariffs_2022
Warning--empty year in goodfellow_generative_2014
Warning--empty year in kingma_auto-encoding_2022
Warning--empty year in rezende_variational_2015
Warning--empty year in rezende_variational_2015
Warning--empty year in sohl-dickstein_deep_2015
Warning--empty journal in koenker_regression_1978
Warning--empty year in koenker_regression_1978
Warning--empty year in koenker_regression_1978
Warning--empty year in ho_denoising_2020
Warning--empty journal in gneiting_strictly_2007
Warning--empty year in gneiting_strictly_2007
Warning--empty year in gneiting_strictly_2007
Warning--empty journal in weron_electricity_2014
Warning--empty year in weron_electricity_2014
Warning--empty year in weron_electricity_2014
Warning--empty journal in poggi_electricity_2023
Warning--empty year in poggi_electricity_2023
Warning--empty year in poggi_electricity_2023
Warning--empty journal in lago_forecasting_2018
Warning--empty year in lago_forecasting_2018
Warning--empty year in lago_forecasting_2018
Warning--empty journal in hagfors_modeling_2016
Warning--empty year in hagfors_modeling_2016
Warning--empty year in hagfors_modeling_2016
Warning--empty journal in lu_scenarios_2022
Warning--empty year in lu_scenarios_2022
Warning--empty year in lu_scenarios_2022
Warning--empty journal in dumas_deep_2022
Warning--empty year in dumas_deep_2022
Warning--empty year in rasul_autoregressive_2021
Warning--empty year in dumas_probabilistic_2019
Warning--empty year in narajewski_probabilistic_2022
Warning--empty year in noauthor_welcome_nodate
Warning--empty year in noauthor_imbalance_nodate
Warning--empty year in noauthor_measured_nodate
Warning--empty year in noauthor_photovoltaic_nodate
Warning--empty year in noauthor_wind_nodate
Warning--empty year in noauthor_intraday_nodate
Warning--empty year in dhariwal_diffusion_2021
Warning--empty year in ho_classifier-free_2022
You've used 31 entries,
2481 wiz_defined-function locations,
728 strings with 11247 characters,
and the built_in function-call counts, 10379 in all, are:
= -- 895
> -- 420
< -- 13
+ -- 195
- -- 122
* -- 806
:= -- 1473
add.period$ -- 113
call.type$ -- 31
change.case$ -- 62
chr.to.int$ -- 20
cite$ -- 89
duplicate$ -- 507
empty$ -- 1052
format.name$ -- 154
if$ -- 2252
int.to.chr$ -- 12
int.to.str$ -- 32
missing$ -- 10
newline$ -- 182
num.names$ -- 62
pop$ -- 348
preamble$ -- 1
purify$ -- 31
quote$ -- 0
skip$ -- 364
stack$ -- 0
substring$ -- 315
swap$ -- 87
text.length$ -- 3
text.prefix$ -- 0
top$ -- 0
type$ -- 217
warning$ -- 47
while$ -- 61
width$ -- 0
write$ -- 403
(There were 59 warnings)

File diff suppressed because it is too large Load Diff

View File

@@ -16,18 +16,19 @@
\BOOKMARK [2][-]{subsection.4.1}{\376\377\000B\000a\000s\000e\000l\000i\000n\000e\000s}{section.4}% 16
\BOOKMARK [2][-]{subsection.4.2}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000b\000a\000s\000e\000d\000\040\000o\000n\000\040\000N\000R\000V\000\040\000g\000e\000n\000e\000r\000a\000t\000i\000o\000n\000s}{section.4}% 17
\BOOKMARK [1][-]{section.5}{\376\377\000L\000i\000t\000e\000r\000a\000t\000u\000r\000e\000\040\000S\000t\000u\000d\000y}{}% 18
\BOOKMARK [2][-]{subsection.5.1}{\376\377\000E\000l\000e\000c\000t\000r\000i\000c\000i\000t\000y\000\040\000P\000r\000i\000c\000e\000\040\000F\000o\000r\000e\000c\000a\000s\000t\000i\000n\000g}{section.5}% 19
\BOOKMARK [2][-]{subsection.5.2}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000B\000a\000t\000t\000e\000r\000y\000\040\000O\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{section.5}% 20
\BOOKMARK [1][-]{section.6}{\376\377\000R\000e\000s\000u\000l\000t\000s\000\040\000\046\000\040\000D\000i\000s\000c\000u\000s\000s\000i\000o\000n}{}% 21
\BOOKMARK [2][-]{subsection.6.1}{\376\377\000D\000a\000t\000a}{section.6}% 22
\BOOKMARK [2][-]{subsection.6.2}{\376\377\000Q\000u\000a\000n\000t\000i\000l\000e\000\040\000R\000e\000g\000r\000e\000s\000s\000i\000o\000n}{section.6}% 23
\BOOKMARK [3][-]{subsubsection.6.2.1}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.6.2}% 24
\BOOKMARK [3][-]{subsubsection.6.2.2}{\376\377\000N\000o\000n\000-\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.6.2}% 25
\BOOKMARK [3][-]{subsubsection.6.2.3}{\376\377\000G\000R\000U\000\040\000M\000o\000d\000e\000l}{subsection.6.2}% 26
\BOOKMARK [2][-]{subsection.6.3}{\376\377\000D\000i\000f\000f\000u\000s\000i\000o\000n}{section.6}% 27
\BOOKMARK [2][-]{subsection.6.4}{\376\377\000C\000o\000m\000p\000a\000r\000i\000s\000o\000n}{section.6}% 28
\BOOKMARK [2][-]{subsection.6.5}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000b\000a\000t\000t\000e\000r\000y\000\040\000o\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{section.6}% 29
\BOOKMARK [3][-]{subsubsection.6.5.1}{\376\377\000B\000a\000s\000e\000l\000i\000n\000e\000s}{subsection.6.5}% 30
\BOOKMARK [3][-]{subsubsection.6.5.2}{\376\377\000P\000o\000l\000i\000c\000y\000\040\000u\000s\000i\000n\000g\000\040\000g\000e\000n\000e\000r\000a\000t\000e\000d\000\040\000N\000R\000V\000\040\000s\000a\000m\000p\000l\000e\000s}{subsection.6.5}% 31
\BOOKMARK [1][-]{section.7}{\376\377\000C\000o\000n\000c\000l\000u\000s\000i\000o\000n}{}% 32
\BOOKMARK [1][-]{appendix.A}{\376\377\000A\000p\000p\000e\000n\000d\000i\000x}{}% 33
\BOOKMARK [2][-]{subsection.5.1}{\376\377\000D\000a\000y\000-\000A\000h\000e\000a\000d\000\040\000E\000l\000e\000c\000t\000r\000i\000c\000i\000t\000y\000\040\000P\000r\000i\000c\000e\000\040\000F\000o\000r\000e\000c\000a\000s\000t\000i\000n\000g}{section.5}% 19
\BOOKMARK [2][-]{subsection.5.2}{\376\377\000I\000m\000b\000a\000l\000a\000n\000c\000e\000\040\000P\000r\000i\000c\000e\000\040\000F\000o\000r\000e\000c\000a\000s\000t\000i\000n\000g}{section.5}% 20
\BOOKMARK [2][-]{subsection.5.3}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000B\000a\000t\000t\000e\000r\000y\000\040\000O\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{section.5}% 21
\BOOKMARK [1][-]{section.6}{\376\377\000R\000e\000s\000u\000l\000t\000s\000\040\000\046\000\040\000D\000i\000s\000c\000u\000s\000s\000i\000o\000n}{}% 22
\BOOKMARK [2][-]{subsection.6.1}{\376\377\000D\000a\000t\000a}{section.6}% 23
\BOOKMARK [2][-]{subsection.6.2}{\376\377\000Q\000u\000a\000n\000t\000i\000l\000e\000\040\000R\000e\000g\000r\000e\000s\000s\000i\000o\000n}{section.6}% 24
\BOOKMARK [3][-]{subsubsection.6.2.1}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.6.2}% 25
\BOOKMARK [3][-]{subsubsection.6.2.2}{\376\377\000N\000o\000n\000-\000L\000i\000n\000e\000a\000r\000\040\000M\000o\000d\000e\000l}{subsection.6.2}% 26
\BOOKMARK [3][-]{subsubsection.6.2.3}{\376\377\000G\000R\000U\000\040\000M\000o\000d\000e\000l}{subsection.6.2}% 27
\BOOKMARK [2][-]{subsection.6.3}{\376\377\000D\000i\000f\000f\000u\000s\000i\000o\000n}{section.6}% 28
\BOOKMARK [2][-]{subsection.6.4}{\376\377\000C\000o\000m\000p\000a\000r\000i\000s\000o\000n}{section.6}% 29
\BOOKMARK [2][-]{subsection.6.5}{\376\377\000P\000o\000l\000i\000c\000i\000e\000s\000\040\000f\000o\000r\000\040\000b\000a\000t\000t\000e\000r\000y\000\040\000o\000p\000t\000i\000m\000i\000z\000a\000t\000i\000o\000n}{section.6}% 30
\BOOKMARK [3][-]{subsubsection.6.5.1}{\376\377\000B\000a\000s\000e\000l\000i\000n\000e\000s}{subsection.6.5}% 31
\BOOKMARK [3][-]{subsubsection.6.5.2}{\376\377\000P\000o\000l\000i\000c\000y\000\040\000u\000s\000i\000n\000g\000\040\000g\000e\000n\000e\000r\000a\000t\000e\000d\000\040\000N\000R\000V\000\040\000s\000a\000m\000p\000l\000e\000s}{subsection.6.5}% 32
\BOOKMARK [1][-]{section.7}{\376\377\000C\000o\000n\000c\000l\000u\000s\000i\000o\000n}{}% 33
\BOOKMARK [1][-]{appendix.A}{\376\377\000A\000p\000p\000e\000n\000d\000i\000x}{}% 34

Binary file not shown.

View File

@@ -41,7 +41,7 @@
>
]>
<requests version="1.0">
<internal package="biblatex" priority="9" active="0">
<internal package="biblatex" priority="9" active="1">
<generic>latex</generic>
<provides type="dynamic">
<file>verslag.bcf</file>
@@ -51,20 +51,16 @@
</requires>
<requires type="static">
<file>blx-dm.def</file>
<file>apa.dbx</file>
<file>blx-compat.def</file>
<file>biblatex.def</file>
<file>standard.bbx</file>
<file>apa.bbx</file>
<file>apa.cbx</file>
<file>numeric.bbx</file>
<file>numeric.cbx</file>
<file>biblatex.cfg</file>
<file>english.lbx</file>
<file>american.lbx</file>
<file>american-apa.lbx</file>
<file>english-apa.lbx</file>
</requires>
</internal>
<external package="biblatex" priority="5" active="0">
<external package="biblatex" priority="5" active="1">
<generic>biber</generic>
<cmdline>
<binary>biber</binary>
@@ -82,8 +78,5 @@
<requires type="dynamic">
<file>verslag.bcf</file>
</requires>
<requires type="editable">
<file>./references.bib</file>
</requires>
</external>
</requests>

View File

Binary file not shown.

View File

@@ -31,6 +31,7 @@
\usepackage{tikz}
\usepackage{acro}
\usepackage{pdflscape}
\usepackage[square,numbers]{natbib}
\usetikzlibrary{positioning, calc}
@@ -78,9 +79,9 @@
% Bibliography settings
%-----------------------
\usepackage[backend=biber, style=apa, sorting=nyt, hyperref=true]{biblatex}
\addbibresource{./references.bib}
\usepackage{csquotes} % Suggested when using babel+biblatex
% \usepackage[backend=biber, style=apa, sorting=nyt, hyperref=true]{biblatex}
% \addbibresource{./references.bib}
% \usepackage{csquotes} % Suggested when using babel+biblatex
% Hyperreferences
%-----------------
@@ -133,6 +134,7 @@
\begin{document}
% =====================================================================
% Cover
% =====================================================================
@@ -193,7 +195,9 @@
\newpage
% bibliography
\printbibliography
% \printbibliography
\bibliographystyle{unsrtnat}
\bibliography{references}
% appendix
\appendix

View File

@@ -1,35 +1,36 @@
\acswitchoff
\babel@toc {english}{}\relax
\contentsline {section}{\numberline {1}Introduction}{2}{section.1}%
\contentsline {section}{\numberline {2}Electricity market}{3}{section.2}%
\contentsline {section}{\numberline {3}Generative modeling}{7}{section.3}%
\contentsline {subsection}{\numberline {3.1}Quantile Regression}{7}{subsection.3.1}%
\contentsline {subsection}{\numberline {3.2}Autoregressive vs Non-Autoregressive models}{10}{subsection.3.2}%
\contentsline {subsection}{\numberline {3.3}Model Types}{11}{subsection.3.3}%
\contentsline {subsubsection}{\numberline {3.3.1}Linear Model}{11}{subsubsection.3.3.1}%
\contentsline {subsubsection}{\numberline {3.3.2}Non-Linear Model}{12}{subsubsection.3.3.2}%
\contentsline {subsubsection}{\numberline {3.3.3}Recurrent Neural Network (RNN)}{12}{subsubsection.3.3.3}%
\contentsline {subsection}{\numberline {3.4}Diffusion models}{13}{subsection.3.4}%
\contentsline {subsubsection}{\numberline {3.4.1}Overview}{13}{subsubsection.3.4.1}%
\contentsline {subsubsection}{\numberline {3.4.2}Applications}{14}{subsubsection.3.4.2}%
\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{14}{subsubsection.3.4.3}%
\contentsline {subsection}{\numberline {3.5}Evaluation}{16}{subsection.3.5}%
\contentsline {section}{\numberline {4}Policies}{18}{section.4}%
\contentsline {subsection}{\numberline {4.1}Baselines}{18}{subsection.4.1}%
\contentsline {subsection}{\numberline {4.2}Policies based on NRV generations}{19}{subsection.4.2}%
\contentsline {section}{\numberline {5}Literature Study}{20}{section.5}%
\contentsline {subsection}{\numberline {5.1}Electricity Price Forecasting}{20}{subsection.5.1}%
\contentsline {subsection}{\numberline {5.2}Policies for Battery Optimization}{21}{subsection.5.2}%
\contentsline {section}{\numberline {6}Results \& Discussion}{22}{section.6}%
\contentsline {subsection}{\numberline {6.1}Data}{22}{subsection.6.1}%
\contentsline {subsection}{\numberline {6.2}Quantile Regression}{24}{subsection.6.2}%
\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{24}{subsubsection.6.2.1}%
\contentsline {subsubsection}{\numberline {6.2.2}Non-Linear Model}{30}{subsubsection.6.2.2}%
\contentsline {subsubsection}{\numberline {6.2.3}GRU Model}{33}{subsubsection.6.2.3}%
\contentsline {subsection}{\numberline {6.3}Diffusion}{37}{subsection.6.3}%
\contentsline {subsection}{\numberline {6.4}Comparison}{41}{subsection.6.4}%
\contentsline {subsection}{\numberline {6.5}Policies for battery optimization}{44}{subsection.6.5}%
\contentsline {subsubsection}{\numberline {6.5.1}Baselines}{44}{subsubsection.6.5.1}%
\contentsline {subsubsection}{\numberline {6.5.2}Policy using generated NRV samples}{45}{subsubsection.6.5.2}%
\contentsline {section}{\numberline {7}Conclusion}{48}{section.7}%
\contentsline {section}{\numberline {A}Appendix}{51}{appendix.A}%
\contentsline {section}{\numberline {2}Electricity market}{4}{section.2}%
\contentsline {section}{\numberline {3}Generative modeling}{8}{section.3}%
\contentsline {subsection}{\numberline {3.1}Quantile Regression}{9}{subsection.3.1}%
\contentsline {subsection}{\numberline {3.2}Autoregressive vs Non-Autoregressive models}{12}{subsection.3.2}%
\contentsline {subsection}{\numberline {3.3}Model Types}{13}{subsection.3.3}%
\contentsline {subsubsection}{\numberline {3.3.1}Linear Model}{13}{subsubsection.3.3.1}%
\contentsline {subsubsection}{\numberline {3.3.2}Non-Linear Model}{14}{subsubsection.3.3.2}%
\contentsline {subsubsection}{\numberline {3.3.3}Recurrent Neural Network (RNN)}{14}{subsubsection.3.3.3}%
\contentsline {subsection}{\numberline {3.4}Diffusion models}{15}{subsection.3.4}%
\contentsline {subsubsection}{\numberline {3.4.1}Overview}{15}{subsubsection.3.4.1}%
\contentsline {subsubsection}{\numberline {3.4.2}Applications}{16}{subsubsection.3.4.2}%
\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{16}{subsubsection.3.4.3}%
\contentsline {subsection}{\numberline {3.5}Evaluation}{18}{subsection.3.5}%
\contentsline {section}{\numberline {4}Policies}{20}{section.4}%
\contentsline {subsection}{\numberline {4.1}Baselines}{20}{subsection.4.1}%
\contentsline {subsection}{\numberline {4.2}Policies based on NRV generations}{21}{subsection.4.2}%
\contentsline {section}{\numberline {5}Literature Study}{22}{section.5}%
\contentsline {subsection}{\numberline {5.1}Day-Ahead Electricity Price Forecasting}{22}{subsection.5.1}%
\contentsline {subsection}{\numberline {5.2}Imbalance Price Forecasting}{23}{subsection.5.2}%
\contentsline {subsection}{\numberline {5.3}Policies for Battery Optimization}{23}{subsection.5.3}%
\contentsline {section}{\numberline {6}Results \& Discussion}{24}{section.6}%
\contentsline {subsection}{\numberline {6.1}Data}{24}{subsection.6.1}%
\contentsline {subsection}{\numberline {6.2}Quantile Regression}{25}{subsection.6.2}%
\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{25}{subsubsection.6.2.1}%
\contentsline {subsubsection}{\numberline {6.2.2}Non-Linear Model}{32}{subsubsection.6.2.2}%
\contentsline {subsubsection}{\numberline {6.2.3}GRU Model}{35}{subsubsection.6.2.3}%
\contentsline {subsection}{\numberline {6.3}Diffusion}{39}{subsection.6.3}%
\contentsline {subsection}{\numberline {6.4}Comparison}{43}{subsection.6.4}%
\contentsline {subsection}{\numberline {6.5}Policies for battery optimization}{46}{subsection.6.5}%
\contentsline {subsubsection}{\numberline {6.5.1}Baselines}{46}{subsubsection.6.5.1}%
\contentsline {subsubsection}{\numberline {6.5.2}Policy using generated NRV samples}{47}{subsubsection.6.5.2}%
\contentsline {section}{\numberline {7}Conclusion}{52}{section.7}%
\contentsline {section}{\numberline {A}Appendix}{57}{appendix.A}%

Binary file not shown.

Before

Width:  |  Height:  |  Size: 176 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 281 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 278 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 160 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 263 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 195 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 312 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 184 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 274 KiB

File diff suppressed because one or more lines are too long

View File

@@ -841,9 +841,11 @@ class NonAutoRegressiveQuantileRegression(Trainer):
self.data_processor.inverse_transform(samples),
)
samples = samples.unsqueeze(0)
samples = self.data_processor.inverse_transform(samples).unsqueeze(0)
targets = targets.squeeze(-1)
targets = targets[0].unsqueeze(0)
targets = self.data_processor.inverse_transform(targets)
samples = samples.to(self.device)
@@ -851,48 +853,59 @@ class NonAutoRegressiveQuantileRegression(Trainer):
crps_from_samples_metric.append(crps[0].mean().item())
task.get_logger().report_scalar(
title="CRPS_from_samples",
series="test",
value=np.mean(crps_from_samples_metric),
iteration=epoch,
)
# using the policy evaluator, evaluate the policy with the generated samples
if self.policy_evaluator is not None:
optimal_penalty, profit, charge_cycles = (
self.policy_evaluator.optimize_penalty_for_target_charge_cycles(
idx_samples=generated_samples,
test_loader=dataloader,
initial_penalty=500,
target_charge_cycles=283,
initial_learning_rate=2,
max_iterations=100,
tolerance=1,
if epoch is not None and task is not None:
task.get_logger().report_scalar(
title="CRPS_from_samples",
series="val",
value=np.mean(crps_from_samples_metric),
iteration=epoch,
)
# using the policy evaluator, evaluate the policy with the generated samples
if self.policy_evaluator is not None and epoch != -1:
optimal_penalty, profit, charge_cycles = (
self.policy_evaluator.optimize_penalty_for_target_charge_cycles(
idx_samples=generated_samples,
test_loader=dataloader,
initial_penalty=900,
target_charge_cycles=58 * 400 / 356,
initial_learning_rate=5,
max_iterations=100,
tolerance=1,
iteration=epoch,
)
)
)
print(
f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
)
print(
f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
)
task.get_logger().report_scalar(
title="Optimal Penalty",
series="test",
value=optimal_penalty,
iteration=epoch,
)
task.get_logger().report_scalar(
title="Optimal Penalty",
series="val",
value=optimal_penalty,
iteration=epoch,
)
task.get_logger().report_scalar(
title="Optimal Profit", series="test", value=profit, iteration=epoch
)
task.get_logger().report_scalar(
title="Optimal Profit", series="val", value=profit, iteration=epoch
)
task.get_logger().report_scalar(
title="Optimal Charge Cycles",
series="test",
value=charge_cycles,
iteration=epoch,
)
task.get_logger().report_scalar(
title="Optimal Charge Cycles",
series="val",
value=charge_cycles,
iteration=epoch,
)
return (
np.mean(crps_from_samples_metric),
profit,
charge_cycles,
optimal_penalty,
generated_samples,
)
def plot_quantile_percentages(
self,

View File

@@ -2,9 +2,7 @@ from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(
task_name="AQR: Non-Linear (4 - 512) + Load + Wind + PV + QE + NP"
)
task = clearml_helper.get_task(task_name="AQR: Linear + All")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.PolicyEvaluator import PolicyEvaluator
@@ -46,7 +44,7 @@ data_config.NOMINAL_NET_POSITION = True
data_config = task.connect(data_config, name="data_features")
data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor = DataProcessor(data_config, path="", lstm=True)
data_processor.set_batch_size(512)
data_processor.set_full_day_skip(False)
@@ -69,8 +67,8 @@ else:
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 512,
"num_layers": 4,
"hidden_size": 256,
"num_layers": 2,
"dropout": 0.2,
"time_feature_embedding": 5,
}
@@ -91,17 +89,17 @@ time_embedding = TimeEmbedding(
# dropout=model_parameters["dropout"],
# )
non_linear_model = NonLinearRegression(
time_embedding.output_dim(inputDim),
len(quantiles),
hiddenSize=model_parameters["hidden_size"],
numLayers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
)
# non_linear_model = NonLinearRegression(
# time_embedding.output_dim(inputDim),
# len(quantiles),
# hiddenSize=model_parameters["hidden_size"],
# numLayers=model_parameters["num_layers"],
# dropout=model_parameters["dropout"],
# )
# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
model = nn.Sequential(time_embedding, non_linear_model)
model = nn.Sequential(time_embedding, linear_model)
model.output_size = 1
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
@@ -128,7 +126,7 @@ trainer.add_metrics_to_track(
)
trainer.early_stopping(patience=6)
trainer.plot_every(4)
trainer.train(task=task, epochs=epochs, remotely=False)
trainer.train(task=task, epochs=epochs, remotely=True)
### Policy Evaluation ###
idx_samples = trainer.test_set_samples

View File

@@ -2,151 +2,18 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>DateTime</th>\n",
" <th>Quality status</th>\n",
" <th>Resolution code</th>\n",
" <th>Net regulation volume</th>\n",
" <th>System imbalance</th>\n",
" <th>Alpha</th>\n",
" <th>Marginal incremental price</th>\n",
" <th>Marginal decremental price</th>\n",
" <th>Strategic reserve price</th>\n",
" <th>Positive imbalance price</th>\n",
" <th>Negative imbalance price</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>2023-12-12 06:45:00+00:00</td>\n",
" <td>Not Validated</td>\n",
" <td>PT15M</td>\n",
" <td>64.621</td>\n",
" <td>-92.402</td>\n",
" <td>0.00</td>\n",
" <td>153.12</td>\n",
" <td>-248.86</td>\n",
" <td>NaN</td>\n",
" <td>153.12</td>\n",
" <td>153.12</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>2023-12-12 06:30:00+00:00</td>\n",
" <td>Not Validated</td>\n",
" <td>PT15M</td>\n",
" <td>39.672</td>\n",
" <td>-73.985</td>\n",
" <td>0.00</td>\n",
" <td>153.29</td>\n",
" <td>-147.29</td>\n",
" <td>NaN</td>\n",
" <td>153.29</td>\n",
" <td>153.29</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2023-12-12 06:15:00+00:00</td>\n",
" <td>Not Validated</td>\n",
" <td>PT15M</td>\n",
" <td>80.030</td>\n",
" <td>-103.795</td>\n",
" <td>0.00</td>\n",
" <td>152.00</td>\n",
" <td>-90.69</td>\n",
" <td>NaN</td>\n",
" <td>152.00</td>\n",
" <td>152.00</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>2023-12-12 06:00:00+00:00</td>\n",
" <td>Not Validated</td>\n",
" <td>PT15M</td>\n",
" <td>9.882</td>\n",
" <td>10.054</td>\n",
" <td>0.00</td>\n",
" <td>234.00</td>\n",
" <td>-516.96</td>\n",
" <td>NaN</td>\n",
" <td>-516.96</td>\n",
" <td>-516.96</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>2023-12-12 05:45:00+00:00</td>\n",
" <td>Not Validated</td>\n",
" <td>PT15M</td>\n",
" <td>198.887</td>\n",
" <td>-250.889</td>\n",
" <td>5.06</td>\n",
" <td>234.00</td>\n",
" <td>-390.57</td>\n",
" <td>NaN</td>\n",
" <td>239.06</td>\n",
" <td>239.06</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" DateTime Quality status Resolution code \\\n",
"0 2023-12-12 06:45:00+00:00 Not Validated PT15M \n",
"1 2023-12-12 06:30:00+00:00 Not Validated PT15M \n",
"2 2023-12-12 06:15:00+00:00 Not Validated PT15M \n",
"3 2023-12-12 06:00:00+00:00 Not Validated PT15M \n",
"4 2023-12-12 05:45:00+00:00 Not Validated PT15M \n",
"\n",
" Net regulation volume System imbalance Alpha Marginal incremental price \\\n",
"0 64.621 -92.402 0.00 153.12 \n",
"1 39.672 -73.985 0.00 153.29 \n",
"2 80.030 -103.795 0.00 152.00 \n",
"3 9.882 10.054 0.00 234.00 \n",
"4 198.887 -250.889 5.06 234.00 \n",
"\n",
" Marginal decremental price Strategic reserve price \\\n",
"0 -248.86 NaN \n",
"1 -147.29 NaN \n",
"2 -90.69 NaN \n",
"3 -516.96 NaN \n",
"4 -390.57 NaN \n",
"\n",
" Positive imbalance price Negative imbalance price \n",
"0 153.12 153.12 \n",
"1 153.29 153.29 \n",
"2 152.00 152.00 \n",
"3 -516.96 -516.96 \n",
"4 239.06 239.06 "
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
"ename": "",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31mRunning cells with 'Python 3.12.3' requires the ipykernel package.\n",
"\u001b[1;31mRun the following command to install 'ipykernel' into the Python environment. \n",
"\u001b[1;31mCommand: '/opt/homebrew/bin/python3 -m pip install ipykernel -U --user --force-reinstall'"
]
}
],
"source": [
@@ -160,7 +27,7 @@
"# read imbalance prices\n",
"imbalance_prices = pd.read_csv('../../data/imbalance_prices.csv', sep=';')\n",
"imbalance_prices[\"DateTime\"] = pd.to_datetime(imbalance_prices['DateTime'], utc=True)\n",
"imbalance_prices.head()"
"imbalance_prices.head()\n"
]
},
{
@@ -170,7 +37,7 @@
"outputs": [],
"source": [
"ipc_1 = ImbalancePriceCalculator(method=1)\n",
"ipc_2 = ImbalancePriceCalculator(method=2)"
"ipc_2 = ImbalancePriceCalculator(method=2)\n"
]
},
{
@@ -320,7 +187,7 @@
" \n",
"\n",
"print(\"Total error for method 1: \", error_1)\n",
"print(\"Total error for method 2: \", error_2)"
"print(\"Total error for method 2: \", error_2)\n"
]
},
{
@@ -508,7 +375,7 @@
" plt.legend()\n",
" # save plt\n",
" # plt.savefig(f'../../Result-Reports/imbalance_prices_images/method_1/imbalance_price_reconstruction_{dt.strftime(\"%d-%m-%Y\")}.png', dpi=300)\n",
" plt.show()"
" plt.show()\n"
]
},
{
@@ -591,7 +458,7 @@
"print(\"MAE NRV | Today Bid Ladder:: \", error_2)\n",
"\n",
"print(\"MAE SI | Yesterday Bid Ladder: \", error_3)\n",
"print(\"MAE NRV | Yesterday Bid Ladder: \", error_4)"
"print(\"MAE NRV | Yesterday Bid Ladder: \", error_4)\n"
]
},
{
@@ -776,7 +643,7 @@
" plt.legend()\n",
" # save plt\n",
" # plt.savefig(f'../../Result-Reports/imbalance_prices_images/method_1/imbalance_price_reconstruction_{dt.strftime(\"%d-%m-%Y\")}.png', dpi=300)\n",
" plt.show()"
" plt.show()\n"
]
},
{
@@ -803,7 +670,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
"version": "3.undefined.undefined"
}
},
"nbformat": 4,

View File

@@ -85,7 +85,9 @@ class ImbalancePriceCalculator:
def plot(self, datetime):
print(self.bid_ladder.index)
row = self.bid_ladder.loc[self.bid_ladder.index == datetime]
print(row)
dec_bids = row["bid_ladder_dec"].values[0]
inc_bids = row["bid_ladder_inc"].values[0]
@@ -109,6 +111,15 @@ class ImbalancePriceCalculator:
hovermode='x unified'
)
# figure size figsize=(10, 6)
fig.update_layout(
autosize=False,
width=800,
height=600,
)
fig.show()
def get_imbalance_prices_2023_for_date_vectorized(self, date, NRV_predictions_matrix):
@@ -178,4 +189,4 @@ def calculate_imbalance_price(SI_PREV, SI, MIP, MDP):
imbalance_price[SI > 0] = neg_imbalance_price[SI > 0]
imbalance_price[SI == 0] = (pos_imbalance_price[SI == 0] + neg_imbalance_price[SI == 0]) / 2
return alpha, imbalance_price
return alpha, imbalance_price