Updated literature study and validation set for autoregressive models

This commit is contained in:
2024-05-19 00:08:43 +02:00
parent 1d1436612c
commit 26807eae22
33 changed files with 1282 additions and 742 deletions

View File

@@ -14,6 +14,44 @@
long = Non-Autoregressive Quantile Regression
}
% Deep Learning
\DeclareAcronym{GRU}{
short = GRU,
long = Gated Recurrent Unit
}
\DeclareAcronym{LSTM}{
short = LSTM,
long = Long Short-Term Memory
}
% `plural = s` is intended to make the plural forms render as "GANs" /
% "Generative Adversarial Networks". NOTE(review): the acro package documents
% the keys as `short-plural`/`long-plural` — confirm `plural` is accepted by
% the acro version in use.
\DeclareAcronym{GAN}{
short = GAN,
long = Generative Adversarial Network,
plural = s
}
\DeclareAcronym{CTSGAN}{
short = CTSGAN,
long = Conditional Time Series Generative Adversarial Network
}
\DeclareAcronym{VAE}{
short = VAE,
long = Variational Autoencoder
}
\DeclareAcronym{MLP}{
short = MLP,
long = Multi-Layer Perceptron
}
% Irregular long plural: appending "s" would give "Gaussian Processs", so the
% full plural form is spelled out. NOTE(review): acro's documented key is
% `long-plural-form` — confirm `plural-form` is valid for the installed version.
\DeclareAcronym{GP}{
short = GP,
long = Gaussian Process,
plural-form = Gaussian Processes
}
% Metrics
\DeclareAcronym{MSE}{
short = MSE,
@@ -30,6 +68,16 @@
long = Continuous Ranked Probability Score
}
% Probabilistic-forecasting methodology and training-objective terms
% (grouped under the "% Metrics" section header above).
\DeclareAcronym{TSPA}{
short = TSPA,
long = Two-Step Probabilistic Approach
}
\DeclareAcronym{PLF}{
short = PLF,
long = Pinball Loss Function
}
% Electricity Market Terms
\DeclareAcronym{NRV}{
short = NRV,

View File

@@ -1,11 +1,4 @@
@online{noauthor_zotero_nodate,
title = {Zotero {\textbar} Connectors},
url = {https://www.zotero.org/download/connectors},
urldate = {2022-10-12},
file = {Zotero | Connectors:/Users/victormylle/Zotero/storage/EPF3ZZRA/connectors.html:text/html},
}
@online{noauthor_elia_nodate,
title = {Elia: de elektriciteitsmarkt en -systeem},
url = {https://www.elia.be/nl/elektriciteitsmarkt-en-systeem},
@@ -16,205 +9,6 @@
file = {Snapshot:/Users/victormylle/Zotero/storage/7QY94WTW/elektriciteitsmarkt-en-systeem.html:text/html},
}
@misc{gao_easy--hard_2023,
title = {Easy-to-Hard Learning for Information Extraction},
url = {http://arxiv.org/abs/2305.09193},
abstract = {Information extraction ({IE}) systems aim to automatically extract structured information, such as named entities, relations between entities, and events, from unstructured texts. While most existing work addresses a particular {IE} task, universally modeling various {IE} tasks with one model has achieved great success recently. Despite their success, they employ a one-stage learning strategy, i.e., directly learning to extract the target structure given the input text, which contradicts the human learning process. In this paper, we propose a unified easy-to-hard learning framework consisting of three stages, i.e., the easy stage, the hard stage, and the main stage, for {IE} by mimicking the human learning process. By breaking down the learning process into multiple stages, our framework facilitates the model to acquire general {IE} task knowledge and improve its generalization ability. Extensive experiments across four {IE} tasks demonstrate the effectiveness of our framework. We achieve new state-of-the-art results on 13 out of 17 datasets. Our code is available at {\textbackslash}url\{https://github.com/{DAMO}-{NLP}-{SG}/{IE}-E2H\}.},
number = {{arXiv}:2305.09193},
publisher = {{arXiv}},
author = {Gao, Chang and Zhang, Wenxuan and Lam, Wai and Bing, Lidong},
urldate = {2023-07-10},
date = {2023-05-19},
eprinttype = {arXiv},
eprint = {2305.09193},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/5YBG5XYS/2305.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/D8LIDUE8/Gao et al. - 2023 - Easy-to-Hard Learning for Information Extraction.pdf:application/pdf},
}
@article{gaur_semi-supervised_2021,
title = {Semi-supervised deep learning based named entity recognition model to parse education section of resumes},
volume = {33},
issn = {1433-3058},
url = {https://doi.org/10.1007/s00521-020-05351-2},
doi = {10.1007/s00521-020-05351-2},
abstract = {A job seeker's resume contains several sections, including educational qualifications. Educational qualifications capture the knowledge and skills relevant to the job. Machine processing of the education sections of resumes has been a difficult task. In this paper, we attempt to identify educational institutions' names and degrees from a resume's education section. Usually, a significant amount of annotated data is required for neural network-based named entity recognition techniques. A semi-supervised approach is used to overcome the lack of large annotated data. We trained a deep neural network model on an initial (seed) set of resume education sections. This model is used to predict entities of unlabeled education sections and is rectified using a correction module. The education sections containing the rectified entities are augmented to the seed set. The updated seed set is used for retraining, leading to better accuracy than the previously trained model. This way, it can provide a high overall accuracy without the need of large annotated data. Our model has achieved an accuracy of 92.06\% on the named entity recognition task.},
pages = {5705--5718},
number = {11},
journaltitle = {Neural Computing and Applications},
shortjournal = {Neural Comput \& Applic},
author = {Gaur, Bodhvi and Saluja, Gurpreet Singh and Sivakumar, Hamsa Bharathi and Singh, Sanjay},
urldate = {2023-07-10},
date = {2021-06-01},
langid = {english},
keywords = {Deep learning models, Named entity recognition ({NER}), Natural language processing, Resume information extraction, Semi-supervised learning},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/4NK6IXHZ/Gaur et al. - 2021 - Semi-supervised deep learning based named entity r.pdf:application/pdf},
}
@article{landolsi_information_2023,
title = {Information extraction from electronic medical documents: state of the art and future research directions},
volume = {65},
issn = {0219-3116},
url = {https://doi.org/10.1007/s10115-022-01779-1},
doi = {10.1007/s10115-022-01779-1},
shorttitle = {Information extraction from electronic medical documents},
abstract = {In the medical field, a doctor must have a comprehensive knowledge by reading and writing narrative documents, and he is responsible for every decision he takes for patients. Unfortunately, it is very tiring to read all necessary information about drugs, diseases and patients due to the large amount of documents that are increasing every day. Consequently, so many medical errors can happen and even kill people. Likewise, there is such an important field that can handle this problem, which is the information extraction. There are several important tasks in this field to extract the important and desired information from unstructured text written in natural language. The main principal tasks are named entity recognition and relation extraction since they can structure the text by extracting the relevant information. However, in order to treat the narrative text we should use natural language processing techniques to extract useful information and features. In our paper, we introduce and discuss the several techniques and solutions used in these tasks. Furthermore, we outline the challenges in information extraction from medical documents. In our knowledge, this is the most comprehensive survey in the literature with an experimental analysis and a suggestion for some uncovered directions.},
pages = {463--516},
number = {2},
journaltitle = {Knowledge and Information Systems},
shortjournal = {Knowl Inf Syst},
author = {Landolsi, Mohamed Yassine and Hlaoua, Lobna and Ben Romdhane, Lotfi},
urldate = {2023-07-10},
date = {2023-02-01},
langid = {english},
keywords = {Electronic medical records, Information extraction, Medical named entities recognition, Medical relation extraction, Section detection},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/KRTKZW3M/Landolsi et al. - 2023 - Information extraction from electronic medical doc.pdf:application/pdf},
}
@inproceedings{fu_spanner_2021,
location = {Online},
title = {{SpanNER}: Named Entity Re-/Recognition as Span Prediction},
url = {https://aclanthology.org/2021.acl-long.558},
doi = {10.18653/v1/2021.acl-long.558},
shorttitle = {{SpanNER}},
abstract = {Recent years have seen the paradigm shift of Named Entity Recognition ({NER}) systems from sequence labeling to span prediction. Despite its preliminary effectiveness, the span prediction model's architectural bias has not been fully understood. In this paper, we first investigate the strengths and weaknesses when the span prediction model is used for named entity recognition compared with the sequence labeling framework and how to further improve it, which motivates us to make complementary advantages of systems based on different paradigms. We then reveal that span prediction, simultaneously, can serve as a system combiner to re-recognize named entities from different systems' outputs. We experimentally implement 154 systems on 11 datasets, covering three languages, comprehensive results show the effectiveness of span prediction models that both serve as base {NER} systems and system combiners. We make all codes and datasets available: https://github.com/neulab/spanner, as well as an online system demo: http://spanner.sh. Our model also has been deployed into the {ExplainaBoard} platform, which allows users to flexibly perform a system combination of top-scoring systems in an interactive way: http://explainaboard.nlpedia.ai/leaderboard/task-ner/.},
eventtitle = {{ACL}-{IJCNLP} 2021},
pages = {7183--7195},
booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
author = {Fu, Jinlan and Huang, Xuanjing and Liu, Pengfei},
urldate = {2023-07-10},
date = {2021-08},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/6JU4DR5Y/Fu et al. - 2021 - SpanNER Named Entity Re-Recognition as Span Pred.pdf:application/pdf},
}
@inproceedings{li_unified_2020,
location = {Online},
title = {A Unified {MRC} Framework for Named Entity Recognition},
url = {https://aclanthology.org/2020.acl-main.519},
doi = {10.18653/v1/2020.acl-main.519},
abstract = {The task of named entity recognition ({NER}) is normally divided into nested {NER} and flat {NER} depending on whether named entities are nested or not.Models are usually separately developed for the two tasks, since sequence labeling models, the most widely used backbone for flat {NER}, are only able to assign a single label to a particular token, which is unsuitable for nested {NER} where a token may be assigned several labels. In this paper, we propose a unified framework that is capable of handling both flat and nested {NER} tasks. Instead of treating the task of {NER} as a sequence labeling problem, we propose to formulate it as a machine reading comprehension ({MRC}) task. For example, extracting entities with the per label is formalized as extracting answer spans to the question “which person is mentioned in the text”.This formulation naturally tackles the entity overlapping issue in nested {NER}: the extraction of two overlapping entities with different categories requires answering two independent questions. Additionally, since the query encodes informative prior knowledge, this strategy facilitates the process of entity extraction, leading to better performances for not only nested {NER}, but flat {NER}. We conduct experiments on both nested and flat {NER} datasets.Experiment results demonstrate the effectiveness of the proposed formulation. We are able to achieve a vast amount of performance boost over current {SOTA} models on nested {NER} datasets, i.e., +1.28, +2.55, +5.44, +6.37,respectively on {ACE}04, {ACE}05, {GENIA} and {KBP}17, along with {SOTA} results on flat {NER} datasets, i.e., +0.24, +1.95, +0.21, +1.49 respectively on English {CoNLL} 2003, English {OntoNotes} 5.0, Chinese {MSRA} and Chinese {OntoNotes} 4.0.},
eventtitle = {{ACL} 2020},
pages = {5849--5859},
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
publisher = {Association for Computational Linguistics},
author = {Li, Xiaoya and Feng, Jingrong and Meng, Yuxian and Han, Qinghong and Wu, Fei and Li, Jiwei},
urldate = {2023-07-10},
date = {2020-07},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/TIVIKNGN/Li et al. - 2020 - A Unified MRC Framework for Named Entity Recogniti.pdf:application/pdf},
}
@misc{decorte_jobbert_2021,
title = {{JobBERT}: Understanding Job Titles through Skills},
url = {http://arxiv.org/abs/2109.09605},
shorttitle = {{JobBERT}},
abstract = {Job titles form a cornerstone of today's human resources ({HR}) processes. Within online recruitment, they allow candidates to understand the contents of a vacancy at a glance, while internal {HR} departments use them to organize and structure many of their processes. As job titles are a compact, convenient, and readily available data source, modeling them with high accuracy can greatly benefit many {HR} tech applications. In this paper, we propose a neural representation model for job titles, by augmenting a pre-trained language model with co-occurrence information from skill labels extracted from vacancies. Our {JobBERT} method leads to considerable improvements compared to using generic sentence encoders, for the task of job title normalization, for which we release a new evaluation benchmark.},
number = {{arXiv}:2109.09605},
publisher = {{arXiv}},
author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Demeester, Thomas and Develder, Chris},
urldate = {2023-07-20},
date = {2021-09-20},
eprinttype = {arXiv},
eprint = {2109.09605},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language},
}
@misc{sun_retentive_2023,
title = {Retentive Network: A Successor to Transformer for Large Language Models},
url = {http://arxiv.org/abs/2307.08621},
shorttitle = {Retentive Network},
abstract = {In this work, we propose Retentive Network ({RetNet}) as a foundation architecture for large language models, simultaneously achieving training parallelism, low-cost inference, and good performance. We theoretically derive the connection between recurrence and attention. Then we propose the retention mechanism for sequence modeling, which supports three computation paradigms, i.e., parallel, recurrent, and chunkwise recurrent. Specifically, the parallel representation allows for training parallelism. The recurrent representation enables low-cost \$O(1)\$ inference, which improves decoding throughput, latency, and {GPU} memory without sacrificing performance. The chunkwise recurrent representation facilitates efficient long-sequence modeling with linear complexity, where each chunk is encoded parallelly while recurrently summarizing the chunks. Experimental results on language modeling show that {RetNet} achieves favorable scaling results, parallel training, low-cost deployment, and efficient inference. The intriguing properties make {RetNet} a strong successor to Transformer for large language models. Code will be available at https://aka.ms/retnet.},
number = {{arXiv}:2307.08621},
publisher = {{arXiv}},
author = {Sun, Yutao and Dong, Li and Huang, Shaohan and Ma, Shuming and Xia, Yuqing and Xue, Jilong and Wang, Jianyong and Wei, Furu},
urldate = {2023-07-25},
date = {2023-07-19},
eprinttype = {arXiv},
eprint = {2307.08621},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
}
@misc{zhang_generation-driven_2023,
title = {Generation-driven Contrastive Self-training for Zero-shot Text Classification with Instruction-tuned {GPT}},
url = {http://arxiv.org/abs/2304.11872},
abstract = {Moreover, {GPT}-based zero-shot classification models tend to make independent predictions over test instances, which can be sub-optimal as the instance correlations and the decision boundaries in the target space are ignored. To address these difficulties and limitations, we propose a new approach to zero-shot text classification, namely {\textbackslash}ourmodelshort, which leverages the strong generative power of {GPT} to assist in training a smaller, more adaptable, and efficient sentence encoder classifier with contrastive self-training. Specifically, {GenCo} applies {GPT} in two ways: firstly, it generates multiple augmented texts for each input instance to enhance the semantic embedding of the instance and improve the mapping to relevant labels; secondly, it generates augmented texts conditioned on the predicted label during self-training, which makes the generative process tailored to the decision boundaries in the target space. In our experiments, {GenCo} outperforms previous state-of-the-art methods on multiple benchmark datasets, even when only limited in-domain text data is available.},
number = {{arXiv}:2304.11872},
publisher = {{arXiv}},
author = {Zhang, Ruohong and Wang, Yau-Shian and Yang, Yiming},
urldate = {2023-08-01},
date = {2023-04-24},
eprinttype = {arXiv},
eprint = {2304.11872},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language, Computer Science - Artificial Intelligence, interesting},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/2ULMRMN5/2304.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/D98MRNHP/Zhang et al. - 2023 - Generation-driven Contrastive Self-training for Ze.pdf:application/pdf},
}
@misc{zhang_clusterllm_2023,
title = {{ClusterLLM}: Large Language Models as a Guide for Text Clustering},
url = {http://arxiv.org/abs/2305.14871},
shorttitle = {{ClusterLLM}},
abstract = {We introduce {ClusterLLM}, a novel text clustering framework that leverages feedback from an instruction-tuned large language model, such as {ChatGPT}. Compared with traditional unsupervised methods that builds upon "small" embedders, {ClusterLLM} exhibits two intriguing advantages: (1) it enjoys the emergent capability of {LLM} even if its embeddings are inaccessible; and (2) it understands the user's preference on clustering through textual instruction and/or a few annotated data. First, we prompt {ChatGPT} for insights on clustering perspective by constructing hard triplet questions {\textless}does A better correspond to B than C{\textgreater}, where A, B and C are similar data points that belong to different clusters according to small embedder. We empirically show that this strategy is both effective for fine-tuning small embedder and cost-efficient to query {ChatGPT}. Second, we prompt {ChatGPT} for helps on clustering granularity by carefully designed pairwise questions {\textless}do A and B belong to the same category{\textgreater}, and tune the granularity from cluster hierarchies that is the most consistent with the {ChatGPT} answers. Extensive experiments on 14 datasets show that {ClusterLLM} consistently improves clustering quality, at an average cost of {\textasciitilde}\$0.6 per dataset.},
number = {{arXiv}:2305.14871},
publisher = {{arXiv}},
author = {Zhang, Yuwei and Wang, Zihan and Shang, Jingbo},
urldate = {2023-08-08},
date = {2023-05-24},
eprinttype = {arXiv},
eprint = {2305.14871},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language},
}
@misc{zhang_clusterllm_2023-1,
internal-note = {exact duplicate of zhang_clusterllm_2023 -- key kept so existing citations still resolve; consider deduplicating},
title = {{ClusterLLM}: Large Language Models as a Guide for Text Clustering},
url = {http://arxiv.org/abs/2305.14871},
shorttitle = {{ClusterLLM}},
abstract = {We introduce {ClusterLLM}, a novel text clustering framework that leverages feedback from an instruction-tuned large language model, such as {ChatGPT}. Compared with traditional unsupervised methods that builds upon "small" embedders, {ClusterLLM} exhibits two intriguing advantages: (1) it enjoys the emergent capability of {LLM} even if its embeddings are inaccessible; and (2) it understands the user's preference on clustering through textual instruction and/or a few annotated data. First, we prompt {ChatGPT} for insights on clustering perspective by constructing hard triplet questions {\textless}does A better correspond to B than C{\textgreater}, where A, B and C are similar data points that belong to different clusters according to small embedder. We empirically show that this strategy is both effective for fine-tuning small embedder and cost-efficient to query {ChatGPT}. Second, we prompt {ChatGPT} for helps on clustering granularity by carefully designed pairwise questions {\textless}do A and B belong to the same category{\textgreater}, and tune the granularity from cluster hierarchies that is the most consistent with the {ChatGPT} answers. Extensive experiments on 14 datasets show that {ClusterLLM} consistently improves clustering quality, at an average cost of {\textasciitilde}\$0.6 per dataset.},
number = {{arXiv}:2305.14871},
publisher = {{arXiv}},
author = {Zhang, Yuwei and Wang, Zihan and Shang, Jingbo},
urldate = {2023-08-08},
date = {2023-05-24},
eprinttype = {arXiv},
eprint = {2305.14871},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language},
}
@misc{zhang_clusterllm_2023-2,
internal-note = {exact duplicate of zhang_clusterllm_2023 -- key kept so existing citations still resolve; consider deduplicating},
title = {{ClusterLLM}: Large Language Models as a Guide for Text Clustering},
url = {http://arxiv.org/abs/2305.14871},
shorttitle = {{ClusterLLM}},
abstract = {We introduce {ClusterLLM}, a novel text clustering framework that leverages feedback from an instruction-tuned large language model, such as {ChatGPT}. Compared with traditional unsupervised methods that builds upon "small" embedders, {ClusterLLM} exhibits two intriguing advantages: (1) it enjoys the emergent capability of {LLM} even if its embeddings are inaccessible; and (2) it understands the user's preference on clustering through textual instruction and/or a few annotated data. First, we prompt {ChatGPT} for insights on clustering perspective by constructing hard triplet questions {\textless}does A better correspond to B than C{\textgreater}, where A, B and C are similar data points that belong to different clusters according to small embedder. We empirically show that this strategy is both effective for fine-tuning small embedder and cost-efficient to query {ChatGPT}. Second, we prompt {ChatGPT} for helps on clustering granularity by carefully designed pairwise questions {\textless}do A and B belong to the same category{\textgreater}, and tune the granularity from cluster hierarchies that is the most consistent with the {ChatGPT} answers. Extensive experiments on 14 datasets show that {ClusterLLM} consistently improves clustering quality, at an average cost of {\textasciitilde}\$0.6 per dataset.},
number = {{arXiv}:2305.14871},
publisher = {{arXiv}},
author = {Zhang, Yuwei and Wang, Zihan and Shang, Jingbo},
urldate = {2023-08-08},
date = {2023-05-24},
eprinttype = {arXiv},
eprint = {2305.14871},
eprintclass = {cs},
keywords = {Computer Science - Computation and Language},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/68L6AESY/2305.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/VGWL9LRC/Zhang et al. - 2023 - ClusterLLM Large Language Models as a Guide for T.pdf:application/pdf},
}
@article{vijeikis_efficient_2022,
title = {Efficient Violence Detection in Surveillance},
volume = {22},
rights = {http://creativecommons.org/licenses/by/3.0/},
issn = {1424-8220},
url = {https://www.mdpi.com/1424-8220/22/6/2216},
doi = {10.3390/s22062216},
abstract = {Intelligent video surveillance systems are rapidly being introduced to public places. The adoption of computer vision and machine learning techniques enables various applications for collected video features; one of the major is safety monitoring. The efficacy of violent event detection is measured by the efficiency and accuracy of violent event detection. In this paper, we present a novel architecture for violence detection from video surveillance cameras. Our proposed model is a spatial feature extracting a U-Net-like network that uses {MobileNet} V2 as an encoder followed by {LSTM} for temporal feature extraction and classification. The proposed model is computationally light and still achieves good results—experiments showed that an average accuracy is 0.82 ± 2\% and average precision is 0.81 ± 3\% using a complex real-world security camera footage dataset based on {RWF}-2000.},
pages = {2216},
number = {6},
journaltitle = {Sensors},
author = {Vijeikis, Romas and Raudonis, Vidas and Dervinis, Gintaras},
urldate = {2023-08-08},
date = {2022-01},
langid = {english},
note = {Number: 6
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {computer vision, deep learning, intelligent video surveillance, {LSTM}, U-Net, violence detection, violent behavior},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/PSYA8YSJ/Vijeikis et al. - 2022 - Efficient Violence Detection in Surveillance.pdf:application/pdf},
}
@article{toubeau_interpretable_2022,
title = {Interpretable Probabilistic Forecasting of Imbalances in Renewable-Dominated Electricity Systems},
volume = {13},
@@ -233,12 +27,6 @@ Publisher: Multidisciplinary Digital Publishing Institute},
file = {Toubeau et al. - 2022 - Interpretable Probabilistic Forecasting of Imbalan.pdf:/Users/victormylle/Zotero/storage/WA7DZBXX/Toubeau et al. - 2022 - Interpretable Probabilistic Forecasting of Imbalan.pdf:application/pdf},
}
@online{noauthor_deep_nodate,
title = {Deep Generative Modelling: A Comparative Review of {VAEs}, {GANs}, Normalizing Flows, Energy-Based and Autoregressive Models {\textbar} {IEEE} Journals \& Magazine {\textbar} {IEEE} Xplore},
url = {https://ieeexplore.ieee.org/document/9555209},
urldate = {2023-10-11},
}
@article{bond-taylor_deep_2022,
title = {Deep Generative Modelling: A Comparative Review of {VAEs}, {GANs}, Normalizing Flows, Energy-Based and Autoregressive Models},
volume = {44},
@@ -368,39 +156,6 @@ Publisher: Multidisciplinary Digital Publishing Institute},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/8LIRWZ4G/2101.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QPPFJVR5/Rasul et al. - 2021 - Autoregressive Denoising Diffusion Models for Mult.pdf:application/pdf},
}
@online{noauthor_spacy_nodate,
title = {{spaCy} · Industrial-strength Natural Language Processing in Python},
url = {https://spacy.io/},
abstract = {{spaCy} is a free open-source library for Natural Language Processing in Python. It features {NER}, {POS} tagging, dependency parsing, word vectors and more.},
urldate = {2023-10-17},
langid = {english},
file = {Snapshot:/Users/victormylle/Zotero/storage/8WWDDEH4/spacy.io.html:text/html},
}
@online{noauthor_intfloatmultilingual-e5-base_nodate,
title = {intfloat/multilingual-e5-base · Hugging Face},
url = {https://huggingface.co/intfloat/multilingual-e5-base},
abstract = {We're on a journey to advance and democratize artificial intelligence through open source and open science.},
urldate = {2023-10-17},
file = {Snapshot:/Users/victormylle/Zotero/storage/LYSDP8CD/multilingual-e5-base.html:text/html},
}
@online{noauthor_googleflan-t5-base_nodate,
title = {google/flan-t5-base · Hugging Face},
url = {https://huggingface.co/google/flan-t5-base},
urldate = {2023-10-17},
file = {flan-t5-base · Hugging Face:/Users/victormylle/Zotero/storage/284DLNVT/flan-t5-base.html:text/html},
}
@online{noauthor_openai_nodate,
title = {{OpenAI} Platform},
url = {https://platform.openai.com},
abstract = {Explore developer resources, tutorials, {API} docs, and dynamic examples to get the most out of {OpenAI}'s platform.},
urldate = {2023-10-17},
langid = {english},
file = {Snapshot:/Users/victormylle/Zotero/storage/9NFW3FCP/gpt-3-5.html:text/html},
}
@article{cramer_normalizing_2022,
title = {Normalizing flow-based day-ahead wind power scenario generation for profitable and reliable delivery commitments by wind farm operators},
volume = {166},
@@ -449,21 +204,6 @@ Publisher: Multidisciplinary Digital Publishing Institute},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/2J7MPVV5/1505.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/GQWIFAAN/Rezende and Mohamed - 2016 - Variational Inference with Normalizing Flows.pdf:application/pdf},
}
@misc{gruver_large_2023,
title = {Large Language Models Are Zero-Shot Time Series Forecasters},
url = {http://arxiv.org/abs/2310.07820},
doi = {10.48550/arXiv.2310.07820},
abstract = {By encoding time series as a string of numerical digits, we can frame time series forecasting as next-token prediction in text. Developing this approach, we find that large language models ({LLMs}) such as {GPT}-3 and {LLaMA}-2 can surprisingly zero-shot extrapolate time series at a level comparable to or exceeding the performance of purpose-built time series models trained on the downstream tasks. To facilitate this performance, we propose procedures for effectively tokenizing time series data and converting discrete distributions over tokens into highly flexible densities over continuous values. We argue the success of {LLMs} for time series stems from their ability to naturally represent multimodal distributions, in conjunction with biases for simplicity, and repetition, which align with the salient features in many time series, such as repeated seasonal trends. We also show how {LLMs} can naturally handle missing data without imputation through non-numerical text, accommodate textual side information, and answer questions to help explain predictions. While we find that increasing model size generally improves performance on time series, we show {GPT}-4 can perform worse than {GPT}-3 because of how it tokenizes numbers, and poor uncertainty calibration, which is likely the result of alignment interventions such as {RLHF}.},
number = {{arXiv}:2310.07820},
author = {Gruver, Nate and Finzi, Marc and Qiu, Shikai and Wilson, Andrew Gordon},
urldate = {2023-10-13},
date = {2023-10-11},
eprinttype = {arXiv},
eprint = {2310.07820},
eprintclass = {cs},
keywords = {Computer Science - Machine Learning},
file = {Gruver et al. - 2023 - Large Language Models Are Zero-Shot Time Series Forecasters.pdf:/Users/victormylle/Zotero/storage/T5XZ227W/Gruver et al. - 2023 - Large Language Models Are Zero-Shot Time Series Forecasters.pdf:application/pdf},
}
@article{sweidan_probabilistic_nodate,
title = {Probabilistic Prediction in scikit-learn},
abstract = {Adding confidence measures to predictive models should increase the trustworthiness, but only if the models are well-calibrated. Historically, some algorithms like logistic regression, but also neural networks, have been considered to produce well-calibrated probability estimates off-the-shelf. Other techniques, like decision trees and Naive Bayes, on the other hand, are infamous for being significantly overconfident in their probabilistic predictions. In this paper, a large experimental study is conducted to investigate how well-calibrated models produced by a number of algorithms in the scikit-learn library are out-of-the-box, but also if either the built-in calibration techniques Platt scaling and isotonic regression, or Venn-Abers, can be used to improve the calibration. The results show that of the seven algorithms evaluated, the only one obtaining well-calibrated models without the external calibration is logistic regression. All other algorithms, i.e., decision trees, adaboost, gradient boosting, {kNN}, naive Bayes and random forest benefit from using any of the calibration techniques. In particular, decision trees, Naive Bayes and the boosted models are substantially improved using external calibration. From a practitioners perspective, the obvious recommendation becomes to incorporate calibration when using probabilistic prediction. Comparing the different calibration techniques, Platt scaling and {VennAbers} generally outperform isotonic regression, on these rather small datasets. Finally, the unique ability of Venn-Abers to output not only well-calibrated probability estimates, but also the confidence in these estimates is demonstrated.},
@@ -490,49 +230,6 @@ Publisher: Multidisciplinary Digital Publishing Institute},
file = {Baskan et al. - 2023 - A Scenario-Based Model Comparison for Short-Term D.pdf:/Users/victormylle/Zotero/storage/TU5JX5D4/Baskan et al. - 2023 - A Scenario-Based Model Comparison for Short-Term D.pdf:application/pdf},
}
@online{tsaprounis_metrics_2023,
	author     = {Tsaprounis, Leonidas},
	title      = {Metrics for Distributional Forecasts},
	titleaddon = {Trusted Data Science @ Haleon},
	date       = {2023-02-27},
	url        = {https://medium.com/trusted-data-science-haleon/metrics-for-distributional-forecasts-60e156c60177},
	urldate    = {2023-10-24},
	abstract   = {How to evaluate distributional/probabilistic time series forecasts in Python.},
	langid     = {english},
}
@misc{roy_recent_2021,
	title = {Recent Trends in Named Entity Recognition ({NER})},
	url = {http://arxiv.org/abs/2101.11420},
	doi = {10.48550/arXiv.2101.11420},
	abstract = {The availability of large amounts of computer-readable textual data and hardware that can process the data has shifted the focus of knowledge projects towards deep learning architecture. Natural Language Processing, particularly the task of Named Entity Recognition is no exception. The bulk of the learning methods that have produced state-of-the-art results have changed the deep learning model, the training method used, the training data itself or the encoding of the output of the {NER} system. In this paper, we review significant learning methods that have been employed for {NER} in the recent past and how they came about from the linear learning methods of the past. We also cover the progress of related tasks that are upstream or downstream to {NER}, e.g., sequence tagging, entity linking, etc., wherever the processes in question have also improved {NER} results.},
	number = {{arXiv}:2101.11420},
	publisher = {{arXiv}},
	author = {Roy, Arya},
	urldate = {2023-10-24},
	date = {2021-01-25},
	eprinttype = {arXiv},
	eprint = {2101.11420},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language},
	file = {arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/AAZ3I43G/Roy - 2021 - Recent Trends in Named Entity Recognition (NER).pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/DWNPFLCX/2101.html:text/html},
}
@online{noauthor_sentencetransformers_nodate,
	title = {{SentenceTransformers} Documentation},
	url = {https://www.sbert.net/},
	urldate = {2023-10-29},
	file = {SentenceTransformers Documentation — Sentence-Transformers documentation:/Users/victormylle/Zotero/storage/7ZPK2DIZ/www.sbert.net.html:text/html},
}
@online{noauthor_hugging_2023,
	title = {Hugging Face -- The {AI} community building the future},
	url = {https://huggingface.co/},
	abstract = {We're on a journey to advance and democratize artificial intelligence through open source and open science.},
	urldate = {2023-10-29},
	date = {2023-10-22},
	file = {Snapshot:/Users/victormylle/Zotero/storage/8U9I2BD9/huggingface.co.html:text/html},
}
@misc{narayan_regularization_2021,
title = {Regularization Strategies for Quantile Regression},
url = {http://arxiv.org/abs/2102.05135},
@@ -565,98 +262,6 @@ Publisher: Multidisciplinary Digital Publishing Institute},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/WWFHI3UN/2011.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/SHMRZ3Q7/Chung et al. - 2021 - Beyond Pinball Loss Quantile Methods for Calibrat.pdf:application/pdf},
}
@misc{van_hautte_bad_2019,
	title = {Bad Form: Comparing Context-Based and Form-Based Few-Shot Learning in Distributional Semantic Models},
	url = {http://arxiv.org/abs/1910.00275},
	shorttitle = {Bad Form},
	abstract = {Word embeddings are an essential component in a wide range of natural language processing applications. However, distributional semantic models are known to struggle when only a small number of context sentences are available. Several methods have been proposed to obtain higher-quality vectors for these words, leveraging both this context information and sometimes the word forms themselves through a hybrid approach. We show that the current tasks do not suffice to evaluate models that use word-form information, as such models can easily leverage word forms in the training data that are related to word forms in the test data. We introduce 3 new tasks, allowing for a more balanced comparison between models. Furthermore, we show that hyperparameters that have largely been ignored in previous work can consistently improve the performance of both baseline and advanced models, achieving a new state of the art on 4 out of 6 tasks.},
	number = {{arXiv}:1910.00275},
	publisher = {{arXiv}},
	author = {Van Hautte, Jeroen and Emerson, Guy and Rei, Marek},
	urldate = {2024-03-09},
	date = {2019-10-01},
	eprinttype = {arXiv},
	eprint = {1910.00275},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
	file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/PUBS4DRK/1910.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/VY5YGVXU/Van Hautte et al. - 2019 - Bad Form Comparing Context-Based and Form-Based F.pdf:application/pdf},
}
@misc{decorte_jobbert_2021-1,
	title = {{JobBERT}: Understanding Job Titles through Skills},
	url = {http://arxiv.org/abs/2109.09605},
	shorttitle = {{JobBERT}},
	abstract = {Job titles form a cornerstone of today's human resources ({HR}) processes. Within online recruitment, they allow candidates to understand the contents of a vacancy at a glance, while internal {HR} departments use them to organize and structure many of their processes. As job titles are a compact, convenient, and readily available data source, modeling them with high accuracy can greatly benefit many {HR} tech applications. In this paper, we propose a neural representation model for job titles, by augmenting a pre-trained language model with co-occurrence information from skill labels extracted from vacancies. Our {JobBERT} method leads to considerable improvements compared to using generic sentence encoders, for the task of job title normalization, for which we release a new evaluation benchmark.},
	number = {{arXiv}:2109.09605},
	publisher = {{arXiv}},
	author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Demeester, Thomas and Develder, Chris},
	urldate = {2024-03-09},
	date = {2021-09-20},
	eprinttype = {arXiv},
	eprint = {2109.09605},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language},
	file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/6FMYQ68Y/2109.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/2SD3P252/Decorte et al. - 2021 - JobBERT Understanding Job Titles through Skills.pdf:application/pdf},
}
@misc{van_hautte_leveraging_2020,
	title = {Leveraging the Inherent Hierarchy of Vacancy Titles for Automated Job Ontology Expansion},
	url = {http://arxiv.org/abs/2004.02814},
	abstract = {Machine learning plays an ever-bigger part in online recruitment, powering intelligent matchmaking and job recommendations across many of the world's largest job platforms. However, the main text is rarely enough to fully understand a job posting: more often than not, much of the required information is condensed into the job title. Several organised efforts have been made to map job titles onto a hand-made knowledge base as to provide this information, but these only cover around 60\% of online vacancies. We introduce a novel, purely data-driven approach towards the detection of new job titles. Our method is conceptually simple, extremely efficient and competitive with traditional {NER}-based approaches. Although the standalone application of our method does not outperform a finetuned {BERT} model, it can be applied as a preprocessing step as well, substantially boosting accuracy across several architectures.},
	number = {{arXiv}:2004.02814},
	publisher = {{arXiv}},
	author = {Van Hautte, Jeroen and Schelstraete, Vincent and Wornoo, Mikaël},
	urldate = {2024-03-09},
	date = {2020-04-06},
	eprinttype = {arXiv},
	eprint = {2004.02814},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
	file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/6FAKZYDM/2004.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/EY3RNC6S/Van Hautte et al. - 2020 - Leveraging the Inherent Hierarchy of Vacancy Title.pdf:application/pdf},
}
@misc{decorte_design_2022,
	title = {Design of Negative Sampling Strategies for Distantly Supervised Skill Extraction},
	url = {http://arxiv.org/abs/2209.05987},
	abstract = {Skills play a central role in the job market and many human resources ({HR}) processes. In the wake of other digital experiences, today's online job market has candidates expecting to see the right opportunities based on their skill set. Similarly, enterprises increasingly need to use data to guarantee that the skills within their workforce remain future-proof. However, structured information about skills is often missing, and processes building on self- or manager-assessment have shown to struggle with issues around adoption, completeness, and freshness of the resulting data. Extracting skills is a highly challenging task, given the many thousands of possible skill labels mentioned either explicitly or merely described implicitly and the lack of finely annotated training corpora. Previous work on skill extraction overly simplifies the task to an explicit entity detection task or builds on manually annotated training data that would be infeasible if applied to a complete vocabulary of skills. We propose an end-to-end system for skill extraction, based on distant supervision through literal matching. We propose and evaluate several negative sampling strategies, tuned on a small validation dataset, to improve the generalization of skill extraction towards implicitly mentioned skills, despite the lack of such implicit skills in the distantly supervised data. We observe that using the {ESCO} taxonomy to select negative examples from related skills yields the biggest improvements, and combining three different strategies in one model further increases the performance, up to 8 percentage points in {RP}@5. We introduce a manually annotated evaluation benchmark for skill extraction based on the {ESCO} taxonomy, on which we validate our models. We release the benchmark dataset for research purposes to stimulate further research on the task.},
	number = {{arXiv}:2209.05987},
	publisher = {{arXiv}},
	author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Deleu, Johannes and Develder, Chris and Demeester, Thomas},
	urldate = {2024-03-09},
	date = {2022-09-13},
	eprinttype = {arXiv},
	eprint = {2209.05987},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language},
	file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/E79F2EV8/2209.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/SBEAYV66/Decorte et al. - 2022 - Design of Negative Sampling Strategies for Distant.pdf:application/pdf},
}
@misc{decorte_extreme_2023,
	title = {Extreme Multi-Label Skill Extraction Training using Large Language Models},
	url = {http://arxiv.org/abs/2307.10778},
	abstract = {Online job ads serve as a valuable source of information for skill requirements, playing a crucial role in labor market analysis and e-recruitment processes. Since such ads are typically formatted in free text, natural language processing ({NLP}) technologies are required to automatically process them. We specifically focus on the task of detecting skills (mentioned literally, or implicitly described) and linking them to a large skill ontology, making it a challenging case of extreme multi-label classification ({XMLC}). Given that there is no sizable labeled (training) dataset are available for this specific {XMLC} task, we propose techniques to leverage general Large Language Models ({LLMs}). We describe a cost-effective approach to generate an accurate, fully synthetic labeled dataset for skill extraction, and present a contrastive learning strategy that proves effective in the task. Our results across three skill extraction benchmarks show a consistent increase of between 15 to 25 percentage points in \textit{R-Precision@5} compared to previously published results that relied solely on distant supervision through literal matches.},
	number = {{arXiv}:2307.10778},
	publisher = {{arXiv}},
	author = {Decorte, Jens-Joris and Verlinden, Severine and Van Hautte, Jeroen and Deleu, Johannes and Develder, Chris and Demeester, Thomas},
	urldate = {2024-03-09},
	date = {2023-07-20},
	eprinttype = {arXiv},
	eprint = {2307.10778},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language},
	file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/VVZZQW45/2307.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/8U7P43IE/Decorte et al. - 2023 - Extreme Multi-Label Skill Extraction Training usin.pdf:application/pdf},
}
@misc{decorte_career_2023,
	title = {Career Path Prediction using Resume Representation Learning and Skill-based Matching},
	url = {http://arxiv.org/abs/2310.15636},
	abstract = {The impact of person-job fit on job satisfaction and performance is widely acknowledged, which highlights the importance of providing workers with next steps at the right time in their career. This task of predicting the next step in a career is known as career path prediction, and has diverse applications such as turnover prevention and internal job mobility. Existing methods to career path prediction rely on large amounts of private career history data to model the interactions between job titles and companies. We propose leveraging the unexplored textual descriptions that are part of work experience sections in resumes. We introduce a structured dataset of 2,164 anonymized career histories, annotated with {ESCO} occupation labels. Based on this dataset, we present a novel representation learning approach, {CareerBERT}, specifically designed for work history data. We develop a skill-based model and a text-based model for career path prediction, which achieve 35.24\% and 39.61\% recall@10 respectively on our dataset. Finally, we show that both approaches are complementary as a hybrid approach achieves the strongest result with 43.01\% recall@10.},
	number = {{arXiv}:2310.15636},
	publisher = {{arXiv}},
	author = {Decorte, Jens-Joris and Van Hautte, Jeroen and Deleu, Johannes and Develder, Chris and Demeester, Thomas},
	urldate = {2024-03-09},
	date = {2023-10-24},
	eprinttype = {arXiv},
	eprint = {2310.15636},
	eprintclass = {cs},
	keywords = {Computer Science - Computation and Language, Computer Science - Artificial Intelligence},
	file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/I6AMKGVA/2310.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/AKTKCWBR/Decorte et al. - 2023 - Career Path Prediction using Resume Representation.pdf:application/pdf},
}
@online{noauthor_liberalised_nodate,
title = {The liberalised electricity market includes many parties who all have to work together and at the same time try to make a profit. An overview of the most...},
url = {https://www.next-kraftwerke.be/en/knowledge-hub/players-in-the-belgian-power-market/},
@@ -697,13 +302,6 @@ Publisher: Multidisciplinary Digital Publishing Institute},
file = {arXiv.org Snapshot:/Users/victormylle/Zotero/storage/3N56FPYP/2106.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/958MBH5M/Dumas et al. - 2019 - Probabilistic Forecasting of Imbalance Prices in t.pdf:application/pdf},
}
@online{noauthor_ghent_nodate,
	title   = {Ghent University: Master of Science in Computer Science Engineering},
	url     = {https://studiekiezer.ugent.be/2024/master-of-science-in-computer-science-engineering-en},
	urldate = {2024-04-17},
	file    = {master-of-science-in-computer-science-engineering-en:/Users/victormylle/Zotero/storage/JCELQ9VV/master-of-science-in-computer-science-engineering-en.html:text/html},
}
@article{gunduz_transfer_2023,
title = {Transfer learning for electricity price forecasting},
volume = {34},
@@ -754,7 +352,7 @@ Publisher: Multidisciplinary Digital Publishing Institute},
author = {Weron, Rafał},
urldate = {2024-05-02},
date = {2014-10-01},
keywords = {Autoregression, Day-ahead market, Electricity price forecasting, Factor model, Forecast combination, Neural network, Probabilistic forecast, Seasonality},
keywords = {Electricity price forecasting, Autoregression, Day-ahead market, Factor model, Forecast combination, Neural network, Probabilistic forecast, Seasonality},
file = {ScienceDirect Snapshot:/Users/victormylle/Zotero/storage/DDGF263F/S0169207014001083.html:text/html},
}
@@ -776,6 +374,67 @@ Publisher: Multidisciplinary Digital Publishing Institute},
langid = {english},
note = {Number: 2
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {autoregressive, deep learning, electricity price forecasting, machine learning, neural network, statistical method, univariate model},
keywords = {deep learning, autoregressive, electricity price forecasting, machine learning, neural network, statistical method, univariate model},
file = {Full Text PDF:/Users/victormylle/Zotero/storage/3IR29RU3/Poggi et al. - 2023 - Electricity Price Forecasting via Statistical and .pdf:application/pdf},
}
@online{noauthor_welcome_nodate,
	title   = {Welcome — Elia Open Data Portal},
	url     = {https://opendata.elia.be/pages/home/},
	urldate = {2024-05-18},
	file    = {Welcome — Elia Open Data Portal:/Users/victormylle/Zotero/storage/SYR9PM3Z/home.html:text/html},
}
@online{noauthor_imbalance_nodate,
	title = {Imbalance prices per quarter-hour (Historical data)},
	url = {https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime},
	abstract = {System imbalance prices applied if an imbalance is found between injections and offtakes in a balance responsible parties ({BRPs}) balance area. When imbalance prices are published on a quarter-hourly basis, the published prices have not yet been validated and can therefore only be used as an indication of the imbalance price. Only after the published prices have been validated can they be used for invoicing purposes. The records for month M are validated after the 15th of month M+1. Contains the historical data and is refreshed daily. This dataset contains data until 21/05/2024 (before {MARI} local go-live).},
	urldate = {2024-05-18},
	langid = {british},
	file = {Snapshot:/Users/victormylle/Zotero/storage/PZI6PTQ2/information.html:text/html},
}
@online{noauthor_measured_nodate,
	title = {Measured and forecasted total load on the Belgian grid (Historical data)},
	url = {https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime},
	abstract = {Measured and upscaled, most recent, day-ahead and week-ahead forecasts for total load on the Belgian grid.},
	urldate = {2024-05-18},
	langid = {british},
	file = {Snapshot:/Users/victormylle/Zotero/storage/8857IXIQ/table.html:text/html},
}

@online{noauthor_measured_nodate-1,
	internal-note = {Duplicate of noauthor_measured_nodate (same dataset, same URL). Consolidate citations to one key and remove this entry once no longer cited.},
	title = {Measured and forecasted total load on the Belgian grid (Historical data)},
	url = {https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime},
	abstract = {Measured and upscaled, most recent, day-ahead and week-ahead forecasts for total load on the Belgian grid.},
	urldate = {2024-05-18},
	langid = {british},
	file = {Snapshot:/Users/victormylle/Zotero/storage/88FLT7BA/table.html:text/html},
}
@online{noauthor_photovoltaic_nodate,
	title = {Photovoltaic power production estimation and forecast on Belgian grid (Historical)},
	url = {https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime},
	abstract = {Measured and upscaled photovoltaic power generation on the Belgian grid. Please note that the measured and forecast values are in {MW}, it is of the user's responsibility to interpret the values as such.},
	urldate = {2024-05-18},
	langid = {british},
	file = {Snapshot:/Users/victormylle/Zotero/storage/7VB5YHYE/table.html:text/html},
}
@online{noauthor_wind_nodate,
	title = {Wind power production estimation and forecast on Belgian grid (Historical)},
	url = {https://opendata.elia.be/explore/dataset/ods031/information/},
	abstract = {Measured and upscaled wind power generation on the Belgian grid. Please note that the measured and forecast values are in {MW}, it is of the user's responsibility to interpret the values as such.},
	urldate = {2024-05-18},
	langid = {british},
	file = {Snapshot:/Users/victormylle/Zotero/storage/UTJUH5VQ/information.html:text/html},
}
@online{noauthor_intraday_nodate,
	title    = {Intraday implicit net position (Belgium's balance)},
	url      = {https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime},
	urldate  = {2024-05-18},
	abstract = {Net sum of intraday nominations of the implicit capacity allocated for energy exchanges for Belgium.},
	langid   = {british},
	file     = {Snapshot:/Users/victormylle/Zotero/storage/XJ7KBDWG/information.html:text/html},
}

View File

@@ -1,10 +1,10 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {section}{\numberline {A}Appendix}{51}{appendix.A}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Comparison of the autoregressive models with the diffusion model\relax }}{51}{figure.caption.33}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{19}{51}{Comparison of the autoregressive models with the diffusion model\relax }{figure.caption.33}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Comparison of the non-autoregressive models with the diffusion model\relax }}{52}{figure.caption.34}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{20}{52}{Comparison of the non-autoregressive models with the diffusion model\relax }{figure.caption.34}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Comparison of the autoregressive models with the diffusion model\relax }}{51}{figure.caption.33}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{20}{51}{Comparison of the autoregressive models with the diffusion model\relax }{figure.caption.33}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Comparison of the non-autoregressive models with the diffusion model\relax }}{52}{figure.caption.34}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{21}{52}{Comparison of the non-autoregressive models with the diffusion model\relax }{figure.caption.34}{}}
\@setckpt{sections/appendix}{
\setcounter{page}{53}
\setcounter{equation}{8}
@@ -20,7 +20,7 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{20}
\setcounter{figure}{21}
\setcounter{table}{13}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -37,7 +37,7 @@
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{9}
\setcounter{instcount}{24}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
@@ -51,7 +51,7 @@
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
@@ -159,9 +159,18 @@
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{1}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{1}
\setcounter{g@acro@CTSGAN@int}{1}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{1}
\setcounter{g@acro@GP@int}{1}
\setcounter{g@acro@MSE@int}{4}
\setcounter{g@acro@MAE@int}{4}
\setcounter{g@acro@CRPS@int}{2}
\setcounter{g@acro@CRPS@int}{3}
\setcounter{g@acro@TSPA@int}{1}
\setcounter{g@acro@PLF@int}{1}
\setcounter{g@acro@NRV@int}{12}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}

View File

@@ -31,10 +31,11 @@
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }}{14}{figure.caption.6}\protected@file@percent }
\newlabel{fig:diffusion_example}{{4}{14}{Example of the diffusion process. The image of a cat is generated by starting from noise and iteratively denoising the image.\relax }{figure.caption.6}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.3}Generation process}{14}{subsubsection.3.4.3}\protected@file@percent }
\newlabel{fig:diffusion_process}{{\caption@xref {fig:diffusion_process}{ on input line 309}}{16}{Generation process}{figure.caption.7}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Diffusion process}}{16}{figure.caption.7}\protected@file@percent }
\newlabel{fig:diffusion_process}{{5}{16}{Diffusion process}{figure.caption.7}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Evaluation}{16}{subsection.3.5}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Visualization of the CRPS metric\relax }}{17}{figure.caption.8}\protected@file@percent }
\newlabel{fig:crps_visualization}{{5}{17}{Visualization of the CRPS metric\relax }{figure.caption.8}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Visualization of the CRPS metric\relax }}{17}{figure.caption.8}\protected@file@percent }
\newlabel{fig:crps_visualization}{{6}{17}{Visualization of the CRPS metric\relax }{figure.caption.8}{}}
\@setckpt{sections/background}{
\setcounter{page}{18}
\setcounter{equation}{7}
@@ -50,7 +51,7 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{5}
\setcounter{figure}{6}
\setcounter{table}{2}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -67,7 +68,7 @@
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{0}
\setcounter{instcount}{1}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
@@ -81,7 +82,7 @@
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
@@ -189,9 +190,18 @@
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{0}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{0}
\setcounter{g@acro@CTSGAN@int}{0}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{0}
\setcounter{g@acro@GP@int}{0}
\setcounter{g@acro@MSE@int}{0}
\setcounter{g@acro@MAE@int}{0}
\setcounter{g@acro@CRPS@int}{0}
\setcounter{g@acro@TSPA@int}{0}
\setcounter{g@acro@PLF@int}{0}
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}

View File

@@ -1,10 +1,3 @@
% Achtergrond informatie
% Generatief modelleren
% -> enkel forecast is vaak brak -> reinforcement learning is lastig -> generatief modelleren, veel generaties om mee te trainen
% - Achtergrond electrititetismarkt
% - Achtergrond Generatief modelleren (van NRV)
% - TODO: Achtergrond RNN?
\section{Electricity market}
The electricity market consists of many different parties who all work together and want to make a profit in the end. An overview of the most important parties can be found in Table \ref{tab:parties}. Each of them has a different role in the market.
@@ -247,13 +240,11 @@ The input features for the RNN model are carefully structured to capture the rel
The input feature structure is designed to provide the model with a comprehensive view of the previous quarters and the forecasted values for the current quarter. By incorporating both historical and forecasted information sequentially, the model can learn to predict the NRV quantiles for the next quarter more accurately.
\subsection{Diffusion models}
TODO: reference the paper
The "Denoising Diffusion Probabilistic Models" (DDPM)
\subsubsection{Overview}
Diffusion models are a type of probabilistic model designed to generate high-quality, diverse samples from complex data distributions. Their training procedure is distinctive: an iterative noising process — the diffusion process — gradually transforms a training sample into pure noise, and the model is trained to reverse this process, denoising the data step by step. By learning to recover the original sample from its noised version, the model maximizes the likelihood of the data and thereby learns the underlying data distribution. At generation time, the model starts from noise and iteratively denoises it, producing samples that resemble the data. The model can also be conditioned on additional information to generate samples that follow other distributions.
\subsubsection{Applications}
Diffusion models gained popularity in the field of computer vision. They are used for inpainting, super-resolution, image generation, image editing etc. The paper introducing "Denoising Diffusion Probabilistic Models" (DDPM) showed that diffusion models can achieve state-of-the-art results in image generation. This type of model was then applied to other fields like text generation, audio generation etc. The most popular application of diffusion models is still image generation. Many different models and products exist that make use of diffusion models to generate images. Some examples are DALL·E, Stable Diffusion, Midjourney, etc. These models can generate or edit images based on a given text description.
Diffusion models gained popularity in the field of computer vision. They are used for inpainting, super-resolution, image generation, image editing etc. The paper introducing "Denoising Diffusion Probabilistic Models" (DDPM) \parencite{ho_denoising_2020} showed that diffusion models can achieve state-of-the-art results in image generation. This type of model was then applied to other fields like text generation, audio generation etc. The most popular application of diffusion models is still image generation. Many different models and products exist that make use of diffusion models to generate images. Some examples are DALL·E, Stable Diffusion, Midjourney, etc. These models can generate or edit images based on a given text description.
This method can also be applied to other fields like audio generation, text generation etc. In this thesis, diffusion models are explored to model time series data conditioned on additional information. A small example of the diffusion process is shown in Figure \ref{fig:diffusion_example}. An image of a cat is generated by starting from noise and iteratively denoising the image.
@@ -304,8 +295,7 @@ The diffusion process can be seen in Figure \ref{fig:diffusion_process}. The mod
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{images/diffusion/diffusion_graphical_model.png}
TODO: fix citation
%\caption[Diffusion process]{Diffusion process (adapted from \cite{ho2020denoising}).}
\caption[Diffusion process]{Diffusion process \parencite{ho2020denoising}.}
\label{fig:diffusion_process}
\end{figure}

View File

@@ -51,7 +51,7 @@
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
@@ -159,9 +159,18 @@
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{0}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{0}
\setcounter{g@acro@CTSGAN@int}{0}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{0}
\setcounter{g@acro@GP@int}{0}
\setcounter{g@acro@MSE@int}{0}
\setcounter{g@acro@MAE@int}{0}
\setcounter{g@acro@CRPS@int}{0}
\setcounter{g@acro@TSPA@int}{0}
\setcounter{g@acro@PLF@int}{0}
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}

View File

@@ -2,6 +2,13 @@
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {section}{\numberline {5}Literature Study}{20}{section.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Electricity Price Forecasting}{20}{subsection.5.1}\protected@file@percent }
\ACRO{recordpage}{GAN}{21}{1}{20}
\ACRO{recordpage}{CTSGAN}{21}{1}{20}
\ACRO{recordpage}{MLP}{22}{1}{21}
\ACRO{recordpage}{GP}{22}{1}{21}
\ACRO{recordpage}{TSPA}{22}{1}{21}
\ACRO{recordpage}{PLF}{22}{1}{21}
\ACRO{recordpage}{CRPS}{22}{1}{21}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Policies for Battery Optimization}{21}{subsection.5.2}\protected@file@percent }
\@setckpt{sections/literature_study}{
\setcounter{page}{22}
@@ -18,7 +25,7 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{5}
\setcounter{figure}{6}
\setcounter{table}{2}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -35,7 +42,7 @@
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{5}
\setcounter{instcount}{7}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
@@ -49,7 +56,7 @@
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
@@ -157,9 +164,18 @@
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{0}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{1}
\setcounter{g@acro@CTSGAN@int}{1}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{1}
\setcounter{g@acro@GP@int}{1}
\setcounter{g@acro@MSE@int}{0}
\setcounter{g@acro@MAE@int}{0}
\setcounter{g@acro@CRPS@int}{0}
\setcounter{g@acro@CRPS@int}{1}
\setcounter{g@acro@TSPA@int}{1}
\setcounter{g@acro@PLF@int}{1}
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}

View File

@@ -6,9 +6,9 @@
\subsection{Electricity Price Forecasting}
Forecasting the electricity price is a challenging task that has been researched extensively. Knowing the future electricity price is crucial for market participants to make informed decisions and optimize their operations and profit. Already since the early 2000s, researchers have been trying to predict the electricity price. The first models were based on time series analysis, but with the rise of machine learning, more advanced models have been developed. A rise in publications on this topic can be observed since 2005. This is described in the literature review by \textcite{weron_electricity_2014}. An overview is given of the evolution of the methods used for electricity price forecasting. A significant shift can be observed towards integrating machine learning techniques with traditional statistical methods. The earliest models were based on time series analysis involving methods like autoregression, moving averages and their combinations (ARMA, ARIMA). These methods are not always able to capture the complex patterns in the electricity price. Therefore, researchers started to use more advanced models like neural networks, support vector machines, and random forests. The combination of statistical and machine learning models is more accurate. The statistical models are used to capture the linear patterns, while the machine learning models are used to capture the more complex non-linear patterns. This results in a more accurate and robust model. The more recent paper by \textcite{poggi_electricity_2023} compares the performance of statistical and machine learning methods for electricity price forecasting. The authors use ARIMA and SARIMA as statistical methods and XGBoost as a machine learning method. They also compare the performance of \acf{LSTM} networks for electricity price forecasting.
Because forecasting the electricity price is a challenging task with a lot of uncertainty, other generative methods to model the electricity price were researched. Generative modeling is a type of unsupervised learning that can be used to generate new samples from the same distribution as the training data. This can be used to generate new electricity price samples. The authors of \parencite{lu_scenarios_2022} use General Adversarial Networks (GANs) to generate new electricity price scenarios. They introduce a deep learning framework called Conditional Time Series Generative Adversarial Networks (CTSGAN) to generate electricity price scenarios. This enhances the traditional forecasting models by allowing the generation of a diverse set of potential future scenarios. This capability allows the modeling of the uncertainty in the electricity price. The authors show that the CTSGAN model outperforms traditional forecasting models in terms of forecasting accuracy. Other generative models like normalizing flows can also be used to generate new electricity price samples. The authors of \parencite{dumas_deep_2022} use normalizing flows to generate new electricity price samples. They show that normalizing flow models for electricity price forecasting are more accurate in quality than other generative models like GANs and Variational Autoencoders (VAEs). Not a lot of research has been done on using diffusion models for electricity price forecasting. The authors of \parencite{rasul_autoregressive_2021}, however, show that autoregressive diffusion models can be used for time series forecasting and achieve good results. They apply the model on multiple datasets which includes an electricity price dataset. The use of diffusion models for NRV modeling is further explored in this thesis.
Because forecasting the electricity price is a challenging task with a lot of uncertainty, other generative methods to model the electricity price were researched. Generative modeling is a type of unsupervised learning that can be used to generate new samples from the same distribution as the training data. This can be used to generate new electricity price samples. The authors of \textcite{lu_scenarios_2022} use \acfp{GAN} to generate new electricity price scenarios. They introduce a deep learning framework called \acf{CTSGAN} to generate electricity price scenarios. This enhances the traditional forecasting models by allowing the generation of a diverse set of potential future scenarios. This capability allows the modeling of the uncertainty in the electricity price. The authors show that the CTSGAN model outperforms traditional forecasting models in terms of forecasting accuracy. Other generative models like normalizing flows can also be used to generate new electricity price samples. The authors of \textcite{dumas_deep_2022} use normalizing flows to generate new electricity price samples. They show that normalizing flow models for electricity price forecasting are more accurate in quality than other generative models like \acsp{GAN} and \acfp{VAE}. Not a lot of research has been done on using diffusion models for electricity price forecasting. The authors of \textcite{rasul_autoregressive_2021}, however, show that autoregressive diffusion models can be used for time series forecasting and achieve good results. They apply the model on multiple datasets, which include an electricity price dataset. The use of diffusion models for NRV modeling is further explored in this thesis.
Most research on forecasting for the electricity market focuses on the electricity price for consumers. Another important aspect of the electricity market is the imbalance price. Not many papers have been published on forecasting the imbalance price. One paper \parencite{dumas_deep_2022} describes the forecasting of the imbalance price. They do not forecast the price itself but rather forecast the NRV and use this to reconstruct the imbalance price. This approach will also be used in this thesis.
Most research on forecasting for the electricity market focuses on the day-ahead electricity price. Another important aspect of the electricity market, however, is the imbalance price. Not many papers have been published on forecasting the imbalance price. The authors of \parencite{dumas_probabilistic_2019} describe the forecasting of the imbalance price. They use a two-step approach that is also used in this thesis. First, a forecast is made for the Net Regulation Volume (NRV). This forecast is then converted into an imbalance price forecast using data published by the Transmission System Operator (TSO). The authors compare several methods including a deterministic \acf{MLP}, a probabilistic technique using \acfp{GP} and a \acf{TSPA}. The probabilistic techniques are evaluated using the \acf{PLF} and the \acf{CRPS}. The authors show that the two-step probabilistic approach outperforms other approaches on probabilistic error measures but is less accurate at predicting the precise imbalance prices.
TODO: more information?

View File

@@ -19,7 +19,7 @@
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{5}
\setcounter{figure}{6}
\setcounter{table}{2}
\setcounter{parentequation}{0}
\setcounter{float@type}{4}
@@ -36,7 +36,7 @@
\setcounter{citetotal}{0}
\setcounter{multicitecount}{0}
\setcounter{multicitetotal}{0}
\setcounter{instcount}{0}
\setcounter{instcount}{1}
\setcounter{maxnames}{2}
\setcounter{minnames}{1}
\setcounter{maxitems}{999}
@@ -50,7 +50,7 @@
\setcounter{refsegment}{0}
\setcounter{maxextratitle}{0}
\setcounter{maxextratitleyear}{0}
\setcounter{maxextraname}{0}
\setcounter{maxextraname}{2}
\setcounter{maxextradate}{0}
\setcounter{maxextraalpha}{0}
\setcounter{abbrvpenalty}{50}
@@ -158,9 +158,18 @@
\setcounter{g@acro@QR@int}{0}
\setcounter{g@acro@AQR@int}{0}
\setcounter{g@acro@NAQR@int}{0}
\setcounter{g@acro@GRU@int}{0}
\setcounter{g@acro@LSTM@int}{0}
\setcounter{g@acro@GAN@int}{0}
\setcounter{g@acro@CTSGAN@int}{0}
\setcounter{g@acro@VAE@int}{0}
\setcounter{g@acro@MLP@int}{0}
\setcounter{g@acro@GP@int}{0}
\setcounter{g@acro@MSE@int}{0}
\setcounter{g@acro@MAE@int}{0}
\setcounter{g@acro@CRPS@int}{0}
\setcounter{g@acro@TSPA@int}{0}
\setcounter{g@acro@PLF@int}{0}
\setcounter{g@acro@NRV@int}{3}
\setcounter{g@acro@PV@int}{0}
\setcounter{g@acro@NP@int}{0}

View File

@@ -2,7 +2,7 @@
As discussed in the background information, the imbalance prices are based on the Net Regulation Volume (NRV). This means that the imbalance prices can be reconstructed from the sampled NRV. Multiple baselines and models will be compared that forecast and model the NRV using different metrics. The data utilized in this thesis is provided by Elia. Elia makes a lot of data public and provides it in quarter-hour or minute intervals. The data used in this thesis is on a quarter-hourly basis. This makes the number of input features and output features much more manageable and makes the training more computationally efficient. A full-day sample of the NRV consists of 96 values, one value for every quarter-hour. Further research could be done using smaller data intervals to see if this improves the models.
\subsection{Data}
Elia offers a lot of different data on their website (TODO: open data citation). They provide data for the following categories:
Elia offers a variety of data on their website \parencite{elia_open_data}. They provide data for the following categories:
(TODO: Relevant? or too much information?)
\begin{itemize}
\item Balancing
@@ -15,30 +15,32 @@ Elia offers a lot of different data on their website (TODO: open data citation).
The data useful to model the NRV is scattered over multiple categories. The data used in this thesis is the following:
TODO: ask Jonas: add urls to the correct data? via citation?
% TODO: better citations
\begin{itemize}
\item \textbf{Imbalance prices per quarter-hour (Historical data) } \\
% https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime
This dataset contains the NRV and system imbalance in a quarter-hour interval. The data is available from 01-01-2015 to the present day. The NRV is used as the target variable that needs to be modeled but can also be used as input features. The next day NRV modeling can be conditioned on the real NRV of the previous day.
This dataset contains the NRV and system imbalance in a quarter-hour interval. The data is available from 01-01-2015 to the present day. The NRV is used as the target variable that needs to be modeled but can also be used as input features. The next day NRV modeling can be conditioned on the real NRV of the previous day. \parencite{noauthor_imbalance_nodate}
\item \textbf{Measured and forecasted total load on the Belgian grid (Historical data)} \\
% https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime
Elia publishes what the total load on the Belgian grid is. This data is also provided in a quarter-hour interval. This data consists of the real load for a certain quarter but also the different forecasted loads. There are day-ahead and week-ahead forecasts available. The total load on the Belgian grid can be used as input features for the NRV modeling. The data is also available from 01-01-2015 to the present day.
Elia publishes the total load on the Belgian grid. This data is also provided in a quarter-hour interval. It consists of the real load for a certain quarter-hour as well as the different forecasted loads. There are day-ahead and week-ahead forecasts available. The total load on the Belgian grid can be used as input features for the NRV modeling. The data is also available from 01-01-2015 to the present day. \parencite{noauthor_measured_nodate}
\item \textbf{Photovoltaic power production estimation and forecast on Belgian grid (Historical)} \\
% https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime
The photovoltanic power production is also available in a quarter-hour interval. The production is also forecasted day-ahead and week-ahead. The data is provided for each of the provinces in Belgium. Forecasts are also available for the 3 Belgian regions (Flanders, Wallonia, Brussels) and the total Belgian production. The photovoltanic data has been provided since 01-04-2018 and is available to the present day.
The photovoltaic power production is also available in a quarter-hour interval. The production is also forecasted day-ahead and week-ahead. The data is provided for each of the provinces in Belgium. Forecasts are also available for the 3 Belgian regions (Flanders, Wallonia, Brussels) and the total Belgian production. The photovoltaic data has been provided since 01-04-2018 and is available to the present day. \parencite{noauthor_photovoltaic_nodate}
\item \textbf{Wind power production estimation and forecast on Belgian grid (Historical)} \\
% https://opendata.elia.be/explore/dataset/ods031/information/
Just as the photovoltanic power production data, wind power production is available in a quarterly-hour interval for each of the provinces and regions in Belgium. This data also includes the real production and the forecasts. An additional column is available that shows if the power is generated offshore or onshore. During this thesis, the offshore and onshore data will be combined. The wind power production data has been provided since 01-01-2015 and is available to the present day.
Just as the photovoltaic power production data, wind power production is available in a quarter-hour interval for each of the provinces and regions in Belgium. This data also includes the real production and the forecasts. An additional column is available that shows if the power is generated offshore or onshore. During this thesis, the offshore and onshore data will be combined. The wind power production data has been provided since 01-01-2015 and is available to the present day. \parencite{noauthor_wind_nodate}
\item \textbf{Day-ahead implicit net position (Belgium's balance)} \\
% https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime
The day-ahead implicit net position shows the total amount of electricity that will be imported or exported to neighboring countries. The trades are done on the day-ahead market and are thus known in advance. This data is available in a quarter-hour interval and has been provided since 01-11-2020 and is available to the present day. The data before 01-11-2020 is also available but only in hourly intervals.
The day-ahead implicit net position shows the total amount of electricity that will be imported or exported to neighboring countries. The trades are done on the day-ahead market and are thus known in advance. This data is available in a quarter-hour interval and has been provided since 01-11-2020 and is available to the present day. The data before 01-11-2020 is also available but only in hourly intervals. \parencite{noauthor_intraday_nodate}
\end{itemize}
A lot of data is available but only the most relevant data needs to be used. Experiments will be done to identify which data and features improve the NRV modeling. The data will be split into a training and test set. The training dataset starts depending on which data features are used but ends on 31-12-2022. The test set starts on 01-01-2023 and ends on (TODO: check the end date). This makes sure enough data is available to train the models and the test set is large enough to evaluate the models. The year 2023 is chosen as the test set because it is the most recent data available when the thesis experiments were conducted. Using data from 2022 in the test set also does not make a lot of sense because the trained models would be used to predict the future. Data from 2022 is not relevant anymore to evaluate the models.
The open data can be accessed at: \url{https://opendata.elia.be/pages/home/}
A lot of data is available but only the most relevant data needs to be used. Experiments will be done to identify which data and features improve the NRV modeling. The data will be split into a training and test set. The start date of the training dataset depends on which data features are used, but it always ends on 31-12-2022. The test set starts on 01-01-2023 and ends on 12-12-2023. This makes sure enough data is available to train the models and the test set is large enough to evaluate the models. The year 2023 is chosen as the test set because it is the most recent data available when the thesis experiments were conducted. Using data from 2022 in the test set also does not make a lot of sense because the trained models would be used to predict the future. Data from 2022 is not relevant anymore to evaluate the models. Some data features are missing for certain periods; this is taken into account, and those periods are excluded from the training and test set even if the unavailable feature is not used. This makes sure the data is consistent and results can be compared fairly.
\subsection{Quantile Regression}
\input{sections/results/models/linear}

View File

@@ -23,6 +23,16 @@
\babel@aux{english}{}
\@input{sections/introduction.aux}
\@input{sections/background.aux}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{ho_denoising_2020}
\abx@aux@segm{0}{0}{ho_denoising_2020}
\abx@aux@page{1}{14}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{ho2020denoising}
\abx@aux@segm{0}{0}{ho2020denoising}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{ho2020denoising}
\abx@aux@segm{0}{0}{ho2020denoising}
\@input{sections/policies.aux}
\@input{sections/literature_study.aux}
\abx@aux@refcontext{nyt/apasortcite//global/global}
@@ -40,57 +50,81 @@
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{rasul_autoregressive_2021}
\abx@aux@segm{0}{0}{rasul_autoregressive_2021}
\abx@aux@page{1}{20}
\abx@aux@page{2}{20}
\abx@aux@page{3}{20}
\abx@aux@page{4}{20}
\abx@aux@page{5}{20}
\abx@aux@page{6}{20}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{dumas_deep_2022}
\abx@aux@segm{0}{0}{dumas_deep_2022}
\abx@aux@page{5}{21}
\abx@aux@cite{0}{dumas_probabilistic_2019}
\abx@aux@segm{0}{0}{dumas_probabilistic_2019}
\abx@aux@page{7}{21}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{elia_open_data}
\abx@aux@segm{0}{0}{elia_open_data}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_imbalance_nodate}
\abx@aux@segm{0}{0}{noauthor_imbalance_nodate}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_measured_nodate}
\abx@aux@segm{0}{0}{noauthor_measured_nodate}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_photovoltaic_nodate}
\abx@aux@segm{0}{0}{noauthor_photovoltaic_nodate}
\@writefile{toc}{\contentsline {section}{\numberline {6}Results \& Discussion}{22}{section.6}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Data}{22}{subsection.6.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Quantile Regression}{23}{subsection.6.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{23}{subsubsection.6.2.1}\protected@file@percent }
\abx@aux@page{8}{22}
\abx@aux@page{9}{22}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_wind_nodate}
\abx@aux@segm{0}{0}{noauthor_wind_nodate}
\abx@aux@refcontext{nyt/apasortcite//global/global}
\abx@aux@cite{0}{noauthor_intraday_nodate}
\abx@aux@segm{0}{0}{noauthor_intraday_nodate}
\abx@aux@page{10}{23}
\abx@aux@page{11}{23}
\abx@aux@page{12}{23}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Quantile Regression}{24}{subsection.6.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{24}{subsubsection.6.2.1}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Linear model results\relax }}{24}{table.caption.9}\protected@file@percent }
\newlabel{tab:linear_model_baseline_results}{{3}{24}{Linear model results\relax }{table.caption.9}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Mean and standard deviation of the NRV values over the quarter of the day\relax }}{26}{figure.caption.10}\protected@file@percent }
\newlabel{fig:nrv_mean_std_over_quarter}{{6}{26}{Mean and standard deviation of the NRV values over the quarter of the day\relax }{figure.caption.10}{}}
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Autoregressive linear model results with time features\relax }}{26}{table.caption.11}\protected@file@percent }
\newlabel{tab:autoregressive_linear_model_quarter_embedding_baseline_results}{{4}{26}{Autoregressive linear model results with time features\relax }{table.caption.11}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Comparison of the autoregressive and non-autoregressive linear model samples.\relax }}{27}{figure.caption.12}\protected@file@percent }
\newlabel{fig:linear_model_sample_comparison}{{7}{27}{Comparison of the autoregressive and non-autoregressive linear model samples.\relax }{figure.caption.12}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }}{28}{figure.caption.13}\protected@file@percent }
\newlabel{fig:linear_model_samples_comparison}{{8}{28}{Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }{figure.caption.13}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{29}{figure.caption.14}\protected@file@percent }
\newlabel{fig:linear_model_quantile_over_underestimation}{{9}{29}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.14}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Mean and standard deviation of the NRV values over the quarter of the day\relax }}{26}{figure.caption.10}\protected@file@percent }
\newlabel{fig:nrv_mean_std_over_quarter}{{7}{26}{Mean and standard deviation of the NRV values over the quarter of the day\relax }{figure.caption.10}{}}
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Autoregressive linear model results with time features\relax }}{27}{table.caption.11}\protected@file@percent }
\newlabel{tab:autoregressive_linear_model_quarter_embedding_baseline_results}{{4}{27}{Autoregressive linear model results with time features\relax }{table.caption.11}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Comparison of the autoregressive and non-autoregressive linear model samples.\relax }}{27}{figure.caption.12}\protected@file@percent }
\newlabel{fig:linear_model_sample_comparison}{{8}{27}{Comparison of the autoregressive and non-autoregressive linear model samples.\relax }{figure.caption.12}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }}{28}{figure.caption.13}\protected@file@percent }
\newlabel{fig:linear_model_samples_comparison}{{9}{28}{Samples for two examples from the test set for the autoregressive and non-autoregressive linear model. The real NRV is shown in orange.\relax }{figure.caption.13}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{29}{figure.caption.14}\protected@file@percent }
\newlabel{fig:linear_model_quantile_over_underestimation}{{10}{29}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.14}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.2}Non-Linear Model}{30}{subsubsection.6.2.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {5}{\ignorespaces Non-linear Quantile Regression Model Architecture\relax }}{30}{table.caption.15}\protected@file@percent }
\newlabel{tab:non_linear_model_architecture}{{5}{30}{Non-linear Quantile Regression Model Architecture\relax }{table.caption.15}{}}
\@writefile{lot}{\contentsline {table}{\numberline {6}{\ignorespaces Non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }}{31}{table.caption.16}\protected@file@percent }
\newlabel{tab:non_linear_model_results}{{6}{31}{Non-linear quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.16}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }}{32}{figure.caption.17}\protected@file@percent }
\newlabel{fig:non_linear_model_examples}{{10}{32}{Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }{figure.caption.17}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{33}{figure.caption.18}\protected@file@percent }
\newlabel{fig:non-linear_model_quantile_over_underestimation}{{11}{33}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.18}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }}{32}{figure.caption.17}\protected@file@percent }
\newlabel{fig:non_linear_model_examples}{{11}{32}{Comparison of the autoregressive and non-autoregressive non-linear model examples.\relax }{figure.caption.17}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{33}{figure.caption.18}\protected@file@percent }
\newlabel{fig:non-linear_model_quantile_over_underestimation}{{12}{33}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive non-linear models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.18}{}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.2.3}GRU Model}{33}{subsubsection.6.2.3}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {7}{\ignorespaces GRU Model Architecture\relax }}{34}{table.caption.19}\protected@file@percent }
\newlabel{tab:gru_model_architecture}{{7}{34}{GRU Model Architecture\relax }{table.caption.19}{}}
\@writefile{lot}{\contentsline {table}{\numberline {8}{\ignorespaces Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }}{35}{table.caption.20}\protected@file@percent }
\newlabel{tab:autoregressive_gru_model_results}{{8}{35}{Autoregressive GRU quantile regression model results. All the models used a dropout of 0.2 .\relax }{table.caption.20}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }}{36}{figure.caption.21}\protected@file@percent }
\newlabel{fig:gru_model_sample_comparison}{{12}{36}{Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }{figure.caption.21}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{37}{figure.caption.22}\protected@file@percent }
\newlabel{fig:gru_model_quantile_over_underestimation}{{13}{37}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.22}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }}{36}{figure.caption.21}\protected@file@percent }
\newlabel{fig:gru_model_sample_comparison}{{13}{36}{Comparison of the autoregressive and non-autoregressive GRU model examples.\relax }{figure.caption.21}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }}{37}{figure.caption.22}\protected@file@percent }
\newlabel{fig:gru_model_quantile_over_underestimation}{{14}{37}{Over/underestimation of the quantiles for the autoregressive and non-autoregressive GRU models. Both the quantile performance for the training and test set are shown. The plots are generated using the input features NRV, Load, Wind, PV, Net Position, and the quarter embedding (only for the autoregressive model).\relax }{figure.caption.22}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.3}Diffusion}{37}{subsection.6.3}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }}{39}{figure.caption.23}\protected@file@percent }
\newlabel{fig:diffusion_intermediates}{{14}{39}{Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }{figure.caption.23}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }}{39}{figure.caption.23}\protected@file@percent }
\newlabel{fig:diffusion_intermediates}{{15}{39}{Intermediate steps of the diffusion model for example 864 from the test set. The confidence intervals shown in the plots are made using 100 samples.\relax }{figure.caption.23}{}}
\@writefile{lot}{\contentsline {table}{\numberline {9}{\ignorespaces Simple diffusion model results.\relax }}{39}{table.caption.24}\protected@file@percent }
\newlabel{tab:diffusion_results}{{9}{39}{Simple diffusion model results.\relax }{table.caption.24}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }}{40}{figure.caption.25}\protected@file@percent }
\newlabel{fig:diffusion_test_set_examples}{{15}{40}{The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }{figure.caption.25}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }}{41}{figure.caption.26}\protected@file@percent }
\newlabel{fig:diffusion_test_set_example_only_nrv_vs_all}{{16}{41}{The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }{figure.caption.26}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }}{40}{figure.caption.25}\protected@file@percent }
\newlabel{fig:diffusion_test_set_examples}{{16}{40}{The plots show the generations for the examples from the test set. The diffusion model used to generate the samples consists of 2 layers with a hidden size of 1024. The number of denoising steps is set to 300. The confidence intervals shown in the plots are made using 100 samples. All the available input features are used which includes the \acs {NRV}, Load, Wind, \acs {PV} and \acs {NP} data.\relax }{figure.caption.25}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }}{41}{figure.caption.26}\protected@file@percent }
\newlabel{fig:diffusion_test_set_example_only_nrv_vs_all}{{17}{41}{The plots show the generations for the first examples from the test set. Two diffusion models with 2 layers and 1024 hidden units are used. The first one is only conditioned on the NRV of the previous day while the second one uses all available input features.\relax }{figure.caption.26}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.4}Comparison}{41}{subsection.6.4}\protected@file@percent }
\ACRO{recordpage}{MSE}{42}{1}{41}
\ACRO{recordpage}{MAE}{42}{1}{41}
@@ -105,8 +139,8 @@
\ACRO{recordpage}{MAE}{43}{1}{42}
\ACRO{recordpage}{MSE}{43}{1}{42}
\ACRO{recordpage}{MAE}{43}{1}{42}
\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces Comparison of the autoregressive linear and GRU model\relax }}{43}{figure.caption.28}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{17}{43}{Comparison of the autoregressive linear and GRU model\relax }{figure.caption.28}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Comparison of the autoregressive linear and GRU model\relax }}{43}{figure.caption.28}\protected@file@percent }
\newlabel{fig:ar_linear_gru_comparison}{{18}{43}{Comparison of the autoregressive linear and GRU model\relax }{figure.caption.28}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {6.5}Policies for battery optimization}{44}{subsection.6.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.5.1}Baselines}{44}{subsubsection.6.5.1}\protected@file@percent }
\ACRO{recordpage}{NRV}{45}{1}{44}
@@ -123,23 +157,40 @@
\@writefile{toc}{\contentsline {subsubsection}{\numberline {6.5.2}Policy using generated NRV samples}{45}{subsubsection.6.5.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {12}{\ignorespaces Comparison of diffusion models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }}{46}{table.caption.30}\protected@file@percent }
\newlabel{tab:diffusion_policy_comparison}{{12}{46}{Comparison of diffusion models using different hyperparameters. Early stopping is done based on the profit using the validation set.\relax }{table.caption.30}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }}{47}{figure.caption.31}\protected@file@percent }
\newlabel{fig:diffusion_policy_comparison_high_low_crps}{{18}{47}{Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }{figure.caption.31}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }}{47}{figure.caption.31}\protected@file@percent }
\newlabel{fig:diffusion_policy_comparison_high_low_crps}{{19}{47}{Comparison of the two samples from the model with the lowest CRPS and the model with the highest profit. \relax }{figure.caption.31}{}}
\@writefile{lot}{\contentsline {table}{\numberline {13}{\ignorespaces Comparison of the different models using the CRPS, profit, charge cycles and penalty. The best-performing models for a certain type are selected based on the profit.\relax }}{47}{table.caption.32}\protected@file@percent }
\newlabel{tab:policy_comparison}{{13}{47}{Comparison of the different models using the CRPS, profit, charge cycles and penalty. The best-performing models for a certain type are selected based on the profit.\relax }{table.caption.32}{}}
\@writefile{toc}{\contentsline {section}{\numberline {7}Conclusion}{48}{section.7}\protected@file@percent }
\abx@aux@page{6}{50}
\abx@aux@page{7}{50}
\abx@aux@page{8}{50}
\abx@aux@page{9}{50}
\abx@aux@page{13}{50}
\abx@aux@page{14}{50}
\abx@aux@page{15}{50}
\abx@aux@page{16}{50}
\abx@aux@page{17}{50}
\abx@aux@page{18}{50}
\abx@aux@page{19}{50}
\abx@aux@page{20}{50}
\abx@aux@page{21}{50}
\abx@aux@page{22}{50}
\abx@aux@page{23}{50}
\abx@aux@page{24}{50}
\@input{sections/appendix.aux}
\ACRO{total-barriers}{1}
\ACRO{usage}{QR=={0}}
\ACRO{usage}{AQR=={0}}
\ACRO{usage}{NAQR=={1}}
\ACRO{usage}{GRU=={0}}
\ACRO{usage}{LSTM=={0}}
\ACRO{usage}{GAN=={1}}
\ACRO{usage}{CTSGAN=={1}}
\ACRO{usage}{VAE=={0}}
\ACRO{usage}{MLP=={1}}
\ACRO{usage}{GP=={1}}
\ACRO{usage}{MSE=={4}}
\ACRO{usage}{MAE=={4}}
\ACRO{usage}{CRPS=={2}}
\ACRO{usage}{CRPS=={3}}
\ACRO{usage}{TSPA=={1}}
\ACRO{usage}{PLF=={1}}
\ACRO{usage}{NRV=={12}}
\ACRO{usage}{PV=={0}}
\ACRO{usage}{NP=={0}}
@@ -158,15 +209,28 @@
\ACRO{pages}{BSP=={6@1@5}}
\ACRO{pages}{aFRR=={6@1@5}}
\ACRO{pages}{mFRR=={6@1@5}}
\ACRO{pages}{GAN=={21@1@20}}
\ACRO{pages}{CTSGAN=={21@1@20}}
\ACRO{pages}{MLP=={22@1@21}}
\ACRO{pages}{GP=={22@1@21}}
\ACRO{pages}{TSPA=={22@1@21}}
\ACRO{pages}{PLF=={22@1@21}}
\ACRO{pages}{NAQR=={43@1@42}}
\ACRO{pages}{CRPS=={42@1@41|43@1@42}}
\ACRO{pages}{CRPS=={22@1@21|42@1@41|43@1@42}}
\ACRO{pages}{MSE=={42@1@41|43@1@42}}
\ACRO{pages}{MAE=={42@1@41|43@1@42}}
\ACRO{pages}{NRV=={3@1@2|45@1@44|46@1@45}}
\abx@aux@read@bbl@mdfivesum{5DC935CC8C8FAB8A3CAF97A486ED2386}
\abx@aux@read@bblrerun
\abx@aux@read@bbl@mdfivesum{E3B4F6289F5EA7AEDA0AEA967029BC23}
\abx@aux@defaultrefcontext{0}{dumas_probabilistic_2019}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{dumas_deep_2022}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{ho_denoising_2020}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_imbalance_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_intraday_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{lu_scenarios_2022}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_measured_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_photovoltaic_nodate}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{poggi_electricity_2023}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{rasul_autoregressive_2021}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{weron_electricity_2014}{nyt/global//global/global}
\abx@aux@defaultrefcontext{0}{noauthor_wind_nodate}{nyt/global//global/global}
\gdef \@abspage@last{53}

View File

@@ -19,6 +19,85 @@
\refsection{0}
\datalist[entry]{nyt/apasortcite//global/global}
\entry{dumas_probabilistic_2019}{article}{}
\name{author}{5}{}{%
{{un=0,uniquepart=base,hash=bc1b38697de64bfe3f5e7876e531bd45}{%
family={Dumas},
familyi={D\bibinitperiod},
given={Jonathan},
giveni={J\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=90aeadbc706186ab80f0d0e111f1af6f}{%
family={Boukas},
familyi={B\bibinitperiod},
given={Ioannis},
giveni={I\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=7e79d6a813916baca1e80ea45e08e4aa}{%
family={Villena},
familyi={V\bibinitperiod},
given={Miguel\bibnamedelima Manuel},
giveni={M\bibinitperiod\bibinitdelim M\bibinitperiod},
givenun=0,
prefix={de},
prefixi={d\bibinitperiod}}}%
{{un=0,uniquepart=base,hash=220a8e456b600303157420acf89b23d5}{%
family={Mathieu},
familyi={M\bibinitperiod},
given={Sébastien},
giveni={S\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=62ac593786e28c2e56fab28d272346bf}{%
family={Cornélusse},
familyi={C\bibinitperiod},
given={Bertrand},
giveni={B\bibinitperiod},
givenun=0}}%
}
\strng{namehash}{cac0f5a33afb491da6830cd9b00071a2}
\strng{fullhash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\strng{bibnamehash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\strng{authorbibnamehash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\strng{authornamehash}{cac0f5a33afb491da6830cd9b00071a2}
\strng{authorfullhash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\field{extraname}{1}
\field{sortinit}{D}
\field{sortinithash}{6f385f66841fb5e82009dc833c761848}
\field{extradatescope}{labelyear}
\field{labeldatesource}{}
\true{uniqueprimaryauthor}
\field{labelnamesource}{author}
\field{labeltitlesource}{title}
\field{abstract}{Forecasting imbalance prices is essential for strategic participation in the short-term energy markets. A novel two-step probabilistic approach is proposed, with a particular focus on the Belgian case. The first step consists of computing the net regulation volume state transition probabilities. It is modeled as a matrix computed using historical data. This matrix is then used to infer the imbalance prices since the net regulation volume can be related to the level of reserves activated and the corresponding marginal prices for each activation level are published by the Belgian Transmission System Operator one day before electricity delivery. This approach is compared to a deterministic model, a multi-layer perceptron, and a widely used probabilistic technique, Gaussian Processes.}
\field{eprinttype}{arxiv}
\field{journaltitle}{2019 16th International Conference on the European Energy Market ({EEM})}
\field{month}{9}
\field{title}{Probabilistic Forecasting of Imbalance Prices in the Belgian Context}
\field{urlday}{17}
\field{urlmonth}{4}
\field{urlyear}{2024}
\field{year}{2019}
\field{dateera}{ce}
\field{urldateera}{ce}
\field{pages}{1\bibrangedash 7}
\range{pages}{7}
\verb{doi}
\verb 10.1109/EEM.2019.8916375
\endverb
\verb{eprint}
\verb 2106.07361 [cs, eess, q-fin]
\endverb
\verb{file}
\verb arXiv.org Snapshot:/Users/victormylle/Zotero/storage/3N56FPYP/2106.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/958MBH5M/Dumas et al. - 2019 - Probabilistic Forecasting of Imbalance Prices in t.pdf:application/pdf
\endverb
\verb{urlraw}
\verb http://arxiv.org/abs/2106.07361
\endverb
\verb{url}
\verb http://arxiv.org/abs/2106.07361
\endverb
\keyw{Computer Science - Machine Learning,Electrical Engineering and Systems Science - Signal Processing,Quantitative Finance - Statistical Finance}
\endentry
\entry{dumas_deep_2022}{article}{}
\name{author}{5}{}{%
{{un=0,uniquepart=base,hash=bc1b38697de64bfe3f5e7876e531bd45}{%
@@ -58,6 +137,7 @@
\strng{authorbibnamehash}{176facf650dcf7220eec24af7e81dc53}
\strng{authornamehash}{cac0f5a33afb491da6830cd9b00071a2}
\strng{authorfullhash}{176facf650dcf7220eec24af7e81dc53}
\field{extraname}{2}
\field{sortinit}{D}
\field{sortinithash}{6f385f66841fb5e82009dc833c761848}
\field{extradatescope}{labelyear}
@@ -95,6 +175,116 @@
\verb https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909
\endverb
\endentry
\entry{ho_denoising_2020}{misc}{}
\name{author}{3}{}{%
{{un=0,uniquepart=base,hash=2ac2ca10b22e4d13af1767e87495412f}{%
family={Ho},
familyi={H\bibinitperiod},
given={Jonathan},
giveni={J\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=31ac0835ffc3ba6d612252522a6a2011}{%
family={Jain},
familyi={J\bibinitperiod},
given={Ajay},
giveni={A\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=e28d4ee199593959d8c29980a64f1974}{%
family={Abbeel},
familyi={A\bibinitperiod},
given={Pieter},
giveni={P\bibinitperiod},
givenun=0}}%
}
\list{publisher}{1}{%
{arXiv}%
}
\strng{namehash}{d9c72e0919aac578c7e983d804becd3d}
\strng{fullhash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\strng{bibnamehash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\strng{authorbibnamehash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\strng{authornamehash}{d9c72e0919aac578c7e983d804becd3d}
\strng{authorfullhash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\field{sortinit}{H}
\field{sortinithash}{23a3aa7c24e56cfa16945d55545109b5}
\field{extradatescope}{labelyear}
\field{labeldatesource}{}
\true{uniqueprimaryauthor}
\field{labelnamesource}{author}
\field{labeltitlesource}{title}
\field{abstract}{We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional {CIFAR}10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art {FID} score of 3.17. On 256x256 {LSUN}, we obtain sample quality similar to {ProgressiveGAN}. Our implementation is available at https://github.com/hojonathanho/diffusion}
\field{day}{16}
\field{eprinttype}{arxiv}
\field{month}{12}
\field{number}{{arXiv}:2006.11239}
\field{title}{Denoising Diffusion Probabilistic Models}
\field{urlday}{2}
\field{urlmonth}{4}
\field{urlyear}{2024}
\field{year}{2020}
\field{dateera}{ce}
\field{urldateera}{ce}
\verb{doi}
\verb 10.48550/arXiv.2006.11239
\endverb
\verb{eprint}
\verb 2006.11239 [cs, stat]
\endverb
\verb{file}
\verb arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/CYMHCMUT/Ho et al. - 2020 - Denoising Diffusion Probabilistic Models.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/CE8R84V5/2006.html:text/html
\endverb
\verb{urlraw}
\verb http://arxiv.org/abs/2006.11239
\endverb
\verb{url}
\verb http://arxiv.org/abs/2006.11239
\endverb
\keyw{Computer Science - Machine Learning,Statistics - Machine Learning}
\endentry
\entry{noauthor_imbalance_nodate}{online}{}
\field{sortinit}{I}
\field{sortinithash}{8d291c51ee89b6cd86bf5379f0b151d8}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{System imbalance prices applied if an imbalance is found between injections and offtakes in a balance responsible parties ({BRPs}) balance area. When imbalance prices are published on a quarter-hourly basis, the published prices have not yet been validated and can therefore only be used as an indication of the imbalance price.Only after the published prices have been validated can they be used for invoicing purposes. The records for month M are validated after the 15th of month M+1. Contains the historical data and is refreshed daily.This dataset contains data until 21/05/2024 (before {MARI} local go-live).}
\field{langid}{british}
\field{title}{Imbalance prices per quarter-hour (Historical data)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/PZI6PTQ2/information.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime
\endverb
\endentry
\entry{noauthor_intraday_nodate}{online}{}
\field{sortinit}{I}
\field{sortinithash}{8d291c51ee89b6cd86bf5379f0b151d8}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Net sum of intraday nominations of the implicit capacity allocated for energy exchanges for Belgium.}
\field{langid}{british}
\field{title}{Intraday implicit net position (Belgium's balance)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/XJ7KBDWG/information.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime
\endverb
\endentry
\entry{lu_scenarios_2022}{article}{}
\name{author}{4}{}{%
{{un=0,uniquepart=base,hash=e20b6fceb410a42e1abe17804a826487}{%
@@ -166,6 +356,50 @@
\endverb
\keyw{Generative adversarial networks,Conditions,Electricity Price,Point forecasting,Probabilistic forecasting}
\endentry
\entry{noauthor_measured_nodate}{online}{}
\field{sortinit}{M}
\field{sortinithash}{4625c616857f13d17ce56f7d4f97d451}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Measured and upscaled, most recent, day-ahead and week-ahead forecasts for total load on the Belgian grid.}
\field{langid}{british}
\field{title}{Measured and forecasted total load on the Belgian grid (Historical data)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/8857IXIQ/table.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime
\endverb
\endentry
\entry{noauthor_photovoltaic_nodate}{online}{}
\field{sortinit}{P}
\field{sortinithash}{ff3bcf24f47321b42cb156c2cc8a8422}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Measured and upscaled photovoltaic power generation on the Belgian grid.Please note that the measured and forecast values are in {MW}, it is of the users responsibility to interpret the values as such.}
\field{langid}{british}
\field{title}{Photovoltaic power production estimation and forecast on Belgian grid (Historical)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/7VB5YHYE/table.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime
\endverb
\endentry
\entry{poggi_electricity_2023}{article}{}
\name{author}{3}{}{%
{{un=0,uniquepart=base,hash=d5449fc584ab2f2182b0b791e9e2524e}{%
@@ -230,7 +464,76 @@
\verb{url}
\verb https://www.mdpi.com/2673-9909/3/2/18
\endverb
\keyw{autoregressive,deep learning,electricity price forecasting,machine learning,neural network,statistical method,univariate model}
\keyw{deep learning,autoregressive,electricity price forecasting,machine learning,neural network,statistical method,univariate model}
\endentry
\entry{rasul_autoregressive_2021}{misc}{}
\name{author}{4}{}{%
{{un=0,uniquepart=base,hash=3c17107e356e9e329a5b82ae2f7cd441}{%
family={Rasul},
familyi={R\bibinitperiod},
given={Kashif},
giveni={K\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=44cad1999660adf4ecf118ba14cc42e6}{%
family={Seward},
familyi={S\bibinitperiod},
given={Calvin},
giveni={C\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=e061c52f2addf0fbd4a3708586db1f4e}{%
family={Schuster},
familyi={S\bibinitperiod},
given={Ingmar},
giveni={I\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=626b3b250f5889797be5d59aab9e0ac5}{%
family={Vollgraf},
familyi={V\bibinitperiod},
given={Roland},
giveni={R\bibinitperiod},
givenun=0}}%
}
\list{publisher}{1}{%
{arXiv}%
}
\strng{namehash}{6df40132e228a962ad11198a9dd9b91c}
\strng{fullhash}{17d56270d94f39fe44bf03ea876141dd}
\strng{bibnamehash}{17d56270d94f39fe44bf03ea876141dd}
\strng{authorbibnamehash}{17d56270d94f39fe44bf03ea876141dd}
\strng{authornamehash}{6df40132e228a962ad11198a9dd9b91c}
\strng{authorfullhash}{17d56270d94f39fe44bf03ea876141dd}
\field{sortinit}{R}
\field{sortinithash}{5e1c39a9d46ffb6bebd8f801023a9486}
\field{extradatescope}{labelyear}
\field{labeldatesource}{}
\true{uniqueprimaryauthor}
\field{labelnamesource}{author}
\field{labeltitlesource}{title}
\field{abstract}{In this work, we propose {\textbackslash}texttt\{{TimeGrad}\}, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.}
\field{day}{2}
\field{eprinttype}{arxiv}
\field{month}{2}
\field{number}{{arXiv}:2101.12072}
\field{title}{Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting}
\field{urlday}{15}
\field{urlmonth}{10}
\field{urlyear}{2023}
\field{year}{2021}
\field{dateera}{ce}
\field{urldateera}{ce}
\verb{eprint}
\verb 2101.12072 [cs]
\endverb
\verb{file}
\verb arXiv.org Snapshot:/Users/victormylle/Zotero/storage/8LIRWZ4G/2101.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QPPFJVR5/Rasul et al. - 2021 - Autoregressive Denoising Diffusion Models for Mult.pdf:application/pdf
\endverb
\verb{urlraw}
\verb http://arxiv.org/abs/2101.12072
\endverb
\verb{url}
\verb http://arxiv.org/abs/2101.12072
\endverb
\keyw{Computer Science - Machine Learning,Computer Science - Artificial Intelligence}
\endentry
\entry{weron_electricity_2014}{article}{}
\name{author}{1}{}{%
@@ -284,10 +587,111 @@
\verb{url}
\verb https://www.sciencedirect.com/science/article/pii/S0169207014001083
\endverb
\keyw{Autoregression,Day-ahead market,Electricity price forecasting,Factor model,Forecast combination,Neural network,Probabilistic forecast,Seasonality}
\keyw{Electricity price forecasting,Autoregression,Day-ahead market,Factor model,Forecast combination,Neural network,Probabilistic forecast,Seasonality}
\endentry
\entry{noauthor_wind_nodate}{online}{}
\field{sortinit}{W}
\field{sortinithash}{4315d78024d0cea9b57a0c6f0e35ed0d}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Measured and upscaled wind power generation on the Belgian grid.Please note that the measured and forecast values are in {MW}, it is of the users responsibility to interpret the values as such.}
\field{langid}{british}
\field{title}{Wind power production estimation and forecast on Belgian grid (Historical)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/UTJUH5VQ/information.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods031/information/
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods031/information/
\endverb
\endentry
\enddatalist
\datalist[entry]{nyt/global//global/global}
\entry{dumas_probabilistic_2019}{article}{}
\name{author}{5}{}{%
{{un=0,uniquepart=base,hash=bc1b38697de64bfe3f5e7876e531bd45}{%
family={Dumas},
familyi={D\bibinitperiod},
given={Jonathan},
giveni={J\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=90aeadbc706186ab80f0d0e111f1af6f}{%
family={Boukas},
familyi={B\bibinitperiod},
given={Ioannis},
giveni={I\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=7e79d6a813916baca1e80ea45e08e4aa}{%
family={Villena},
familyi={V\bibinitperiod},
given={Miguel\bibnamedelima Manuel},
giveni={M\bibinitperiod\bibinitdelim M\bibinitperiod},
givenun=0,
prefix={de},
prefixi={d\bibinitperiod}}}%
{{un=0,uniquepart=base,hash=220a8e456b600303157420acf89b23d5}{%
family={Mathieu},
familyi={M\bibinitperiod},
given={Sébastien},
giveni={S\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=62ac593786e28c2e56fab28d272346bf}{%
family={Cornélusse},
familyi={C\bibinitperiod},
given={Bertrand},
giveni={B\bibinitperiod},
givenun=0}}%
}
\strng{namehash}{cac0f5a33afb491da6830cd9b00071a2}
\strng{fullhash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\strng{bibnamehash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\strng{authorbibnamehash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\strng{authornamehash}{cac0f5a33afb491da6830cd9b00071a2}
\strng{authorfullhash}{fa8ecb65a3e2049bad6bebe5f605ff0f}
\field{extraname}{1}
\field{sortinit}{D}
\field{sortinithash}{6f385f66841fb5e82009dc833c761848}
\field{extradatescope}{labelyear}
\field{labeldatesource}{}
\true{uniqueprimaryauthor}
\field{labelnamesource}{author}
\field{labeltitlesource}{title}
\field{abstract}{Forecasting imbalance prices is essential for strategic participation in the short-term energy markets. A novel two-step probabilistic approach is proposed, with a particular focus on the Belgian case. The first step consists of computing the net regulation volume state transition probabilities. It is modeled as a matrix computed using historical data. This matrix is then used to infer the imbalance prices since the net regulation volume can be related to the level of reserves activated and the corresponding marginal prices for each activation level are published by the Belgian Transmission System Operator one day before electricity delivery. This approach is compared to a deterministic model, a multi-layer perceptron, and a widely used probabilistic technique, Gaussian Processes.}
\field{eprinttype}{arxiv}
\field{journaltitle}{2019 16th International Conference on the European Energy Market ({EEM})}
\field{month}{9}
\field{title}{Probabilistic Forecasting of Imbalance Prices in the Belgian Context}
\field{urlday}{17}
\field{urlmonth}{4}
\field{urlyear}{2024}
\field{year}{2019}
\field{dateera}{ce}
\field{urldateera}{ce}
\field{pages}{1\bibrangedash 7}
\range{pages}{7}
\verb{doi}
\verb 10.1109/EEM.2019.8916375
\endverb
\verb{eprint}
\verb 2106.07361 [cs, eess, q-fin]
\endverb
\verb{file}
\verb arXiv.org Snapshot:/Users/victormylle/Zotero/storage/3N56FPYP/2106.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/958MBH5M/Dumas et al. - 2019 - Probabilistic Forecasting of Imbalance Prices in t.pdf:application/pdf
\endverb
\verb{urlraw}
\verb http://arxiv.org/abs/2106.07361
\endverb
\verb{url}
\verb http://arxiv.org/abs/2106.07361
\endverb
\keyw{Computer Science - Machine Learning,Electrical Engineering and Systems Science - Signal Processing,Quantitative Finance - Statistical Finance}
\endentry
\entry{dumas_deep_2022}{article}{}
\name{author}{5}{}{%
{{un=0,uniquepart=base,hash=bc1b38697de64bfe3f5e7876e531bd45}{%
@@ -327,6 +731,7 @@
\strng{authorbibnamehash}{176facf650dcf7220eec24af7e81dc53}
\strng{authornamehash}{cac0f5a33afb491da6830cd9b00071a2}
\strng{authorfullhash}{176facf650dcf7220eec24af7e81dc53}
\field{extraname}{2}
\field{sortinit}{D}
\field{sortinithash}{6f385f66841fb5e82009dc833c761848}
\field{extradatescope}{labelyear}
@@ -364,6 +769,116 @@
\verb https://linkinghub.elsevier.com/retrieve/pii/S0306261921011909
\endverb
\endentry
\entry{ho_denoising_2020}{misc}{}
\name{author}{3}{}{%
{{un=0,uniquepart=base,hash=2ac2ca10b22e4d13af1767e87495412f}{%
family={Ho},
familyi={H\bibinitperiod},
given={Jonathan},
giveni={J\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=31ac0835ffc3ba6d612252522a6a2011}{%
family={Jain},
familyi={J\bibinitperiod},
given={Ajay},
giveni={A\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=e28d4ee199593959d8c29980a64f1974}{%
family={Abbeel},
familyi={A\bibinitperiod},
given={Pieter},
giveni={P\bibinitperiod},
givenun=0}}%
}
\list{publisher}{1}{%
{arXiv}%
}
\strng{namehash}{d9c72e0919aac578c7e983d804becd3d}
\strng{fullhash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\strng{bibnamehash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\strng{authorbibnamehash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\strng{authornamehash}{d9c72e0919aac578c7e983d804becd3d}
\strng{authorfullhash}{1ef17e0fed8d834b029ebb1c6adf76b4}
\field{sortinit}{H}
\field{sortinithash}{23a3aa7c24e56cfa16945d55545109b5}
\field{extradatescope}{labelyear}
\field{labeldatesource}{}
\true{uniqueprimaryauthor}
\field{labelnamesource}{author}
\field{labeltitlesource}{title}
\field{abstract}{We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional {CIFAR}10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art {FID} score of 3.17. On 256x256 {LSUN}, we obtain sample quality similar to {ProgressiveGAN}. Our implementation is available at https://github.com/hojonathanho/diffusion}
\field{day}{16}
\field{eprinttype}{arxiv}
\field{month}{12}
\field{number}{{arXiv}:2006.11239}
\field{title}{Denoising Diffusion Probabilistic Models}
\field{urlday}{2}
\field{urlmonth}{4}
\field{urlyear}{2024}
\field{year}{2020}
\field{dateera}{ce}
\field{urldateera}{ce}
\verb{doi}
\verb 10.48550/arXiv.2006.11239
\endverb
\verb{eprint}
\verb 2006.11239 [cs, stat]
\endverb
\verb{file}
\verb arXiv Fulltext PDF:/Users/victormylle/Zotero/storage/CYMHCMUT/Ho et al. - 2020 - Denoising Diffusion Probabilistic Models.pdf:application/pdf;arXiv.org Snapshot:/Users/victormylle/Zotero/storage/CE8R84V5/2006.html:text/html
\endverb
\verb{urlraw}
\verb http://arxiv.org/abs/2006.11239
\endverb
\verb{url}
\verb http://arxiv.org/abs/2006.11239
\endverb
\keyw{Computer Science - Machine Learning,Statistics - Machine Learning}
\endentry
\entry{noauthor_imbalance_nodate}{online}{}
\field{sortinit}{I}
\field{sortinithash}{8d291c51ee89b6cd86bf5379f0b151d8}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{System imbalance prices applied if an imbalance is found between injections and offtakes in a balance responsible parties ({BRPs}) balance area. When imbalance prices are published on a quarter-hourly basis, the published prices have not yet been validated and can therefore only be used as an indication of the imbalance price.Only after the published prices have been validated can they be used for invoicing purposes. The records for month M are validated after the 15th of month M+1. Contains the historical data and is refreshed daily.This dataset contains data until 21/05/2024 (before {MARI} local go-live).}
\field{langid}{british}
\field{title}{Imbalance prices per quarter-hour (Historical data)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/PZI6PTQ2/information.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods047/information/?sort=datetime
\endverb
\endentry
\entry{noauthor_intraday_nodate}{online}{}
\field{sortinit}{I}
\field{sortinithash}{8d291c51ee89b6cd86bf5379f0b151d8}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Net sum of intraday nominations of the implicit capacity allocated for energy exchanges for Belgium.}
\field{langid}{british}
\field{title}{Intraday implicit net position (Belgium's balance)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/XJ7KBDWG/information.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods022/information/?sort=datetime
\endverb
\endentry
\entry{lu_scenarios_2022}{article}{}
\name{author}{4}{}{%
{{un=0,uniquepart=base,hash=e20b6fceb410a42e1abe17804a826487}{%
@@ -435,6 +950,50 @@
\endverb
\keyw{Generative adversarial networks,Conditions,Electricity Price,Point forecasting,Probabilistic forecasting}
\endentry
\entry{noauthor_measured_nodate}{online}{}
\field{sortinit}{M}
\field{sortinithash}{4625c616857f13d17ce56f7d4f97d451}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Measured and upscaled, most recent, day-ahead and week-ahead forecasts for total load on the Belgian grid.}
\field{langid}{british}
\field{title}{Measured and forecasted total load on the Belgian grid (Historical data)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/8857IXIQ/table.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods001/table/?sort=datetime
\endverb
\endentry
\entry{noauthor_photovoltaic_nodate}{online}{}
\field{sortinit}{P}
\field{sortinithash}{ff3bcf24f47321b42cb156c2cc8a8422}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Measured and upscaled photovoltaic power generation on the Belgian grid.Please note that the measured and forecast values are in {MW}, it is of the users responsibility to interpret the values as such.}
\field{langid}{british}
\field{title}{Photovoltaic power production estimation and forecast on Belgian grid (Historical)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/7VB5YHYE/table.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods032/table/?sort=datetime
\endverb
\endentry
\entry{poggi_electricity_2023}{article}{}
\name{author}{3}{}{%
{{un=0,uniquepart=base,hash=d5449fc584ab2f2182b0b791e9e2524e}{%
@@ -499,7 +1058,76 @@
\verb{url}
\verb https://www.mdpi.com/2673-9909/3/2/18
\endverb
\keyw{autoregressive,deep learning,electricity price forecasting,machine learning,neural network,statistical method,univariate model}
\keyw{deep learning,autoregressive,electricity price forecasting,machine learning,neural network,statistical method,univariate model}
\endentry
\entry{rasul_autoregressive_2021}{misc}{}
\name{author}{4}{}{%
{{un=0,uniquepart=base,hash=3c17107e356e9e329a5b82ae2f7cd441}{%
family={Rasul},
familyi={R\bibinitperiod},
given={Kashif},
giveni={K\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=44cad1999660adf4ecf118ba14cc42e6}{%
family={Seward},
familyi={S\bibinitperiod},
given={Calvin},
giveni={C\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=e061c52f2addf0fbd4a3708586db1f4e}{%
family={Schuster},
familyi={S\bibinitperiod},
given={Ingmar},
giveni={I\bibinitperiod},
givenun=0}}%
{{un=0,uniquepart=base,hash=626b3b250f5889797be5d59aab9e0ac5}{%
family={Vollgraf},
familyi={V\bibinitperiod},
given={Roland},
giveni={R\bibinitperiod},
givenun=0}}%
}
\list{publisher}{1}{%
{arXiv}%
}
\strng{namehash}{6df40132e228a962ad11198a9dd9b91c}
\strng{fullhash}{17d56270d94f39fe44bf03ea876141dd}
\strng{bibnamehash}{17d56270d94f39fe44bf03ea876141dd}
\strng{authorbibnamehash}{17d56270d94f39fe44bf03ea876141dd}
\strng{authornamehash}{6df40132e228a962ad11198a9dd9b91c}
\strng{authorfullhash}{17d56270d94f39fe44bf03ea876141dd}
\field{sortinit}{R}
\field{sortinithash}{5e1c39a9d46ffb6bebd8f801023a9486}
\field{extradatescope}{labelyear}
\field{labeldatesource}{}
\true{uniqueprimaryauthor}
\field{labelnamesource}{author}
\field{labeltitlesource}{title}
\field{abstract}{In this work, we propose {\textbackslash}texttt\{{TimeGrad}\}, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.}
\field{day}{2}
\field{eprinttype}{arxiv}
\field{month}{2}
\field{number}{{arXiv}:2101.12072}
\field{title}{Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting}
\field{urlday}{15}
\field{urlmonth}{10}
\field{urlyear}{2023}
\field{year}{2021}
\field{dateera}{ce}
\field{urldateera}{ce}
\verb{eprint}
\verb 2101.12072 [cs]
\endverb
\verb{file}
\verb arXiv.org Snapshot:/Users/victormylle/Zotero/storage/8LIRWZ4G/2101.html:text/html;Full Text PDF:/Users/victormylle/Zotero/storage/QPPFJVR5/Rasul et al. - 2021 - Autoregressive Denoising Diffusion Models for Mult.pdf:application/pdf
\endverb
\verb{urlraw}
\verb http://arxiv.org/abs/2101.12072
\endverb
\verb{url}
\verb http://arxiv.org/abs/2101.12072
\endverb
\keyw{Computer Science - Machine Learning,Computer Science - Artificial Intelligence}
\endentry
\entry{weron_electricity_2014}{article}{}
\name{author}{1}{}{%
@@ -553,9 +1181,33 @@
\verb{url}
\verb https://www.sciencedirect.com/science/article/pii/S0169207014001083
\endverb
\keyw{Autoregression,Day-ahead market,Electricity price forecasting,Factor model,Forecast combination,Neural network,Probabilistic forecast,Seasonality}
\keyw{Electricity price forecasting,Autoregression,Day-ahead market,Factor model,Forecast combination,Neural network,Probabilistic forecast,Seasonality}
\endentry
\entry{noauthor_wind_nodate}{online}{}
\field{sortinit}{W}
\field{sortinithash}{4315d78024d0cea9b57a0c6f0e35ed0d}
\field{labeldatesource}{nodate}
\field{labeltitlesource}{title}
\field{abstract}{Measured and upscaled wind power generation on the Belgian grid.Please note that the measured and forecast values are in {MW}, it is of the users responsibility to interpret the values as such.}
\field{langid}{british}
\field{title}{Wind power production estimation and forecast on Belgian grid (Historical)}
\field{urlday}{18}
\field{urlmonth}{5}
\field{urlyear}{2024}
\field{urldateera}{ce}
\verb{file}
\verb Snapshot:/Users/victormylle/Zotero/storage/UTJUH5VQ/information.html:text/html
\endverb
\verb{urlraw}
\verb https://opendata.elia.be/explore/dataset/ods031/information/
\endverb
\verb{url}
\verb https://opendata.elia.be/explore/dataset/ods031/information/
\endverb
\endentry
\enddatalist
\missing{elia_open_data}
\missing{ho2020denoising}
\endrefsection
\endinput

View File

@@ -2829,12 +2829,21 @@
<bcf:datasource type="file" datatype="bibtex" glob="false">./references.bib</bcf:datasource>
</bcf:bibdata>
<bcf:section number="0">
<bcf:citekey order="1" intorder="1">weron_electricity_2014</bcf:citekey>
<bcf:citekey order="2" intorder="1">poggi_electricity_2023</bcf:citekey>
<bcf:citekey order="3" intorder="1">lu_scenarios_2022</bcf:citekey>
<bcf:citekey order="4" intorder="1">dumas_deep_2022</bcf:citekey>
<bcf:citekey order="5" intorder="1">rasul_autoregressive_2021</bcf:citekey>
<bcf:citekey order="6" intorder="1">dumas_deep_2022</bcf:citekey>
<bcf:citekey order="1" intorder="1">ho_denoising_2020</bcf:citekey>
<bcf:citekey order="2" intorder="1">ho2020denoising</bcf:citekey>
<bcf:citekey order="3" intorder="1">ho2020denoising</bcf:citekey>
<bcf:citekey order="4" intorder="1">weron_electricity_2014</bcf:citekey>
<bcf:citekey order="5" intorder="1">poggi_electricity_2023</bcf:citekey>
<bcf:citekey order="6" intorder="1">lu_scenarios_2022</bcf:citekey>
<bcf:citekey order="7" intorder="1">dumas_deep_2022</bcf:citekey>
<bcf:citekey order="8" intorder="1">rasul_autoregressive_2021</bcf:citekey>
<bcf:citekey order="9" intorder="1">dumas_probabilistic_2019</bcf:citekey>
<bcf:citekey order="10" intorder="1">elia_open_data</bcf:citekey>
<bcf:citekey order="11" intorder="1">noauthor_imbalance_nodate</bcf:citekey>
<bcf:citekey order="12" intorder="1">noauthor_measured_nodate</bcf:citekey>
<bcf:citekey order="13" intorder="1">noauthor_photovoltaic_nodate</bcf:citekey>
<bcf:citekey order="14" intorder="1">noauthor_wind_nodate</bcf:citekey>
<bcf:citekey order="15" intorder="1">noauthor_intraday_nodate</bcf:citekey>
</bcf:section>
<!-- SORTING TEMPLATES -->
<bcf:sortingtemplate name="nyt">

View File

@@ -0,0 +1,22 @@
[0] Config.pm:307> INFO - This is Biber 2.19
[0] Config.pm:310> INFO - Logfile is 'verslag.blg'
[38] biber-darwin:340> INFO - === Sat May 18, 2024, 23:55:25
[48] Biber.pm:419> INFO - Reading 'verslag.bcf'
[100] Biber.pm:979> INFO - Found 14 citekeys in bib section 0
[111] Biber.pm:4419> INFO - Processing section 0
[116] Biber.pm:4610> INFO - Looking for bibtex file './references.bib' for section 0
[119] bibtex.pm:1713> INFO - LaTeX decoding ...
[146] bibtex.pm:1519> INFO - Found BibTeX data source './references.bib'
[198] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable'
[198] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized'
[198] Biber.pm:4239> INFO - Sorting list 'nyt/apasortcite//global/global' of type 'entry' with template 'nyt' and locale 'en-US'
[198] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US'
[211] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable'
[211] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized'
[211] Biber.pm:4239> INFO - Sorting list 'nyt/global//global/global' of type 'entry' with template 'nyt' and locale 'en-US'
[211] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US'
[221] bbl.pm:660> INFO - Writing 'verslag.bbl' with encoding 'UTF-8'
[230] bbl.pm:763> INFO - Output to verslag.bbl
[230] Biber.pm:131> WARN - I didn't find a database entry for 'ho2020denoising' (section 0)
[230] Biber.pm:131> WARN - I didn't find a database entry for 'elia_open_data' (section 0)
[230] Biber.pm:133> INFO - WARNINGS: 2

View File

@@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.9.17) 18 MAY 2024 20:25
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.9.17) 18 MAY 2024 23:58
entering extended mode
restricted \write18 enabled.
file:line:error style messages enabled.
@@ -1073,32 +1073,41 @@ Package: ulem 2019/11/18
\c@g@acro@QR@int=\count483
\c@g@acro@AQR@int=\count484
\c@g@acro@NAQR@int=\count485
\c@g@acro@MSE@int=\count486
\c@g@acro@MAE@int=\count487
\c@g@acro@CRPS@int=\count488
\c@g@acro@NRV@int=\count489
\c@g@acro@PV@int=\count490
\c@g@acro@NP@int=\count491
\c@g@acro@TSO@int=\count492
\c@g@acro@DSO@int=\count493
\c@g@acro@BRP@int=\count494
\c@g@acro@BSP@int=\count495
\c@g@acro@SI@int=\count496
\c@g@acro@FCR@int=\count497
\c@g@acro@aFRR@int=\count498
\c@g@acro@mFRR@int=\count499
\c@g@acro@MW@int=\count500
\c@g@acro@GRU@int=\count486
\c@g@acro@LSTM@int=\count487
\c@g@acro@GAN@int=\count488
\c@g@acro@CTSGAN@int=\count489
\c@g@acro@VAE@int=\count490
\c@g@acro@MLP@int=\count491
\c@g@acro@GP@int=\count492
\c@g@acro@MSE@int=\count493
\c@g@acro@MAE@int=\count494
\c@g@acro@CRPS@int=\count495
\c@g@acro@TSPA@int=\count496
\c@g@acro@PLF@int=\count497
\c@g@acro@NRV@int=\count498
\c@g@acro@PV@int=\count499
\c@g@acro@NP@int=\count500
\c@g@acro@TSO@int=\count501
\c@g@acro@DSO@int=\count502
\c@g@acro@BRP@int=\count503
\c@g@acro@BSP@int=\count504
\c@g@acro@SI@int=\count505
\c@g@acro@FCR@int=\count506
\c@g@acro@aFRR@int=\count507
\c@g@acro@mFRR@int=\count508
\c@g@acro@MW@int=\count509
)
Package csquotes Info: Checking for multilingual support...
Package csquotes Info: ... found 'babel' package.
Package csquotes Info: Adjusting default style.
Package csquotes Info: Redefining alias 'default' -> 'english'.
LaTeX Font Info: Trying to load font information for T1+LinuxLibertineT-TLF on input line 133.
LaTeX Font Info: Trying to load font information for T1+LinuxLibertineT-TLF on input line 134.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertine/T1LinuxLibertineT-TLF.fd
File: T1LinuxLibertineT-TLF.fd 2017/03/20 (autoinst) Font definitions for T1/LinuxLibertineT-TLF.
)
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
(Font) scaled to size 12.0pt on input line 133.
(Font) scaled to size 12.0pt on input line 134.
(./verslag.aux (./sections/introduction.aux) (./sections/background.aux) (./sections/policies.aux) (./sections/literature_study.aux) (./sections/appendix.aux
LaTeX Warning: Label `fig:ar_linear_gru_comparison' multiply defined.
@@ -1109,36 +1118,36 @@ LaTeX Warning: Label `fig:ar_linear_gru_comparison' multiply defined.
))
\openout1 = `verslag.aux'.
LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for LS1/libertinust1math/m/n on input line 133.
LaTeX Font Info: Trying to load font information for LS1+libertinust1math on input line 133.
LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for LS1/libertinust1math/m/n on input line 134.
LaTeX Font Info: Trying to load font information for LS1+libertinust1math on input line 134.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertinust1math/ls1libertinust1math.fd
File: ls1libertinust1math.fd 2016/03/23 Fontinst v1.933 font definitions for LS1/libertinust1math.
)
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for LS2/libertinust1mathsym/m/n on input line 133.
LaTeX Font Info: Trying to load font information for LS2+libertinust1mathsym on input line 133.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for LS2/libertinust1mathsym/m/n on input line 134.
LaTeX Font Info: Trying to load font information for LS2+libertinust1mathsym on input line 134.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertinust1math/ls2libertinust1mathsym.fd
File: ls2libertinust1mathsym.fd 2015/04/17 v1.1.2-latex LibertinusT1Mathsym LS2 font definitions
)
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: Checking defaults for PU/pdf/m/n on input line 133.
LaTeX Font Info: ... okay on input line 133.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
LaTeX Font Info: Checking defaults for PU/pdf/m/n on input line 134.
LaTeX Font Info: ... okay on input line 134.
*geometry* driver: auto-detecting
*geometry* detected driver: pdftex
@@ -1176,16 +1185,16 @@ LaTeX Font Info: ... okay on input line 133.
(/usr/local/texlive/2023/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
[Loading MPS to PDF converter (version 2006.09.02).]
\scratchcounter=\count501
\scratchcounter=\count510
\scratchdimen=\dimen333
\scratchbox=\box77
\nofMPsegments=\count502
\nofMParguments=\count503
\nofMPsegments=\count511
\nofMParguments=\count512
\everyMPshowfont=\toks50
\MPscratchCnt=\count504
\MPscratchCnt=\count513
\MPscratchDim=\dimen334
\MPnumerator=\count505
\makeMPintoPDFobject=\count506
\MPnumerator=\count514
\makeMPintoPDFobject=\count515
\everyMPtoPDFconversion=\toks51
) (/usr/local/texlive/2023/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
Package: epstopdf-base 2020-01-24 v2.11 Base part for package epstopdf
@@ -1193,17 +1202,17 @@ Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 48
(/usr/local/texlive/2023/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
File: epstopdf-sys.cfg 2010/07/13 v1.3 Configuration of (r)epstopdf for TeX Live
))
\c@mv@tabular=\count507
\c@mv@boldtabular=\count508
LaTeX Info: Command `\dddot' is already robust on input line 133.
LaTeX Info: Command `\ddddot' is already robust on input line 133.
\c@mv@tabular=\count516
\c@mv@boldtabular=\count517
LaTeX Info: Command `\dddot' is already robust on input line 134.
LaTeX Info: Command `\ddddot' is already robust on input line 134.
Package caption Info: Begin \AtBeginDocument code.
Package caption Info: hyperref package is loaded.
Package caption Info: End \AtBeginDocument code.
(/usr/local/texlive/2023/texmf-dist/tex/latex/translations/translations-basic-dictionary-english.trsl
File: translations-basic-dictionary-english.trsl (english translation file `translations-basic-dictionary')
)
Package translations Info: loading dictionary `translations-basic-dictionary' for `english'. on input line 133.
Package translations Info: loading dictionary `translations-basic-dictionary' for `english'. on input line 134.
Package biblatex Info: Trying to load language 'english' -> 'english-apa'...
Package biblatex Info: ... file 'english-apa.lbx' found.
(/usr/local/texlive/2023/texmf-dist/tex/latex/biblatex-apa/english-apa.lbx
@@ -1233,10 +1242,25 @@ Package biblatex Info: Automatic encoding selection.
Package biblatex Info: Trying to load bibliographic data...
Package biblatex Info: ... file 'verslag.bbl' found.
(./verslag.bbl)
Package biblatex Info: Reference section=0 on input line 133.
Package biblatex Info: Reference segment=0 on input line 133.
Package hyperref Info: Link coloring ON on input line 133.
(./verslag.bbl
Package biblatex Warning: The following entry could not be found
(biblatex) in the database:
(biblatex) elia_open_data
(biblatex) Please verify the spelling and rerun
(biblatex) LaTeX afterwards.
Package biblatex Warning: The following entry could not be found
(biblatex) in the database:
(biblatex) ho2020denoising
(biblatex) Please verify the spelling and rerun
(biblatex) LaTeX afterwards.
)
Package biblatex Info: Reference section=0 on input line 134.
Package biblatex Info: Reference segment=0 on input line 134.
Package hyperref Info: Link coloring ON on input line 134.
(./verslag.out) (./verslag.out)
\@outlinefile=\write5
\openout5 = `verslag.out'.
@@ -1273,85 +1297,85 @@ Package hyperref Info: Link coloring ON on input line 133.
* \@reversemarginfalse
* (1in=72.27pt=25.4mm, 1cm=28.453pt)
LaTeX Font Info: Trying to load font information for T1+LinuxBiolinumT-TLF on input line 140.
LaTeX Font Info: Trying to load font information for T1+LinuxBiolinumT-TLF on input line 141.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertine/T1LinuxBiolinumT-TLF.fd
File: T1LinuxBiolinumT-TLF.fd 2017/03/20 (autoinst) Font definitions for T1/LinuxBiolinumT-TLF.
)
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/m/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
pdfTeX warning: pdflatex (file ./ea-en.pdf): PDF inclusion: found PDF version <1.7>, but at most version <1.5> allowed
<ea-en.pdf, id=139, 192.05753pt x 64.01918pt>
File: ea-en.pdf Graphic file (type pdf)
<use ea-en.pdf>
Package pdftex.def Info: ea-en.pdf used on input line 140.
Package pdftex.def Info: ea-en.pdf used on input line 141.
(pdftex.def) Requested size: 271.63632pt x 90.54071pt.
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/b/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/b/n' will be
(Font) scaled to size 24.88pt on input line 140.
(Font) scaled to size 24.88pt on input line 141.
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/b/n' will be
(Font) scaled to size 17.28pt on input line 140.
(Font) scaled to size 17.28pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/m/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/m/n' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/m/n' will be
(Font) scaled to size 6.0pt on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/m/it' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/m/it' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/m/it' will be
(Font) scaled to size 6.0pt on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Font shape `LS2/libertinust1mathsym/m/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS2/libertinust1mathsym/m/n' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS2/libertinust1mathsym/m/n' will be
(Font) scaled to size 6.0pt on input line 140.
LaTeX Font Info: Trying to load font information for LS2+libertinust1mathex on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Trying to load font information for LS2+libertinust1mathex on input line 141.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertinust1math/ls2libertinust1mathex.fd
File: ls2libertinust1mathex.fd 2016/03/15 v1.0.0-latex LibertinusMath extensions LS2 font definitions
)
LaTeX Font Info: Font shape `LS2/libertinust1mathex/m/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS2/libertinust1mathex/m/n' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS2/libertinust1mathex/m/n' will be
(Font) scaled to size 6.0pt on input line 140.
LaTeX Font Info: Trying to load font information for LS1+libertinust1mathbb on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Trying to load font information for LS1+libertinust1mathbb on input line 141.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertinust1math/ls1libertinust1mathbb.fd
File: ls1libertinust1mathbb.fd 2016/08/22 latex LibertinusT1Mathbb LS1 font definitions
)
LaTeX Font Info: Font shape `LS1/libertinust1mathbb/m/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1mathbb/m/n' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1mathbb/m/n' will be
(Font) scaled to size 6.0pt on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/b/n' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/b/n' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/b/n' will be
(Font) scaled to size 6.0pt on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/b/it' will be
(Font) scaled to size 12.0pt on input line 140.
(Font) scaled to size 12.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/b/it' will be
(Font) scaled to size 8.0pt on input line 140.
(Font) scaled to size 8.0pt on input line 141.
LaTeX Font Info: Font shape `LS1/libertinust1math/b/it' will be
(Font) scaled to size 6.0pt on input line 140.
(Font) scaled to size 6.0pt on input line 141.
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/b/n' will be
(Font) scaled to size 14.4pt on input line 140.
(Font) scaled to size 14.4pt on input line 141.
Underfull \hbox (badness 10000) in paragraph at lines 140--140
Underfull \hbox (badness 10000) in paragraph at lines 141--141
[]
Overfull \hbox (33.0pt too wide) in paragraph at lines 140--140
Overfull \hbox (33.0pt too wide) in paragraph at lines 141--141
[][][][][][][][]
[]
@@ -1361,14 +1385,14 @@ pdfTeX warning: pdflatex (file ./ugent-en.pdf): PDF inclusion: found PDF version
<ugent-en.pdf, id=140, 106.69862pt x 85.3589pt>
File: ugent-en.pdf Graphic file (type pdf)
<use ugent-en.pdf>
Package pdftex.def Info: ugent-en.pdf used on input line 140.
Package pdftex.def Info: ugent-en.pdf used on input line 141.
(pdftex.def) Requested size: 150.91232pt x 120.72095pt.
[1
{/usr/local/texlive/2023/texmf-var/fonts/map/pdftex/updmap/pdftex.map}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_25tcsq.enc} <./ea-en.pdf> <./ugent-en.pdf>]
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/m/n' will be
(Font) scaled to size 17.28pt on input line 148.
(Font) scaled to size 17.28pt on input line 149.
(./verslag.toc
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/b/n' will be
(Font) scaled to size 12.0pt on input line 3.
@@ -1380,7 +1404,7 @@ LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/b/n' will be
pdfTeX warning (ext4): destination with the same identifier (name{page.1}) has been already used, duplicate ignored
<to be read again>
\relax
l.149 \newpage
l.150 \newpage
[1
{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_nh77jq.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_76gpa5.enc}]
@@ -1396,51 +1420,51 @@ l.149 \newpage
] [4]
LaTeX Font Info: Trying to load font information for TS1+LinuxLibertineT-TLF on input line 65.
LaTeX Font Info: Trying to load font information for TS1+LinuxLibertineT-TLF on input line 58.
(/usr/local/texlive/2023/texmf-dist/tex/latex/libertine/TS1LinuxLibertineT-TLF.fd
File: TS1LinuxLibertineT-TLF.fd 2017/03/20 (autoinst) Font definitions for TS1/LinuxLibertineT-TLF.
)
LaTeX Font Info: Font shape `TS1/LinuxLibertineT-TLF/m/n' will be
(Font) scaled to size 12.0pt on input line 65.
(Font) scaled to size 12.0pt on input line 58.
[5{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_naooyc.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusMI.enc}]
Underfull \hbox (badness 10000) in paragraph at lines 92--97
Underfull \hbox (badness 10000) in paragraph at lines 85--90
[]
Underfull \hbox (badness 10000) in paragraph at lines 92--97
Underfull \hbox (badness 10000) in paragraph at lines 85--90
[]
[6]
Overfull \hbox (4.77582pt too wide) in paragraph at lines 111--113
Overfull \hbox (4.77582pt too wide) in paragraph at lines 104--106
[]\T1/LinuxLibertineT-TLF/m/n/12 There ex-ist many dif-fer-ent types of gen-er-a-tive mod-els. Some of the most pop-u-lar ones are:
[]
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/m/n' will be
(Font) scaled to size 14.4pt on input line 119.
(Font) scaled to size 14.4pt on input line 112.
[7]
<images/quantile_regression/cdf_quantiles_example.png, id=224, 722.7pt x 433.62pt>
File: images/quantile_regression/cdf_quantiles_example.png Graphic file (type png)
<use images/quantile_regression/cdf_quantiles_example.png>
Package pdftex.def Info: images/quantile_regression/cdf_quantiles_example.png used on input line 128.
Package pdftex.def Info: images/quantile_regression/cdf_quantiles_example.png used on input line 121.
(pdftex.def) Requested size: 364.19667pt x 218.51653pt.
<images/quantile_regression/reconstructed_cdf.png, id=227, 722.7pt x 433.62pt>
File: images/quantile_regression/reconstructed_cdf.png Graphic file (type png)
<use images/quantile_regression/reconstructed_cdf.png>
Package pdftex.def Info: images/quantile_regression/reconstructed_cdf.png used on input line 139.
Package pdftex.def Info: images/quantile_regression/reconstructed_cdf.png used on input line 132.
(pdftex.def) Requested size: 364.19667pt x 218.51653pt.
[8 <./images/quantile_regression/cdf_quantiles_example.png>]
Underfull \hbox (badness 10000) in paragraph at lines 146--147
Underfull \hbox (badness 10000) in paragraph at lines 139--140
[]
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
(Font) scaled to size 8.0pt on input line 149.
(Font) scaled to size 8.0pt on input line 142.
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
(Font) scaled to size 6.0pt on input line 149.
(Font) scaled to size 6.0pt on input line 142.
[9{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusMR.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusEX.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusSYM.enc} <./images/quantile_regression/reconstructed_cdf.png>]
Overfull \hbox (17.3759pt too wide) in paragraph at lines 180--181
Overfull \hbox (17.3759pt too wide) in paragraph at lines 173--174
\T1/LinuxLibertineT-TLF/m/n/12 Generative mod-els can be broadly clas-si-fied into two types: au-tore-gres-sive and non-autoregressive
[]
@@ -1448,36 +1472,36 @@ Overfull \hbox (17.3759pt too wide) in paragraph at lines 180--181
<images/quantile_regression/rnn/RNN_diagram.png, id=260, 753.81625pt x 324.21124pt>
File: images/quantile_regression/rnn/RNN_diagram.png Graphic file (type png)
<use images/quantile_regression/rnn/RNN_diagram.png>
Package pdftex.def Info: images/quantile_regression/rnn/RNN_diagram.png used on input line 234.
Package pdftex.def Info: images/quantile_regression/rnn/RNN_diagram.png used on input line 227.
(pdftex.def) Requested size: 364.19667pt x 156.63872pt.
[13 <./images/quantile_regression/rnn/RNN_diagram.png>]
<images/diffusion/Generation-with-Diffusion-Models.png, id=268, 926.00159pt x 228.96758pt>
<images/diffusion/Generation-with-Diffusion-Models.png, id=269, 926.00159pt x 228.96758pt>
File: images/diffusion/Generation-with-Diffusion-Models.png Graphic file (type png)
<use images/diffusion/Generation-with-Diffusion-Models.png>
Package pdftex.def Info: images/diffusion/Generation-with-Diffusion-Models.png used on input line 262.
Package pdftex.def Info: images/diffusion/Generation-with-Diffusion-Models.png used on input line 253.
(pdftex.def) Requested size: 364.19667pt x 90.05513pt.
[14 <./images/diffusion/Generation-with-Diffusion-Models.png>] [15{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusBMR.enc}{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertinust1math/libusBB.enc}]
Package caption Warning: \label without proper reference on input line 309.
See the caption package documentation for explanation.
LaTeX Warning: Reference `fig:diffusion_process' on page 16 undefined on input line 302.
<images/diffusion/diffusion_graphical_model.png, id=282, 979.66pt x 185.69376pt>
<images/diffusion/diffusion_graphical_model.png, id=284, 979.66pt x 185.69376pt>
File: images/diffusion/diffusion_graphical_model.png Graphic file (type png)
<use images/diffusion/diffusion_graphical_model.png>
Package pdftex.def Info: images/diffusion/diffusion_graphical_model.png used on input line 306.
Package pdftex.def Info: images/diffusion/diffusion_graphical_model.png used on input line 297.
(pdftex.def) Requested size: 364.19667pt x 69.03145pt.
LaTeX Warning: Citation 'ho2020denoising' on page 16 undefined on input line 298.
LaTeX Warning: Citation 'ho2020denoising' on page 16 undefined on input line 298.
[16 <./images/diffusion/diffusion_graphical_model.png>]
LaTeX Font Info: Trying to load font information for U+bbm on input line 335.
LaTeX Font Info: Trying to load font information for U+bbm on input line 325.
(/usr/local/texlive/2023/texmf-dist/tex/latex/bbm-macros/ubbm.fd
File: ubbm.fd 1999/03/15 V 1.2 Font definition for bbm font - TH
)
<images/quantile_regression/crps_visualization.png, id=292, 611.4042pt x 395.3169pt>
<images/quantile_regression/crps_visualization.png, id=294, 611.4042pt x 395.3169pt>
File: images/quantile_regression/crps_visualization.png Graphic file (type png)
<use images/quantile_regression/crps_visualization.png>
Package pdftex.def Info: images/quantile_regression/crps_visualization.png used on input line 353.
Package pdftex.def Info: images/quantile_regression/crps_visualization.png used on input line 343.
(pdftex.def) Requested size: 364.19667pt x 235.4849pt.
) [17 <./images/quantile_regression/crps_visualization.png>]
\openout2 = `sections/policies.aux'.
@@ -1489,19 +1513,19 @@ Package pdftex.def Info: images/quantile_regression/crps_visualization.png used
] [19]
\openout2 = `sections/literature_study.aux'.
(./sections/literature_study.tex
LaTeX Warning: Citation 'rasul_autoregressive_2021' on page 20 undefined on input line 9.
[20
(./sections/literature_study.tex [20
]) [21] (./sections/results.tex [22
]) [21] (./sections/results.tex
LaTeX Warning: Citation 'elia_open_data' on page 22 undefined on input line 5.
[22
] (./sections/results/models/linear.tex [23] [24]
<images/quantile_regression/nrv_mean_std_over_quarter.png, id=343, 722.7pt x 433.62pt>
<images/quantile_regression/nrv_mean_std_over_quarter.png, id=359, 722.7pt x 433.62pt>
File: images/quantile_regression/nrv_mean_std_over_quarter.png Graphic file (type png)
<use images/quantile_regression/nrv_mean_std_over_quarter.png>
Package pdftex.def Info: images/quantile_regression/nrv_mean_std_over_quarter.png used on input line 42.
@@ -1510,49 +1534,51 @@ Package pdftex.def Info: images/quantile_regression/nrv_mean_std_over_quarter.pn
LaTeX Warning: Reference `fig:autoregressive_linear_model_samples' on page 26 undefined on input line 81.
[26 <./images/quantile_regression/nrv_mean_std_over_quarter.png>]
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=357, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=366, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 86.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png, id=358, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png, id=367, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png Graphic file (type png)
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png>
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png used on input line 90.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=359, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=368, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 93.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png, id=360, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png, id=369, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png Graphic file (type png)
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png>
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png used on input line 97.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=361, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=370, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 100.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png, id=362, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png, id=371, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png Graphic file (type png)
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png>
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png used on input line 104.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=363, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=372, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 107.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
LaTeX Font Info: Font shape `T1/LinuxLibertineT-TLF/m/n' will be
(Font) scaled to size 10.95pt on input line 108.
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png, id=364, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png, id=373, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png Graphic file (type png)
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png>
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png used on input line 112.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
[26 <./images/quantile_regression/nrv_mean_std_over_quarter.png>]
Overfull \vbox (27.25206pt too high) has occurred while \output is active []
[27 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_864.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_4320.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_6336.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP_QE-Sample_7008.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Sample_7008.png>]
Overfull \hbox (11.8445pt too wide) in paragraph at lines 122--123
\T1/LinuxLibertineT-TLF/m/n/12 sam-ples of the au-tore-gres-sive model are more re-al-is-tic than the sam-ples of the non-autoregressive
@@ -1563,197 +1589,203 @@ Underfull \hbox (badness 10000) in paragraph at lines 122--123
[]
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png, id=380, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png, id=396, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png Graphic file (type png)
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png>
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png used on input line 127.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png, id=381, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png, id=397, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png Graphic file (type png)
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png>
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png used on input line 131.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png, id=382, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png, id=398, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png Graphic file (type png)
<use images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png>
Package pdftex.def Info: images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png used on input line 134.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png, id=383, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png, id=399, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png Graphic file (type png)
<use images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png>
Package pdftex.def Info: images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png used on input line 139.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg, id=385, 883.8621pt x 609.9588pt>
[28 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png>]
<images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg, id=410, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg used on input line 151.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg, id=386, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg, id=411, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg used on input line 156.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg, id=387, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg, id=412, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg used on input line 161.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg, id=388, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg, id=413, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg used on input line 166.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
[28 <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_864_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_864_samples.png> <./images/quantile_regression/aqr_linear_model_samples/AQR_NRV_Load_Wind_PV_NP-QE-Example_4320_samples.png> <./images/quantile_regression/naqr_linear_model_samples/NAQR_NRV_Load_Wind_PV_NP-Example_4320_samples.png>]) (./sections/results/models/non-linear.tex [29 <./images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg> <./images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg> <./images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg> <./images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg>] [30{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_7grukw.enc}]
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=413, 1180.8918pt x 595.5048pt>
Underfull \hbox (badness 4582) in paragraph at lines 169--169
[]\T1/LinuxLibertineT-TLF/m/n/12 Figure 10: |Over/underestimation of the quan-tiles for the au-tore-gres-sive and non-
[]
) [29 <./images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Training.jpeg> <./images/quantile_regression/quantile_performance/AQR_Quantile_Performance_Test.jpeg> <./images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Training.jpeg> <./images/quantile_regression/quantile_performance/NAQR_Quantile_Performance_Test.jpeg>] (./sections/results/models/non-linear.tex [30{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_7grukw.enc}]
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=429, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 78.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=414, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=430, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
<use images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
Package pdftex.def Info: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 82.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=415, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=431, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 85.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=416, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=432, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
<use images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
Package pdftex.def Info: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 89.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=417, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=433, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 92.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=418, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=434, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
<use images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
Package pdftex.def Info: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 96.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=419, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=435, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
<use images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
Package pdftex.def Info: images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 99.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=420, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=436, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
<use images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
Package pdftex.def Info: images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 104.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
[31]
<images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Training.jpeg, id=427, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Training.jpeg, id=443, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Training.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Training.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Training.jpeg used on input line 117.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Test.jpeg, id=428, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Test.jpeg, id=444, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Test.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Test.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Test.jpeg used on input line 122.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Training.jpeg, id=429, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Training.jpeg, id=445, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Training.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Training.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Training.jpeg used on input line 127.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Test.jpeg, id=430, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Test.jpeg, id=446, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Test.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Test.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Test.jpeg used on input line 132.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
Underfull \hbox (badness 4582) in paragraph at lines 135--135
[]\T1/LinuxLibertineT-TLF/m/n/12 Figure 11: |Over/underestimation of the quan-tiles for the au-tore-gres-sive and non-
[]\T1/LinuxLibertineT-TLF/m/n/12 Figure 12: |Over/underestimation of the quan-tiles for the au-tore-gres-sive and non-
[]
) [32 <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/aqr_non_linear_model_samples/AQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png> <./images/quantile_regression/naqr_non_linear_model_samples/NAQR_NL_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>] (./sections/results/models/gru.tex [33 <./images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Training.jpeg> <./images/quantile_regression/quantile_performance/AQR_NL_Quantile_Performance_Test.jpeg> <./images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Training.jpeg> <./images/quantile_regression/quantile_performance/NAQR_NL_Quantile_Performance_Test.jpeg>] [34]
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=458, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=474, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
<use images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
Package pdftex.def Info: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 75.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=459, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png, id=475, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png Graphic file (type png)
<use images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png>
Package pdftex.def Info: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png used on input line 79.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=460, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=476, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
<use images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
Package pdftex.def Info: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 82.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=461, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png, id=477, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png Graphic file (type png)
<use images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png>
Package pdftex.def Info: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png used on input line 86.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=462, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=478, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
<use images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
Package pdftex.def Info: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 89.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=463, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png, id=479, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png Graphic file (type png)
<use images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png>
Package pdftex.def Info: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png used on input line 93.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=464, 1180.8918pt x 595.5048pt>
<images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=480, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
<use images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
Package pdftex.def Info: images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 96.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=465, 1180.8918pt x 595.5048pt>
<images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png, id=481, 1180.8918pt x 595.5048pt>
File: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png Graphic file (type png)
<use images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>
Package pdftex.def Info: images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png used on input line 101.
(pdftex.def) Requested size: 223.07211pt x 112.49284pt.
[35]
<images/quantile_regression/quantile_performance/AQR_GRU_QP_Train.jpeg, id=471, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/AQR_GRU_QP_Train.jpeg, id=487, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/AQR_GRU_QP_Train.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/AQR_GRU_QP_Train.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/AQR_GRU_QP_Train.jpeg used on input line 114.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/AQR_GRU_QP_Test.jpeg, id=472, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/AQR_GRU_QP_Test.jpeg, id=488, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/AQR_GRU_QP_Test.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/AQR_GRU_QP_Test.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/AQR_GRU_QP_Test.jpeg used on input line 119.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/NAQR_GRU_QP_Train.jpeg, id=473, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/NAQR_GRU_QP_Train.jpeg, id=489, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/NAQR_GRU_QP_Train.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/NAQR_GRU_QP_Train.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/NAQR_GRU_QP_Train.jpeg used on input line 124.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
<images/quantile_regression/quantile_performance/NAQR_GRU_QP_Test.jpeg, id=474, 883.8621pt x 609.9588pt>
<images/quantile_regression/quantile_performance/NAQR_GRU_QP_Test.jpeg, id=490, 883.8621pt x 609.9588pt>
File: images/quantile_regression/quantile_performance/NAQR_GRU_QP_Test.jpeg Graphic file (type jpg)
<use images/quantile_regression/quantile_performance/NAQR_GRU_QP_Test.jpeg>
Package pdftex.def Info: images/quantile_regression/quantile_performance/NAQR_GRU_QP_Test.jpeg used on input line 129.
(pdftex.def) Requested size: 223.07211pt x 153.94125pt.
Underfull \hbox (badness 4582) in paragraph at lines 132--132
[]\T1/LinuxLibertineT-TLF/m/n/12 Figure 13: |Over/underestimation of the quan-tiles for the au-tore-gres-sive and non-
[]\T1/LinuxLibertineT-TLF/m/n/12 Figure 14: |Over/underestimation of the quan-tiles for the au-tore-gres-sive and non-
[]
) [36 <./images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_864.png> <./images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_4320.png> <./images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_6336.png> <./images/quantile_regression/aqr_gru_model_examples/AQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png> <./images/quantile_regression/naqr_gru_model_examples/NAQR_GRU_NRV_Load_Wind_PV_NP_QE-Sample_7008.png>] (./sections/results/models/diffusion.tex [37 <./images/quantile_regression/quantile_performance/AQR_GRU_QP_Train.jpeg> <./images/quantile_regression/quantile_performance/AQR_GRU_QP_Test.jpeg> <./images/quantile_regression/quantile_performance/NAQR_GRU_QP_Train.jpeg> <./images/quantile_regression/quantile_performance/NAQR_GRU_QP_Test.jpeg>]
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg, id=497, 1166.4378pt x 581.0508pt>
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg, id=513, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg Graphic file (type jpg)
<use images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg>
Package pdftex.def Info: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg used on input line 20.
(pdftex.def) Requested size: 204.85846pt x 102.04892pt.
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg, id=498, 1166.4378pt x 581.0508pt>
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg, id=514, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg Graphic file (type jpg)
<use images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg>
Package pdftex.def Info: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg used on input line 22.
(pdftex.def) Requested size: 204.85846pt x 102.04892pt.
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg, id=499, 1166.4378pt x 581.0508pt>
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg, id=515, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg Graphic file (type jpg)
<use images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg>
Package pdftex.def Info: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg used on input line 27.
(pdftex.def) Requested size: 204.85846pt x 102.04892pt.
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg, id=500, 1166.4378pt x 581.0508pt>
<images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg, id=516, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg Graphic file (type jpg)
<use images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg>
Package pdftex.def Info: images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg used on input line 29.
@@ -1766,28 +1798,28 @@ LaTeX Warning: `h' float specifier changed to `ht'.
Overfull \vbox (38.77904pt too high) has occurred while \output is active []
[39 <./images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 1_00000000.jpeg> <./images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 2_00000000.jpeg> <./images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 3_00000000.jpeg> <./images/diffusion/results/intermediates/Testing Intermediates 864_Sample intermediate 4_00000000.jpeg>]
<images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg, id=513, 1166.4378pt x 581.0508pt>
<images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg, id=529, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg Graphic file (type jpg)
<use images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg used on input line 90.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/results/samples/Diffusion_Test_Example_4320.jpeg, id=514, 1166.4378pt x 581.0508pt>
<images/diffusion/results/samples/Diffusion_Test_Example_4320.jpeg, id=530, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/samples/Diffusion_Test_Example_4320.jpeg Graphic file (type jpg)
<use images/diffusion/results/samples/Diffusion_Test_Example_4320.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_4320.jpeg used on input line 94.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/results/samples/Diffusion_Test_Example_6336.jpeg, id=515, 1166.4378pt x 581.0508pt>
<images/diffusion/results/samples/Diffusion_Test_Example_6336.jpeg, id=531, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/samples/Diffusion_Test_Example_6336.jpeg Graphic file (type jpg)
<use images/diffusion/results/samples/Diffusion_Test_Example_6336.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_6336.jpeg used on input line 98.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/results/samples/Diffusion_Test_Example_7008.jpeg, id=516, 1166.4378pt x 581.0508pt>
<images/diffusion/results/samples/Diffusion_Test_Example_7008.jpeg, id=532, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/samples/Diffusion_Test_Example_7008.jpeg Graphic file (type jpg)
<use images/diffusion/results/samples/Diffusion_Test_Example_7008.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_7008.jpeg used on input line 102.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
[40 <./images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg> <./images/diffusion/results/samples/Diffusion_Test_Example_4320.jpeg> <./images/diffusion/results/samples/Diffusion_Test_Example_6336.jpeg> <./images/diffusion/results/samples/Diffusion_Test_Example_7008.jpeg>]
<images/diffusion/results/samples/Diffusion_Test_Example_864_Only_NRV.jpeg, id=523, 1166.4378pt x 581.0508pt>
<images/diffusion/results/samples/Diffusion_Test_Example_864_Only_NRV.jpeg, id=539, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/samples/Diffusion_Test_Example_864_Only_NRV.jpeg Graphic file (type jpg)
<use images/diffusion/results/samples/Diffusion_Test_Example_864_Only_NRV.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_864_Only_NRV.jpeg used on input line 113.
@@ -1796,7 +1828,7 @@ File: images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg Graphic f
<use images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_864.jpeg used on input line 117.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/results/samples/Diffusion_Test_Example_4320_Only_NRV.jpeg, id=524, 1166.4378pt x 581.0508pt>
<images/diffusion/results/samples/Diffusion_Test_Example_4320_Only_NRV.jpeg, id=540, 1166.4378pt x 581.0508pt>
File: images/diffusion/results/samples/Diffusion_Test_Example_4320_Only_NRV.jpeg Graphic file (type jpg)
<use images/diffusion/results/samples/Diffusion_Test_Example_4320_Only_NRV.jpeg>
Package pdftex.def Info: images/diffusion/results/samples/Diffusion_Test_Example_4320_Only_NRV.jpeg used on input line 121.
@@ -1846,27 +1878,32 @@ Underfull \hbox (badness 10000) in paragraph at lines 6--7
LaTeX Font Info: Font shape `TS1/LinuxLibertineT-TLF/b/n' will be
(Font) scaled to size 12.0pt on input line 12.
) (./sections/results/policies/nrv_samples_policy.tex [45{/usr/local/texlive/2023/texmf-dist/fonts/enc/dvips/libertine/lbtn_7f4ce4.enc}]
<images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg, id=557, 1166.4378pt x 581.0508pt>
<images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg, id=573, 1166.4378pt x 581.0508pt>
File: images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg Graphic file (type jpg)
<use images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg>
Package pdftex.def Info: images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg used on input line 48.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg, id=558, 1166.4378pt x 581.0508pt>
<images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg, id=574, 1166.4378pt x 581.0508pt>
File: images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg Graphic file (type jpg)
<use images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg>
Package pdftex.def Info: images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg used on input line 52.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg, id=559, 1166.4378pt x 581.0508pt>
<images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg, id=575, 1166.4378pt x 581.0508pt>
File: images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg Graphic file (type jpg)
<use images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg>
Package pdftex.def Info: images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg used on input line 55.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
<images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg, id=560, 1166.4378pt x 581.0508pt>
<images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg, id=576, 1166.4378pt x 581.0508pt>
File: images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg Graphic file (type jpg)
<use images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg>
Package pdftex.def Info: images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg used on input line 60.
(pdftex.def) Requested size: 223.07211pt x 111.11894pt.
[46] [47 <./images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg> <./images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg> <./images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg> <./images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg>])) [48] [49] [50]
[46] [47 <./images/diffusion/policies/comparison/Testing_864_Low_CRPS.jpeg> <./images/diffusion/policies/comparison/Testing_864_High_CRPS.jpeg> <./images/diffusion/policies/comparison/Testing_7008_Low_CRPS.jpeg> <./images/diffusion/policies/comparison/Testing_7008_High_CRPS.jpeg>])) [48] [49]
Overfull \hbox (190.49152pt too wide) in paragraph at lines 197--197
\T1/LinuxLibertineT-TLF/m/n/12 cast-ing of im-bal-ance prices in the bel-gian con-text. | [],
[]
[50]
\openout2 = `sections/appendix.aux'.
(./sections/appendix.tex
@@ -2010,28 +2047,23 @@ LaTeX Warning: There were multiply-defined labels.
Package rerunfilecheck Info: File `verslag.out' has not changed.
(rerunfilecheck) Checksum: F2AF7787A9291093A9EEB35E4D1789C7;4849.
Package biblatex Warning: Please (re)run Biber on the file:
(biblatex) verslag
(biblatex) and rerun LaTeX afterwards.
Package logreq Info: Writing requests to 'verslag.run.xml'.
\openout1 = `verslag.run.xml'.
)
Here is how much of TeX's memory you used:
42850 strings out of 476025
911014 string characters out of 5790017
43767 strings out of 476025
948327 string characters out of 5790017
1884388 words of memory out of 5000000
62403 multiletter control sequences out of 15000+600000
609682 words of font info for 109 fonts, out of 8000000 for 9000
63312 multiletter control sequences out of 15000+600000
612391 words of font info for 118 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
84i,16n,131p,2100b,5180s stack positions out of 10000i,1000n,20000p,200000b,200000s
</Users/victormylle/Library/texlive/2023/texmf-var/fonts/pk/ljfour/public/bbm/bbm12.600pk></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertinust1math/LibertinusT1Math.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinBiolinumTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineT.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTB.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/libertine/LinLibertineTI.pfb></usr/local/texlive/2023/texmf-dist/fonts/type1/public/stix/stix-mathcal.pfb>
Output written on verslag.pdf (53 pages, 9027690 bytes).
Output written on verslag.pdf (53 pages, 9039490 bytes).
PDF statistics:
695 PDF objects out of 1000 (max. 8388607)
513 compressed objects within 6 object streams
132 named destinations out of 1000 (max. 500000)
724 PDF objects out of 1000 (max. 8388607)
542 compressed objects within 6 object streams
140 named destinations out of 1000 (max. 500000)
592 words of extra memory for PDF output out of 10000 (max. 10000000)

Binary file not shown.

View File

@@ -41,7 +41,7 @@
>
]>
<requests version="1.0">
<internal package="biblatex" priority="9" active="1">
<internal package="biblatex" priority="9" active="0">
<generic>latex</generic>
<provides type="dynamic">
<file>verslag.bcf</file>
@@ -64,7 +64,7 @@
<file>english-apa.lbx</file>
</requires>
</internal>
<external package="biblatex" priority="5" active="1">
<external package="biblatex" priority="5" active="0">
<generic>biber</generic>
<cmdline>
<binary>biber</binary>

Binary file not shown.

View File

@@ -31,6 +31,7 @@
\usepackage{tikz}
\usepackage{acro}
\usepackage{pdflscape}
\usetikzlibrary{positioning, calc}
% Electricity market

View File

@@ -22,8 +22,8 @@
\contentsline {subsection}{\numberline {5.2}Policies for Battery Optimization}{21}{subsection.5.2}%
\contentsline {section}{\numberline {6}Results \& Discussion}{22}{section.6}%
\contentsline {subsection}{\numberline {6.1}Data}{22}{subsection.6.1}%
\contentsline {subsection}{\numberline {6.2}Quantile Regression}{23}{subsection.6.2}%
\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{23}{subsubsection.6.2.1}%
\contentsline {subsection}{\numberline {6.2}Quantile Regression}{24}{subsection.6.2}%
\contentsline {subsubsection}{\numberline {6.2.1}Linear Model}{24}{subsubsection.6.2.1}%
\contentsline {subsubsection}{\numberline {6.2.2}Non-Linear Model}{30}{subsubsection.6.2.2}%
\contentsline {subsubsection}{\numberline {6.2.3}GRU Model}{33}{subsubsection.6.2.3}%
\contentsline {subsection}{\numberline {6.3}Diffusion}{37}{subsection.6.3}%

BIN
sample_13344_plot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 176 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 281 KiB

BIN
sample_4320_plot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 278 KiB

BIN
sample_6336_plot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 160 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 263 KiB

BIN
sample_7008_plot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 195 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 312 KiB

BIN
sample_864_plot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 184 KiB

BIN
sample_864_samples_plot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 274 KiB

View File

@@ -176,22 +176,22 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
crps_from_samples_metric.append(crps[0].mean().item())
if epoch is not None:
if epoch is not None and task is not None:
task.get_logger().report_scalar(
title="CRPS_from_samples",
series="test",
series="val",
value=np.mean(crps_from_samples_metric),
iteration=epoch,
)
# using the policy evaluator, evaluate the policy with the generated samples
if self.policy_evaluator is not None:
if self.policy_evaluator is not None and epoch != -1:
optimal_penalty, profit, charge_cycles = (
self.policy_evaluator.optimize_penalty_for_target_charge_cycles(
idx_samples=generated_samples,
test_loader=dataloader,
initial_penalty=900,
target_charge_cycles=283,
target_charge_cycles=58 * 400 / 356,
initial_learning_rate=5,
max_iterations=100,
tolerance=1,
@@ -205,22 +205,30 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
task.get_logger().report_scalar(
title="Optimal Penalty",
series="test",
series="val",
value=optimal_penalty,
iteration=epoch,
)
task.get_logger().report_scalar(
title="Optimal Profit", series="test", value=profit, iteration=epoch
title="Optimal Profit", series="val", value=profit, iteration=epoch
)
task.get_logger().report_scalar(
title="Optimal Charge Cycles",
series="test",
series="val",
value=charge_cycles,
iteration=epoch,
)
return (
np.mean(crps_from_samples_metric),
profit,
charge_cycles,
optimal_penalty,
generated_samples,
)
return np.mean(crps_from_samples_metric), generated_samples
def log_final_metrics(self, task, dataloader, train: bool = True):

View File

@@ -148,19 +148,6 @@ class Trainer:
running_loss /= len(train_loader.dataset)
test_loss = self.test(val_loader)
if self.patience is not None:
if (
self.best_score is None
or test_loss < self.best_score + self.delta
):
self.save_checkpoint(test_loss, task, epoch)
counter = 0
else:
counter += 1
if counter >= self.patience:
print("Early stopping triggered")
break
if task:
task.get_logger().report_scalar(
title=self.criterion.__class__.__name__,
@@ -194,7 +181,20 @@ class Trainer:
# )
if hasattr(self, "calculate_crps_from_samples"):
self.calculate_crps_from_samples(task, val_loader, epoch)
_, profit, charge_cycles, penalty, _ = self.calculate_crps_from_samples(task, val_loader, epoch)
if self.patience is not None:
if (
self.best_score is None
or profit > self.best_score
):
self.save_checkpoint(profit, task, epoch)
counter = 0
else:
counter += 1
if counter >= self.patience:
print("Early stopping triggered")
break
if task:
self.finish_training(task=task)

View File

@@ -3,7 +3,7 @@ from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(
task_name="AQR: GRU (8 - 512) + Load + Wind + PV + QE + NP"
task_name="AQR: Non-Linear (4 - 512) + Load + Wind + PV + QE + NP"
)
task.execute_remotely(queue_name="default", exit_process=True)
@@ -46,7 +46,7 @@ data_config.NOMINAL_NET_POSITION = True
data_config = task.connect(data_config, name="data_features")
data_processor = DataProcessor(data_config, path="", lstm=True)
data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor.set_batch_size(512)
data_processor.set_full_day_skip(False)
@@ -70,7 +70,7 @@ else:
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 512,
"num_layers": 8,
"num_layers": 4,
"dropout": 0.2,
"time_feature_embedding": 5,
}
@@ -83,25 +83,25 @@ time_embedding = TimeEmbedding(
# time_embedding = TrigonometricTimeEmbedding(data_processor.get_time_feature_size())
lstm_model = GRUModel(
time_embedding.output_dim(inputDim),
len(quantiles),
hidden_size=model_parameters["hidden_size"],
num_layers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
)
# non_linear_model = NonLinearRegression(
# lstm_model = GRUModel(
# time_embedding.output_dim(inputDim),
# len(quantiles),
# hiddenSize=model_parameters["hidden_size"],
# numLayers=model_parameters["num_layers"],
# hidden_size=model_parameters["hidden_size"],
# num_layers=model_parameters["num_layers"],
# dropout=model_parameters["dropout"],
# )
non_linear_model = NonLinearRegression(
time_embedding.output_dim(inputDim),
len(quantiles),
hiddenSize=model_parameters["hidden_size"],
numLayers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
)
# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
model = nn.Sequential(time_embedding, lstm_model)
model = nn.Sequential(time_embedding, non_linear_model)
model.output_size = 1
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
@@ -119,16 +119,16 @@ trainer = AutoRegressiveQuantileTrainer(
data_processor,
quantiles,
"cuda",
policy_evaluator=None,
policy_evaluator=policy_evaluator,
debug=False,
)
trainer.add_metrics_to_track(
[PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
)
trainer.early_stopping(patience=25)
trainer.plot_every(15)
trainer.train(task=task, epochs=epochs, remotely=True)
trainer.early_stopping(patience=6)
trainer.plot_every(4)
trainer.train(task=task, epochs=epochs, remotely=False)
### Policy Evaluation ###
idx_samples = trainer.test_set_samples