Added CRPS + profit logging and updated plots for non-autoregressive models

This commit is contained in:
2024-02-28 17:12:51 +01:00
parent 420c9dc6ac
commit fe1e388ffb
6 changed files with 253 additions and 70 deletions

View File

@@ -3,7 +3,7 @@ from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(
task_name="Autoregressive Quantile Regression: Non Linear"
task_name="Non Autoregressive Quantile Regression: Non Linear"
)
task.execute_remotely(queue_name="default", exit_process=True)
@@ -11,7 +11,10 @@ from src.policies.PolicyEvaluator import PolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.models.lstm_model import GRUModel
from src.data import DataProcessor, DataConfig
from src.trainers.quantile_trainer import AutoRegressiveQuantileTrainer
from src.trainers.quantile_trainer import (
AutoRegressiveQuantileTrainer,
NonAutoRegressiveQuantileRegression,
)
from src.trainers.trainer import Trainer
from src.utils.clearml import ClearMLHelper
from src.models import *
@@ -46,7 +49,7 @@ data_processor.set_full_day_skip(False)
#### Hyperparameters ####
data_processor.set_output_size(1)
data_processor.set_output_size(96)
inputDim = data_processor.get_input_size()
epochs = 300
@@ -77,7 +80,7 @@ time_embedding = TimeEmbedding(
# lstm_model = GRUModel(time_embedding.output_dim(inputDim), len(quantiles), hidden_size=model_parameters["hidden_size"], num_layers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
non_linear_model = NonLinearRegression(
time_embedding.output_dim(inputDim),
len(quantiles),
len(quantiles) * 96,
hiddenSize=model_parameters["hidden_size"],
numLayers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
@@ -85,6 +88,7 @@ non_linear_model = NonLinearRegression(
# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
model = nn.Sequential(time_embedding, non_linear_model)
model.output_size = 96
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
### Policy Evaluator ###
@@ -93,7 +97,18 @@ baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = PolicyEvaluator(baseline_policy, task)
#### Trainer ####
trainer = AutoRegressiveQuantileTrainer(
# trainer = AutoRegressiveQuantileTrainer(
# model,
# inputDim,
# optimizer,
# data_processor,
# quantiles,
# "cuda",
# policy_evaluator=policy_evaluator,
# debug=False,
# )
trainer = NonAutoRegressiveQuantileRegression(
model,
inputDim,
optimizer,

View File

@@ -1,7 +1,9 @@
from src.utils.clearml import ClearMLHelper
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="Diffusion Training")
task = clearml_helper.get_task(
task_name="Diffusion Training: hidden_sizes=[64, 64], lr=0.0001, time_dim=8"
)
task.execute_remotely(queue_name="default", exit_process=True)
from src.models import *
@@ -37,9 +39,9 @@ inputDim = data_processor.get_input_size()
print("Input dim: ", inputDim)
model_parameters = {
"epochs": 5000,
"epochs": 8000,
"learning_rate": 0.0001,
"hidden_sizes": [128, 128],
"hidden_sizes": [64, 64],
"time_dim": 8,
}