Updated Thesis and linear baseline

2024-04-16 21:19:19 +02:00
parent ef094c659c
commit 937b6abc0b
42 changed files with 814 additions and 8138 deletions


@@ -2,7 +2,7 @@ from src.utils.clearml import ClearMLHelper
 #### ClearML ####
 clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
-task = clearml_helper.get_task(task_name="AQR: Non Linear")
+task = clearml_helper.get_task(task_name="AQR: Linear Baseline")
 task.execute_remotely(queue_name="default", exit_process=True)
 from src.policies.PolicyEvaluator import PolicyEvaluator
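
ClearMLHelper is the repo's own wrapper and its internals are not part of this diff; as a rough sketch, assuming it delegates to the standard clearml SDK, the task setup above amounts to:

from clearml import Task

# Register (or reuse) the experiment under the thesis project.
task = Task.init(project_name="Thesis/NrvForecast", task_name="AQR: Linear Baseline")

# Abort the local run and re-enqueue this script on a remote agent
# listening on the "default" queue.
task.execute_remotely(queue_name="default", exit_process=True)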
@@ -27,21 +27,21 @@ from src.models.time_embedding_layer import TimeEmbedding
 data_config = DataConfig()
 data_config.NRV_HISTORY = True
-data_config.LOAD_HISTORY = True
-data_config.LOAD_FORECAST = True
+data_config.LOAD_HISTORY = False
+data_config.LOAD_FORECAST = False
-data_config.WIND_FORECAST = True
-data_config.WIND_HISTORY = True
+data_config.WIND_FORECAST = False
+data_config.WIND_HISTORY = False
-data_config.QUARTER = True
-data_config.DAY_OF_WEEK = True
+data_config.QUARTER = False
+data_config.DAY_OF_WEEK = False
-data_config.NOMINAL_NET_POSITION = True
+data_config.NOMINAL_NET_POSITION = False
 data_config = task.connect(data_config, name="data_features")
-data_processor = DataProcessor(data_config, path="", lstm=False)
+data_processor = DataProcessor(data_config, path="", lstm=True)
 data_processor.set_batch_size(512)
 data_processor.set_full_day_skip(False)
@@ -64,17 +64,18 @@ else:
 model_parameters = {
     "learning_rate": 0.0001,
-    "hidden_size": 512,
-    "num_layers": 5,
+    "hidden_size": 256,
+    "num_layers": 2,
     "dropout": 0.2,
     "time_feature_embedding": 8,
 }
 model_parameters = task.connect(model_parameters, name="model_parameters")
-time_embedding = TimeEmbedding(
-    data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"]
-)
+# time_embedding = TimeEmbedding(
+#     data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"]
+# )
 # lstm_model = GRUModel(
 #     time_embedding.output_dim(inputDim),
 #     len(quantiles),
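
The TimeEmbedding layer being commented out here lives in src.models.time_embedding_layer and is not shown in this diff; a hypothetical sketch of such a layer, assuming the trailing input columns are integer-coded time features that each get their own learned embedding:

import torch
import torch.nn as nn

class TimeEmbeddingSketch(nn.Module):
    # Embeds each categorical time feature and concatenates the result with
    # the remaining continuous inputs. num_categories is a guess (96 quarter
    # hours per day); the real layer may size this per feature.
    def __init__(self, num_time_features, embedding_dim, num_categories=96):
        super().__init__()
        self.num_time_features = num_time_features
        self.embedding_dim = embedding_dim
        self.embeddings = nn.ModuleList(
            nn.Embedding(num_categories, embedding_dim)
            for _ in range(num_time_features)
        )

    def output_dim(self, input_dim):
        # Each scalar time feature is replaced by an embedding_dim vector.
        return input_dim - self.num_time_features + self.num_time_features * self.embedding_dim

    def forward(self, x):
        continuous = x[..., : -self.num_time_features]
        embedded = [
            emb(x[..., -self.num_time_features + i].long())
            for i, emb in enumerate(self.embeddings)
        ]
        return torch.cat([continuous, *embedded], dim=-1)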
@@ -83,17 +84,19 @@ time_embedding = TimeEmbedding(
 #     dropout=model_parameters["dropout"],
 # )
-non_linear_model = NonLinearRegression(
-    time_embedding.output_dim(inputDim),
-    len(quantiles),
-    hiddenSize=model_parameters["hidden_size"],
-    numLayers=model_parameters["num_layers"],
-    dropout=model_parameters["dropout"],
-)
+# non_linear_model = NonLinearRegression(
+#     time_embedding.output_dim(inputDim),
+#     len(quantiles),
+#     hiddenSize=model_parameters["hidden_size"],
+#     numLayers=model_parameters["num_layers"],
+#     dropout=model_parameters["dropout"],
+# )
+# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
+linear_model = LinearRegression(inputDim, len(quantiles))
-model = nn.Sequential(time_embedding, non_linear_model)
 # model = nn.Sequential(time_embedding, lstm_model)
+model = linear_model
 model.output_size = 1
 optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
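
The new baseline is built from the raw inputDim features, skipping the embedding entirely. The LinearRegression class is not shown in this diff; a minimal sketch consistent with the call LinearRegression(inputDim, len(quantiles)) is a single affine layer emitting one prediction per target quantile:

import torch.nn as nn

class LinearRegressionSketch(nn.Module):
    def __init__(self, input_dim, num_quantiles):
        super().__init__()
        # One output unit per quantile, no hidden layers or nonlinearity.
        self.linear = nn.Linear(input_dim, num_quantiles)

    def forward(self, x):
        # (batch, input_dim) -> (batch, num_quantiles)
        return self.linear(x)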
@@ -117,8 +120,8 @@ trainer = AutoRegressiveQuantileTrainer(
 trainer.add_metrics_to_track(
     [PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
 )
-trainer.early_stopping(patience=10)
-trainer.plot_every(5)
+trainer.early_stopping(patience=5)
+trainer.plot_every(2)
 trainer.train(task=task, epochs=epochs, remotely=True)
 ### Policy Evaluation ###
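
PinballLoss in the tracked metrics above refers to the standard quantile (pinball) loss; a minimal sketch, assuming predictions of shape (batch, num_quantiles) and targets of shape (batch, 1):

import torch

def pinball_loss(preds, target, quantiles):
    # For quantile q and error e = y - y_hat, the pinball loss is
    # max(q * e, (q - 1) * e), averaged over batch and quantiles.
    q = torch.tensor(quantiles, device=preds.device).view(1, -1)
    err = target - preds  # target broadcasts across the quantile axis
    return torch.mean(torch.maximum(q * err, (q - 1) * err))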
@@ -137,7 +140,7 @@ optimal_penalty, profit, charge_cycles = (
     test_loader=test_loader,
     initial_penalty=1000,
     target_charge_cycles=283,
-    learning_rate=15,
+    initial_learning_rate=3,
     max_iterations=150,
     tolerance=1,
 )
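
PolicyEvaluator's search loop is not part of this diff, and the rename from learning_rate to initial_learning_rate is the only visible hint at its behavior; a hypothetical sketch of that kind of search, adjusting the penalty in proportion to the charge-cycle error with a decaying step until the 283-cycle target is met within the tolerance:

def find_optimal_penalty(evaluate_cycles, initial_penalty=1000,
                         target_charge_cycles=283, initial_learning_rate=3,
                         max_iterations=150, tolerance=1):
    # evaluate_cycles is a stand-in for running the charging policy under a
    # given penalty and counting the resulting charge cycles.
    penalty = initial_penalty
    for i in range(max_iterations):
        cycles = evaluate_cycles(penalty)
        error = cycles - target_charge_cycles
        if abs(error) <= tolerance:
            break
        lr = initial_learning_rate / (1 + i)  # shrink the step over iterations
        penalty += lr * error  # too many cycles -> raise the penalty
    return penalty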