Non-linear results section done

This commit is contained in:
2024-05-06 14:23:10 +02:00
parent 177fa1ad86
commit 19ab597ae6
31 changed files with 376 additions and 310 deletions

View File

@@ -2,8 +2,8 @@ from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="AQR: Linear + QE (dim 2)")
# task.execute_remotely(queue_name="default", exit_process=True)
task = clearml_helper.get_task(task_name="AQR: Non-Linear (16 - 256) + QE (dim 2)")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.PolicyEvaluator import PolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
@@ -67,8 +67,8 @@ else:
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 512,
"num_layers": 8,
"hidden_size": 256,
"num_layers": 16,
"dropout": 0.2,
"time_feature_embedding": 2,
}
@@ -89,17 +89,17 @@ time_embedding = TimeEmbedding(
# dropout=model_parameters["dropout"],
# )
# non_linear_model = NonLinearRegression(
# time_embedding.output_dim(inputDim),
# len(quantiles),
# hiddenSize=model_parameters["hidden_size"],
# numLayers=model_parameters["num_layers"],
# dropout=model_parameters["dropout"],
# )
non_linear_model = NonLinearRegression(
time_embedding.output_dim(inputDim),
len(quantiles),
hiddenSize=model_parameters["hidden_size"],
numLayers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
)
linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
model = nn.Sequential(time_embedding, linear_model)
model = nn.Sequential(time_embedding, non_linear_model)
model.output_size = 1
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])

View File

@@ -1,10 +1,8 @@
from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NAQR: Non-Linear")
task = clearml_helper.get_task(
task_name="NAQR: Non-Linear (8 - 512) + NRV + LOAD + PV + WIND + NP"
)
clearml_helper = ClearMLHelper(project_name="Thesis/NAQR: GRU")
task = clearml_helper.get_task(task_name="NAQR: GRU (2 - 256)")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.PolicyEvaluator import PolicyEvaluator
@@ -29,21 +27,21 @@ from src.models.time_embedding_layer import TimeEmbedding
data_config = DataConfig()
data_config.NRV_HISTORY = True
data_config.LOAD_HISTORY = True
data_config.LOAD_FORECAST = True
data_config.LOAD_HISTORY = False
data_config.LOAD_FORECAST = False
data_config.WIND_FORECAST = True
data_config.WIND_HISTORY = True
data_config.WIND_FORECAST = False
data_config.WIND_HISTORY = False
data_config.PV_FORECAST = True
data_config.PV_HISTORY = True
data_config.PV_FORECAST = False
data_config.PV_HISTORY = False
data_config.NOMINAL_NET_POSITION = True
data_config.NOMINAL_NET_POSITION = False
data_config = task.connect(data_config, name="data_features")
data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor = DataProcessor(data_config, path="", lstm=True)
data_processor.set_batch_size(64)
data_processor.set_full_day_skip(True)
@@ -66,8 +64,8 @@ else:
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 512,
"num_layers": 8,
"hidden_size": 256,
"num_layers": 2,
"dropout": 0.2,
}
@@ -75,16 +73,24 @@ model_parameters = task.connect(model_parameters, name="model_parameters")
# linear_model = LinearRegression(inputDim, len(quantiles) * 96)
non_linear_model = NonLinearRegression(
# non_linear_model = NonLinearRegression(
# inputDim,
# len(quantiles) * 96,
# hiddenSize=model_parameters["hidden_size"],
# numLayers=model_parameters["num_layers"],
# dropout=model_parameters["dropout"],
# )
lstm_model = GRUModel(
inputDim,
len(quantiles) * 96,
hiddenSize=model_parameters["hidden_size"],
numLayers=model_parameters["num_layers"],
len(quantiles),
hidden_size=model_parameters["hidden_size"],
num_layers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
)
model = non_linear_model
model = lstm_model
model.output_size = 96
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])