Started writing about GRU model

commit 12bff03d69
parent ac08707369
Date: 2024-04-22 15:54:25 +02:00
39 changed files with 479 additions and 117 deletions


@@ -2,9 +2,7 @@ from src.utils.clearml import ClearMLHelper
 #### ClearML ####
 clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
-task = clearml_helper.get_task(
-    task_name="AQR: Non-Linear (8 - 512 - 0.2) + Load + PV + Wind + Net Position + QE (dim 5)"
-)
+task = clearml_helper.get_task(task_name="AQR: GRU (2 - 256)")
 task.execute_remotely(queue_name="default", exit_process=True)
 from src.policies.PolicyEvaluator import PolicyEvaluator
@@ -30,19 +28,19 @@ data_config = DataConfig()
 data_config.NRV_HISTORY = True
-data_config.LOAD_HISTORY = True
-data_config.LOAD_FORECAST = True
+data_config.LOAD_HISTORY = False
+data_config.LOAD_FORECAST = False
-data_config.WIND_FORECAST = True
-data_config.WIND_HISTORY = True
+data_config.WIND_FORECAST = False
+data_config.WIND_HISTORY = False
-data_config.PV_FORECAST = True
-data_config.PV_HISTORY = True
+data_config.PV_FORECAST = False
+data_config.PV_HISTORY = False
-data_config.QUARTER = True
+data_config.QUARTER = False
 data_config.DAY_OF_WEEK = False
-data_config.NOMINAL_NET_POSITION = True
+data_config.NOMINAL_NET_POSITION = False
 data_config = task.connect(data_config, name="data_features")
@@ -70,8 +68,8 @@ else:
 model_parameters = {
     "learning_rate": 0.0001,
-    "hidden_size": 512,
-    "num_layers": 8,
+    "hidden_size": 256,
+    "num_layers": 2,
     "dropout": 0.2,
     "time_feature_embedding": 5,
 }
@@ -84,25 +82,25 @@ time_embedding = TimeEmbedding(
 # time_embedding = TrigonometricTimeEmbedding(data_processor.get_time_feature_size())
-# lstm_model = GRUModel(
-#     time_embedding.output_dim(inputDim),
-#     len(quantiles),
-#     hidden_size=model_parameters["hidden_size"],
-#     num_layers=model_parameters["num_layers"],
-#     dropout=model_parameters["dropout"],
-# )
-non_linear_model = NonLinearRegression(
+lstm_model = GRUModel(
     time_embedding.output_dim(inputDim),
     len(quantiles),
-    hiddenSize=model_parameters["hidden_size"],
-    numLayers=model_parameters["num_layers"],
+    hidden_size=model_parameters["hidden_size"],
+    num_layers=model_parameters["num_layers"],
     dropout=model_parameters["dropout"],
 )
+# non_linear_model = NonLinearRegression(
+#     time_embedding.output_dim(inputDim),
+#     len(quantiles),
+#     hiddenSize=model_parameters["hidden_size"],
+#     numLayers=model_parameters["num_layers"],
+#     dropout=model_parameters["dropout"],
+# )
 # linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
-model = nn.Sequential(time_embedding, non_linear_model)
+model = nn.Sequential(time_embedding, lstm_model)
 model.output_size = 1
 optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
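
For reference, the call site above passes (input dimension, number of quantiles, hidden_size, num_layers, dropout), which is consistent with a stacked GRU followed by a linear quantile head. A minimal sketch of such a GRUModel, assuming that signature; this is a hypothetical stand-in, since the class's actual implementation is not part of this diff:

import torch.nn as nn

class GRUModel(nn.Module):
    # Hypothetical stand-in for the repository's GRUModel; argument names
    # mirror the call site above (hidden_size, num_layers, dropout).
    def __init__(self, input_size, output_size, hidden_size=256, num_layers=2, dropout=0.2):
        super().__init__()
        # Stacked GRU; PyTorch applies dropout between layers, so it is
        # only active when num_layers > 1.
        self.gru = nn.GRU(
            input_size,
            hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True,
        )
        # One output per quantile: output_size == len(quantiles) at the call site.
        self.head = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # x: (batch, seq_len, input_size); predict from the last time step.
        out, _ = self.gru(x)
        return self.head(out[:, -1, :])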