Compared more policy results
@@ -145,4 +145,4 @@ Test data: 01-01-2023 until 08-10-2023
- [ ] Examine more differences between GRU and diffusion
- [ ] Other layers for the diffusion model (GRU, look at TSDiff)
-- [ ] Policies with other models (Linear, Non Linear)
+- [x] Policies with other models (Linear, Non Linear)
File diff suppressed because one or more lines are too long
@@ -13,7 +13,7 @@ from src.models.time_embedding_layer import TimeEmbedding
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
-task = clearml_helper.get_task(task_name="Autoregressive Quantile Regression: Non Linear + Quarter + DoW + Load + Wind + Net")
+task = clearml_helper.get_task(task_name="Autoregressive Quantile Regression: Linear + Quarter + DoW + Load + Wind + Net")


#### Data Processor ####
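`ClearMLHelper` is project code that does not appear in this diff. A minimal sketch of what it might look like, assuming `get_task` simply forwards to clearml's `Task.init`; the helper's real behavior may differ:

```python
# Hypothetical reconstruction -- ClearMLHelper is not defined in this diff.
# Assumes get_task just forwards to clearml's Task.init.
from clearml import Task

class ClearMLHelper:
    def __init__(self, project_name: str):
        self.project_name = project_name

    def get_task(self, task_name: str) -> Task:
        # Task.init registers (or resumes) an experiment under the project.
        return Task.init(project_name=self.project_name, task_name=task_name)
```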
@@ -68,9 +68,10 @@ model_parameters = task.connect(model_parameters, name="model_parameters")
time_embedding = TimeEmbedding(data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"])
# lstm_model = GRUModel(time_embedding.output_dim(inputDim), len(quantiles), hidden_size=model_parameters["hidden_size"], num_layers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
-non_linear_model = NonLinearRegression(time_embedding.output_dim(inputDim), len(quantiles), hiddenSize=model_parameters["hidden_size"], numLayers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
+# non_linear_model = NonLinearRegression(time_embedding.output_dim(inputDim), len(quantiles), hiddenSize=model_parameters["hidden_size"], numLayers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
+linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))

-model = nn.Sequential(time_embedding, non_linear_model)
+model = nn.Sequential(time_embedding, linear_model)
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
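Neither `LinearRegression` nor `NonLinearRegression` is defined in this diff; only their constructor signatures can be read off the calls above. A minimal sketch of what such quantile heads might look like, assuming one output unit per quantile level (parameter names other than `hiddenSize`, `numLayers`, and `dropout` are guesses, since the calls pass the first two arguments positionally):

```python
# Hypothetical sketch -- signatures inferred from the call sites above.
import torch.nn as nn

class LinearRegression(nn.Module):
    def __init__(self, inputDim: int, numQuantiles: int):
        super().__init__()
        # A single affine map from embedded features to one value per quantile.
        self.layer = nn.Linear(inputDim, numQuantiles)

    def forward(self, x):
        return self.layer(x)

class NonLinearRegression(nn.Module):
    def __init__(self, inputDim: int, numQuantiles: int, hiddenSize: int, numLayers: int, dropout: float):
        super().__init__()
        # An MLP: numLayers hidden blocks, then a linear quantile output layer.
        layers, width = [], inputDim
        for _ in range(numLayers):
            layers += [nn.Linear(width, hiddenSize), nn.ReLU(), nn.Dropout(dropout)]
            width = hiddenSize
        layers.append(nn.Linear(width, numQuantiles))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
```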
#### Trainer ####
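The trainer body lies outside this hunk, so the loss function is not visible here. Since the models emit `len(quantiles)` outputs, the standard choice for quantile regression is the pinball (quantile) loss; a minimal sketch, assuming predictions of shape `(batch, num_quantiles)` and scalar targets:

```python
# Hypothetical sketch -- the Trainer code is not part of this diff.
import torch

def pinball_loss(preds: torch.Tensor, target: torch.Tensor, quantiles: list[float]) -> torch.Tensor:
    # target: shape (batch,), broadcast against one prediction column per quantile.
    target = target.unsqueeze(-1)
    q = torch.tensor(quantiles, device=preds.device)
    error = target - preds
    # Per quantile q: max(q * error, (q - 1) * error), averaged over all elements.
    return torch.maximum(q * error, (q - 1) * error).mean()
```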