Updated thesis

2024-05-09 13:25:22 +02:00
parent 4c4914e227
commit 907f62d9cd
29 changed files with 286 additions and 144 deletions

@@ -2,7 +2,7 @@ from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="AQR: Non-Linear (16 - 256) + QE (dim 2)")
task = clearml_helper.get_task(task_name="AQR: Linear + Load + Wind + PV + QE + NP")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.PolicyEvaluator import PolicyEvaluator
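
ClearMLHelper is a project-internal wrapper that is not part of this diff. A minimal sketch of what get_task plausibly does, assuming it is a thin layer over clearml.Task.init (the real helper in src/utils/clearml.py may do more):

# Sketch only; assumes ClearMLHelper wraps clearml.Task.init.
from clearml import Task

class ClearMLHelper:
    def __init__(self, project_name: str):
        self.project_name = project_name

    def get_task(self, task_name: str) -> Task:
        # Registers (or resumes) a task under the project. The script then
        # calls task.execute_remotely(...), which re-enqueues the run on a
        # ClearML agent and exits the local process.
        return Task.init(project_name=self.project_name, task_name=task_name)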
@@ -27,19 +27,19 @@ data_config = DataConfig()
data_config.NRV_HISTORY = True
-data_config.LOAD_HISTORY = False
-data_config.LOAD_FORECAST = False
+data_config.LOAD_HISTORY = True
+data_config.LOAD_FORECAST = True
-data_config.WIND_FORECAST = False
-data_config.WIND_HISTORY = False
+data_config.WIND_FORECAST = True
+data_config.WIND_HISTORY = True
-data_config.PV_FORECAST = False
-data_config.PV_HISTORY = False
+data_config.PV_FORECAST = True
+data_config.PV_HISTORY = True
data_config.QUARTER = True
data_config.DAY_OF_WEEK = False
-data_config.NOMINAL_NET_POSITION = False
+data_config.NOMINAL_NET_POSITION = True
data_config = task.connect(data_config, name="data_features")
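
Connecting the config via task.connect mirrors these flags as task configuration, so a remote run can override them from the ClearML UI. A hypothetical reconstruction of DataConfig, using only the attribute names visible in this diff:

# Hypothetical sketch; the repo's actual DataConfig may differ.
class DataConfig:
    NRV_HISTORY = False
    LOAD_HISTORY = False
    LOAD_FORECAST = False
    WIND_HISTORY = False
    WIND_FORECAST = False
    PV_HISTORY = False
    PV_FORECAST = False
    QUARTER = False
    DAY_OF_WEEK = False
    NOMINAL_NET_POSITION = False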
@@ -89,25 +89,25 @@ time_embedding = TimeEmbedding(
# dropout=model_parameters["dropout"],
# )
-non_linear_model = NonLinearRegression(
-    time_embedding.output_dim(inputDim),
-    len(quantiles),
-    hiddenSize=model_parameters["hidden_size"],
-    numLayers=model_parameters["num_layers"],
-    dropout=model_parameters["dropout"],
-)
+# non_linear_model = NonLinearRegression(
+#     time_embedding.output_dim(inputDim),
+#     len(quantiles),
+#     hiddenSize=model_parameters["hidden_size"],
+#     numLayers=model_parameters["num_layers"],
+#     dropout=model_parameters["dropout"],
+# )
-# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
+linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
-model = nn.Sequential(time_embedding, non_linear_model)
+model = nn.Sequential(time_embedding, linear_model)
model.output_size = 1
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
### Policy Evaluator ###
-# battery = Battery(2, 1)
-# baseline_policy = BaselinePolicy(battery, data_path="")
-# policy_evaluator = PolicyEvaluator(baseline_policy, task)
+battery = Battery(2, 1)
+baseline_policy = BaselinePolicy(battery, data_path="")
+policy_evaluator = PolicyEvaluator(baseline_policy, task)
#### Trainer ####
trainer = AutoRegressiveQuantileTrainer(
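
The headline change in the hunk above swaps the non-linear head for a linear one. LinearRegression is not shown in the diff; a minimal sketch, assuming it maps the time-embedded features straight to one output per quantile:

import torch.nn as nn

# Assumption: the repo's LinearRegression is essentially one nn.Linear layer.
class LinearRegression(nn.Module):
    def __init__(self, input_dim: int, n_quantiles: int):
        super().__init__()
        self.linear = nn.Linear(input_dim, n_quantiles)

    def forward(self, x):
        # (batch, input_dim) -> (batch, n_quantiles)
        return self.linear(x)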
@@ -117,7 +117,7 @@ trainer = AutoRegressiveQuantileTrainer(
    data_processor,
    quantiles,
    "cuda",
-    policy_evaluator=None,
+    policy_evaluator=policy_evaluator,
    debug=False,
)
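
The trainer's loss is defined elsewhere in the repo; for reference, a generic pinball (quantile) loss of the kind autoregressive quantile regression typically minimizes (a sketch, not the repo's implementation):

import torch

def pinball_loss(pred: torch.Tensor, target: torch.Tensor, quantiles) -> torch.Tensor:
    # pred: (batch, n_quantiles), target: (batch,)
    losses = []
    for i, q in enumerate(quantiles):
        err = target - pred[:, i]
        # Under-prediction is weighted by q, over-prediction by (1 - q).
        losses.append(torch.max(q * err, (q - 1.0) * err).mean())
    return torch.stack(losses).mean()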
@@ -129,32 +129,32 @@ trainer.plot_every(15)
trainer.train(task=task, epochs=epochs, remotely=True)
### Policy Evaluation ###
-# idx_samples = trainer.test_set_samples
-# _, test_loader = trainer.data_processor.get_dataloaders(
-#     predict_sequence_length=trainer.model.output_size, full_day_skip=False
-# )
+idx_samples = trainer.test_set_samples
+_, test_loader = trainer.data_processor.get_dataloaders(
+    predict_sequence_length=trainer.model.output_size, full_day_skip=False
+)
-# policy_evaluator.evaluate_test_set(idx_samples, test_loader)
-# policy_evaluator.plot_profits_table()
-# policy_evaluator.plot_thresholds_per_day()
+policy_evaluator.evaluate_test_set(idx_samples, test_loader)
+policy_evaluator.plot_profits_table()
+policy_evaluator.plot_thresholds_per_day()
-# optimal_penalty, profit, charge_cycles = (
-#     policy_evaluator.optimize_penalty_for_target_charge_cycles(
-#         idx_samples=idx_samples,
-#         test_loader=test_loader,
-#         initial_penalty=1000,
-#         target_charge_cycles=283,
-#         initial_learning_rate=3,
-#         max_iterations=150,
-#         tolerance=1,
-#     )
-# )
+optimal_penalty, profit, charge_cycles = (
+    policy_evaluator.optimize_penalty_for_target_charge_cycles(
+        idx_samples=idx_samples,
+        test_loader=test_loader,
+        initial_penalty=1000,
+        target_charge_cycles=283,
+        initial_learning_rate=3,
+        max_iterations=150,
+        tolerance=1,
+    )
+)
-# print(
-#     f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
-# )
-# task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
-# task.get_logger().report_single_value(name="Optimal Profit", value=profit)
-# task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
+print(
+    f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
+)
+task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
+task.get_logger().report_single_value(name="Optimal Profit", value=profit)
+task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
task.close()
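
optimize_penalty_for_target_charge_cycles is not part of this diff. Judging from its arguments, it iteratively tunes the cycling penalty until the simulated battery hits the target number of charge cycles. A hypothetical sketch of such a loop (the function name, update rule, and run_policy callback are assumptions):

def optimize_penalty(run_policy, initial_penalty, target_cycles,
                     learning_rate, max_iterations, tolerance):
    # run_policy(penalty) -> (profit, charge_cycles) under that penalty.
    penalty = initial_penalty
    profit, cycles = run_policy(penalty)
    for _ in range(max_iterations):
        error = cycles - target_cycles
        if abs(error) <= tolerance:
            break
        # More cycles than the target -> raise the penalty, and vice versa.
        penalty += learning_rate * error
        profit, cycles = run_policy(penalty)
    return penalty, profit, cycles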


@@ -2,7 +2,7 @@ from src.utils.clearml import ClearMLHelper
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(
task_name="Diffusion Training: hidden_sizes=[1024, 1024, 1024, 1024] (300 steps), lr=0.0001, time_dim=8 + Load + Wind + PV + NP"
task_name="Diffusion Training: hidden_sizes=[1024, 1024, 1024, 1024] (300 steps), lr=0.0001, time_dim=8"
)
task.execute_remotely(queue_name="default", exit_process=True)
@@ -19,16 +19,16 @@ from src.policies.PolicyEvaluator import PolicyEvaluator
data_config = DataConfig()
data_config.NRV_HISTORY = True
-data_config.LOAD_HISTORY = True
-data_config.LOAD_FORECAST = True
+data_config.LOAD_HISTORY = False
+data_config.LOAD_FORECAST = False
-data_config.PV_FORECAST = True
-data_config.PV_HISTORY = True
+data_config.PV_FORECAST = False
+data_config.PV_HISTORY = False
-data_config.WIND_FORECAST = True
-data_config.WIND_HISTORY = True
+data_config.WIND_FORECAST = False
+data_config.WIND_HISTORY = False
-data_config.NOMINAL_NET_POSITION = True
+data_config.NOMINAL_NET_POSITION = False
data_config = task.connect(data_config, name="data_features")
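
The diffusion model itself is outside this diff; its shape is only encoded in the task name (four 1024-unit hidden layers, 300 diffusion steps, an 8-dimensional step embedding). A speculative sketch of a denoiser with that geometry, inferred from the name alone:

import torch
import torch.nn as nn

# Speculative sketch; layer layout inferred from the task name, not from
# the repo's actual diffusion code.
class DenoiserMLP(nn.Module):
    def __init__(self, x_dim: int, hidden_sizes=(1024, 1024, 1024, 1024),
                 n_steps: int = 300, time_dim: int = 8):
        super().__init__()
        self.step_embedding = nn.Embedding(n_steps, time_dim)
        dims = [x_dim + time_dim, *hidden_sizes]
        layers = []
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        layers.append(nn.Linear(dims[-1], x_dim))  # predicts the added noise
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
        return self.net(torch.cat([x, self.step_embedding(t)], dim=-1))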