Updated thesis

2024-04-25 14:08:43 +02:00
parent c7bcd5be55
commit 361414cd41
7 changed files with 127 additions and 105 deletions

View File

@@ -242,30 +242,31 @@ class DiffusionTrainer:
         _, generated_sampels = self.test(test_loader, -1, task)
         # self.policy_evaluator.plot_profits_table()
-        optimal_penalty, profit, charge_cycles = (
-            self.policy_evaluator.optimize_penalty_for_target_charge_cycles(
-                idx_samples=generated_sampels,
-                test_loader=test_loader,
-                initial_penalty=900,
-                target_charge_cycles=283,
-                initial_learning_rate=1,
-                max_iterations=50,
-                tolerance=1,
-            )
-        )
-        print(
-            f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
-        )
-        task.get_logger().report_single_value(
-            name="Optimal Penalty", value=optimal_penalty
-        )
-        task.get_logger().report_single_value(name="Optimal Profit", value=profit)
-        task.get_logger().report_single_value(
-            name="Optimal Charge Cycles", value=charge_cycles
-        )
+        if self.policy_evaluator:
+            optimal_penalty, profit, charge_cycles = (
+                self.policy_evaluator.optimize_penalty_for_target_charge_cycles(
+                    idx_samples=generated_sampels,
+                    test_loader=test_loader,
+                    initial_penalty=900,
+                    target_charge_cycles=283,
+                    initial_learning_rate=1,
+                    max_iterations=50,
+                    tolerance=1,
+                )
+            )
+            print(
+                f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
+            )
+            task.get_logger().report_single_value(
+                name="Optimal Penalty", value=optimal_penalty
+            )
+            task.get_logger().report_single_value(name="Optimal Profit", value=profit)
+            task.get_logger().report_single_value(
+                name="Optimal Charge Cycles", value=charge_cycles
+            )
         if task:
             task.close()
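
The new `if self.policy_evaluator:` guard makes the penalty search optional. For orientation, a minimal sketch of what a method with this signature could look like, as a scalar feedback loop on the penalty; the `simulate` callable and the update rule are assumptions for illustration, not the repository's implementation:

# Hypothetical sketch, not the repo's optimize_penalty_for_target_charge_cycles.
# `simulate` is an assumed callable: penalty -> (profit, charge_cycles).
def optimize_penalty_for_target_charge_cycles(
    simulate,
    initial_penalty=900,
    target_charge_cycles=283,
    initial_learning_rate=1,
    max_iterations=50,
    tolerance=1,
):
    penalty = initial_penalty
    profit, cycles = simulate(penalty)
    for _ in range(max_iterations):
        error = cycles - target_charge_cycles
        if abs(error) <= tolerance:
            break
        # A larger penalty discourages cycling, so step the penalty in
        # proportion to how far the cycle count overshoots the target.
        penalty += initial_learning_rate * error
        profit, cycles = simulate(penalty)
    return penalty, profit, cycles
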
@@ -436,13 +437,13 @@ class DiffusionTrainer:
                     inversed_samples_batched - inversed_expanded_targets
                 )
                 inversed_mae_mean = inversed_mae.mean()
-                all_inversed_mae.extend(inversed_mae_mean.tolist())
+                all_inversed_mae.append(inversed_mae_mean)
                 inversed_mse = np.square(
                     inversed_samples_batched - inversed_expanded_targets
                 )
                 inversed_mse_mean = inversed_mse.mean()
-                all_inversed_mse.extend(inversed_mse_mean.tolist())
+                all_inversed_mse.append(inversed_mse_mean)
                 # add all values from crps_mean to all_crps
                 all_crps.extend(crps_mean.tolist())
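
The extend-to-append change above fixes a type error: `inversed_mae.mean()` returns a NumPy scalar, and calling `.tolist()` on a scalar yields a plain Python float, which `list.extend` cannot iterate. A minimal reproduction, assuming only NumPy:

import numpy as np

batch_mean = np.abs(np.array([1.0, -2.0])).mean()  # np.float64 scalar
scores = []
# scores.extend(batch_mean.tolist())  # TypeError: 'float' object is not iterable
scores.append(batch_mean)  # stores the per-batch mean, as intended
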
@@ -460,12 +461,12 @@ class DiffusionTrainer:
             mean_inversed_mae = np.array(all_inversed_mae).mean()
             task.get_logger().report_single_value(
-                name="test_MSELoss", value=mean_inversed_mae
+                name="test_L1Loss", value=mean_inversed_mae
             )
             mean_inversed_mse = np.array(all_inversed_mse).mean()
             task.get_logger().report_single_value(
-                name="test_L1Loss", value=mean_inversed_mse
+                name="test_MSELoss", value=mean_inversed_mse
             )
             if self.best_score is None or mean_crps < self.best_score:

View File

@@ -2,7 +2,7 @@ from src.utils.clearml import ClearMLHelper
 clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
 task = clearml_helper.get_task(
-    task_name="Diffusion Training: hidden_sizes=[256, 256], lr=0.0001, time_dim=8"
+    task_name="Diffusion Training: hidden_sizes=[256, 256], lr=0.0001, time_dim=8 + Load + PV + Wind + NP"
 )
 task.execute_remotely(queue_name="default", exit_process=True)
@@ -19,16 +19,19 @@ from src.policies.PolicyEvaluator import PolicyEvaluator
 data_config = DataConfig()
 data_config.NRV_HISTORY = True
-data_config.LOAD_HISTORY = False
-data_config.LOAD_FORECAST = False
-data_config.WIND_FORECAST = False
-data_config.WIND_HISTORY = False
+data_config.LOAD_HISTORY = True
+data_config.LOAD_FORECAST = True
 data_config.PV_FORECAST = True
 data_config.PV_HISTORY = True
-data_config.QUARTER = False
-data_config.DAY_OF_WEEK = False
-data_config.NOMINAL_NET_POSITION = False
+data_config.WIND_FORECAST = True
+data_config.WIND_HISTORY = True
+data_config.QUARTER = True
+data_config.DAY_OF_WEEK = True
+data_config.NOMINAL_NET_POSITION = True
 data_config = task.connect(data_config, name="data_features")
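
Every flag switched on here presumably contributes columns to the model input, which is why the script prints inputDim before building the model (next hunk). A sketch of that mapping; the per-feature widths below are invented placeholders, not the real DataConfig values:

# Assumed per-feature column counts -- placeholders for illustration only.
FEATURE_WIDTHS = {
    "NRV_HISTORY": 24, "LOAD_HISTORY": 24, "LOAD_FORECAST": 24,
    "PV_FORECAST": 24, "PV_HISTORY": 24,
    "WIND_FORECAST": 24, "WIND_HISTORY": 24,
    "QUARTER": 1, "DAY_OF_WEEK": 1, "NOMINAL_NET_POSITION": 24,
}

def input_dim(config) -> int:
    # Sum the widths of every feature flag the config enables.
    return sum(width for name, width in FEATURE_WIDTHS.items()
               if getattr(config, name, False))
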
@@ -42,7 +45,7 @@ print("Input dim: ", inputDim)
 model_parameters = {
     "epochs": 15000,
     "learning_rate": 0.0001,
-    "hidden_sizes": [256, 256],
+    "hidden_sizes": [256, 256, 256],
     "time_dim": 8,
 }
@@ -70,7 +73,5 @@ baseline_policy = BaselinePolicy(battery, data_path="")
 policy_evaluator = PolicyEvaluator(baseline_policy, task)
 #### Trainer ####
-trainer = DiffusionTrainer(
-    model, data_processor, "cuda", policy_evaluator=policy_evaluator
-)
+trainer = DiffusionTrainer(model, data_processor, "cuda", policy_evaluator=None)
 trainer.train(model_parameters["epochs"], model_parameters["learning_rate"], task)
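
Combined with the guard added in DiffusionTrainer.train (first hunk), either wiring now works; passing policy_evaluator=None simply skips the post-test penalty search:

# Skip the penalty search after testing:
trainer = DiffusionTrainer(model, data_processor, "cuda", policy_evaluator=None)

# Or keep it, reusing the evaluator constructed above:
trainer = DiffusionTrainer(model, data_processor, "cuda", policy_evaluator=policy_evaluator)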