Compare commits

10 Commits

Author  SHA1  Message  Date
420c9dc6ac  Added yesterday policy evaluator  2024-02-26 18:21:06 +01:00
ca120e5715  Finished baseline policy evaluator  2024-02-26 18:20:53 +01:00
be38536758  Adding baseline policy evaluator  2024-02-26 16:26:03 +01:00
f1b54df2c9  Policy evaluation during training  2024-02-25 22:13:00 +01:00
Victor Mylle  90751866a4  Fixed git lfs issue  2024-02-22 16:52:03 +01:00
Victor Mylle  4ad3336b98  Set training script to execute remotely  2024-02-21 18:13:51 +01:00
Victor Mylle  f8823f7efa  Autoregressive Quantile Training with Policy evaluation  2024-02-21 18:11:38 +01:00
Victor Mylle  2b22b6935e  Merge branch 'February-Report' into main  2024-02-19 15:49:15 +01:00
Victor Mylle  174a82fab2  Plots to compare between quantile regression and diffusion  2024-02-18 19:21:59 +01:00
Victor Mylle  bd250a664b  Fixed diffusion confidence interval plot  2024-02-18 16:01:18 +01:00
16 changed files with 1132 additions and 275 deletions

.gitattributes (vendored): 1 line changed
View File

@@ -1 +0,0 @@
*.csv filter=lfs diff=lfs merge=lfs -text

View File

@@ -2,6 +2,7 @@ FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
RUN apt-get update
RUN apt-get install -y git
RUN apt-get install -y texlive-latex-base texlive-fonts-recommended texlive-fonts-extra texlive-bibtex-extra
COPY requirements.txt /tmp/requirements.txt

View File

@@ -159,7 +159,7 @@ Test data: 01-01-2023 until 08-10-2023
TODO:
- [ ] Ask about the diffusion model exercise from the generative models course
- [x] Ask about the diffusion model exercise from the generative models course -> no lab session on this
- [ ] Test the policy with non-autoregressive models (Non Linear first) -> if this is already poor, do not investigate further, but do mention it
- [ ] Policy on the test set -> regarding charge cycles (stop trading electricity)

View File

@@ -25,12 +25,19 @@ class NrvDataset(Dataset):
self.sequence_length = sequence_length
self.predict_sequence_length = predict_sequence_length
self.samples_to_skip = self.skip_samples(dataframe=dataframe)
self.samples_to_skip = self.skip_samples(dataframe=dataframe, full_day_skip=self.full_day_skip)
total_indices = set(
range(len(dataframe) - self.sequence_length - self.predict_sequence_length)
)
self.valid_indices = sorted(list(total_indices - set(self.samples_to_skip)))
# full day indices
full_day_skipped_samples = self.skip_samples(dataframe=dataframe, full_day_skip=True)
full_day_total_indices = set(
range(len(dataframe) - self.sequence_length - self.predict_sequence_length)
)
self.full_day_valid_indices = sorted(list(full_day_total_indices - set(full_day_skipped_samples)))
self.history_features = []
if self.data_config.LOAD_HISTORY:
self.history_features.append("total_load")
@@ -73,7 +80,7 @@ class NrvDataset(Dataset):
self.history_features, self.forecast_features = self.preprocess_data(dataframe)
def skip_samples(self, dataframe):
def skip_samples(self, dataframe, full_day_skip):
nan_rows = dataframe[dataframe.isnull().any(axis=1)]
nan_indices = nan_rows.index
skip_indices = [
@@ -91,7 +98,7 @@ class NrvDataset(Dataset):
# add indices that are not the start of a day (00:15) to the skip indices (use datetime column)
# get indices of all 00:15 timestamps
if self.full_day_skip:
if full_day_skip:
start_of_day_indices = dataframe[
dataframe["datetime"].dt.time != pd.Timestamp("00:00:00").time()
].index
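Note: this change threads full_day_skip through skip_samples so the dataset keeps two index lists: valid_indices (windows without NaNs) and full_day_valid_indices (additionally restricted to windows starting at midnight). A minimal, self-contained sketch of that index logic; the toy frame and simplified NaN handling are assumptions, not the repository's exact behaviour:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "datetime": pd.date_range("2023-01-01", periods=8, freq="15min"),
    "nrv": [1.0, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
})
seq_len, pred_len = 2, 1
total = set(range(len(df) - seq_len - pred_len))
# Window starts whose own row has a NaN (skip_samples expands this to all overlapping windows).
nan_skip = set(df[df.isnull().any(axis=1)].index)
# With full_day_skip, additionally drop windows that do not start at midnight.
midnight_skip = set(df[df["datetime"].dt.time != pd.Timestamp("00:00:00").time()].index)
valid_indices = sorted(total - nan_skip)
full_day_valid_indices = sorted(total - nan_skip - midnight_skip)
print(valid_indices, full_day_valid_indices)  # [0, 2, 3, 4] [0]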

View File

@@ -0,0 +1,253 @@
from clearml import Task
from tqdm import tqdm
from src.policies.simple_baseline import BaselinePolicy
import pandas as pd
import numpy as np
import torch
import plotly.express as px
from src.utils.imbalance_price_calculator import ImbalancePriceCalculator
class PolicyEvaluator:
def __init__(self, baseline_policy: BaselinePolicy, task: Task = None):
self.baseline_policy = baseline_policy
self.ipc = ImbalancePriceCalculator(data_path="")
self.dates = baseline_policy.test_data["DateTime"].dt.date.unique()
self.dates = pd.to_datetime(self.dates)
### Load Imbalance Prices ###
imbalance_prices = pd.read_csv("data/imbalance_prices.csv", sep=";")
imbalance_prices["DateTime"] = pd.to_datetime(
imbalance_prices["DateTime"], utc=True
)
self.imbalance_prices = imbalance_prices.sort_values(by=["DateTime"])
self.penalties = [0, 100, 300, 500, 800, 1000, 1500]
self.profits = []
self.task = task
def get_imbalance_prices_for_date(self, date):
imbalance_prices_day = self.imbalance_prices[
self.imbalance_prices["DateTime"].dt.date == date
]
return imbalance_prices_day["Positive imbalance price"].values
def evaluate_for_date(
self,
date,
idx_samples,
test_loader,
charge_thresholds=np.arange(-100, 250, 25),
discharge_thresholds=np.arange(-100, 250, 25),
):
idx = test_loader.dataset.get_idx_for_date(date.date())
print("Evaluated for idx: ", idx)
(initial, samples) = idx_samples[idx]
if len(initial.shape) == 2:
initial = initial.cpu().numpy()[0][-1]
else:
initial = initial.cpu().numpy()[-1]
samples = samples.cpu().numpy()
initial = np.repeat(initial, samples.shape[0])
combined = np.concatenate((initial.reshape(-1, 1), samples), axis=1)
reconstructed_imbalance_prices = (
self.ipc.get_imbalance_prices_2023_for_date_vectorized(date, combined)
)
reconstructed_imbalance_prices = torch.tensor(
reconstructed_imbalance_prices, device="cuda"
)
real_imbalance_prices = self.get_imbalance_prices_for_date(date.date())
for penalty in self.penalties:
found_charge_thresholds, found_discharge_thresholds = (
self.baseline_policy.get_optimal_thresholds(
reconstructed_imbalance_prices,
charge_thresholds,
discharge_thresholds,
penalty,
)
)
predicted_charge_threshold = found_charge_thresholds.mean(axis=0)
predicted_discharge_threshold = found_discharge_thresholds.mean(axis=0)
### Determine Profits and Charge Cycles ###
simulated_profit, simulated_charge_cycles = self.baseline_policy.simulate(
torch.tensor([[real_imbalance_prices]]),
torch.tensor([predicted_charge_threshold]),
torch.tensor([predicted_discharge_threshold]),
)
self.profits.append(
[
date,
penalty,
simulated_profit[0][0].item(),
simulated_charge_cycles[0][0].item(),
predicted_charge_threshold.item(),
predicted_discharge_threshold.item(),
]
)
def evaluate_test_set(self, idx_samples, test_loader):
self.profits = []
try:
for date in tqdm(self.dates):
self.evaluate_for_date(date, idx_samples, test_loader)
except KeyboardInterrupt:
print("Interrupted")
raise KeyboardInterrupt
except Exception as e:
print(e)
pass
self.profits = pd.DataFrame(
self.profits,
columns=[
"Date",
"Penalty",
"Profit",
"Charge Cycles",
"Charge Threshold",
"Discharge Threshold",
],
)
def plot_profits_table(self):
# Check if task or penalties are not set
if (
self.task is None
or not hasattr(self, "penalties")
or not hasattr(self, "profits")
):
print("Task, penalties, or profits not defined.")
return
if self.profits.empty:
print("Profits DataFrame is empty.")
return
# Aggregate profits and charge cycles by penalty, calculating totals and per-year values
aggregated = self.profits.groupby("Penalty").agg(
Total_Profit=("Profit", "sum"),
Total_Charge_Cycles=("Charge Cycles", "sum"),
Num_Days=("Date", "nunique"),
)
aggregated["Profit_Per_Year"] = (
aggregated["Total_Profit"] / aggregated["Num_Days"] * 365
)
aggregated["Charge_Cycles_Per_Year"] = (
aggregated["Total_Charge_Cycles"] / aggregated["Num_Days"] * 365
)
# Reset index to make 'Penalty' a column again and drop unnecessary columns
final_df = aggregated.reset_index().drop(
columns=["Total_Profit", "Total_Charge_Cycles", "Num_Days"]
)
# Rename columns to match expected output
final_df.columns = [
"Penalty",
"Total Profit (per year)",
"Total Charge Cycles (per year)",
]
# Profits till 400
profits_till_400 = self.get_profits_till_400()
# aggregate the final_df and profits_till_400 with columns: Penalty, total profit, total charge cycles, profit till 400, total charge cycles
final_df = final_df.merge(profits_till_400, on="Penalty")
# Log the final results table
self.task.get_logger().report_table(
"Test Set Results", "Profits per Penalty", iteration=0, table_plot=final_df
)
def plot_thresholds_per_day(self):
if self.task is None:
return
fig = px.line(
self.profits[self.profits["Penalty"] == 0],
x="Date",
y=["Charge Threshold", "Discharge Threshold"],
title="Charge and Discharge Thresholds per Day",
)
fig.update_layout(
width=1000,
height=600,
title_x=0.5,
)
self.task.get_logger().report_plotly(
"Thresholds per Day", "Thresholds per Day", iteration=0, figure=fig
)
def get_profits_as_scalars(self):
aggregated = self.profits.groupby("Penalty").agg(
Total_Profit=("Profit", "sum"),
Total_Charge_Cycles=("Charge Cycles", "sum"),
Num_Days=("Date", "nunique"),
)
aggregated["Profit_Per_Year"] = (
aggregated["Total_Profit"] / aggregated["Num_Days"] * 365
)
aggregated["Charge_Cycles_Per_Year"] = (
aggregated["Total_Charge_Cycles"] / aggregated["Num_Days"] * 365
)
# Reset index to make 'Penalty' a column again and drop unnecessary columns
final_df = aggregated.reset_index().drop(
columns=["Total_Profit", "Total_Charge_Cycles", "Num_Days"]
)
# Rename columns to match expected output
final_df.columns = ["Penalty", "Total Profit", "Total Charge Cycles"]
return final_df
def get_profits_till_400(self, profits: pd.DataFrame = None):
if profits is None:
profits = self.profits
# calculates profits until 400 charge cycles per year are reached
number_of_days = len(profits["Date"].unique())
usable_charge_cycles = (400 / 365) * number_of_days
# now sum the profit until the usable charge cycles are reached
penalty_profits = {}
penalty_charge_cycles = {}
for index, row in profits.iterrows():
penalty = row["Penalty"]
profit = row["Profit"]
charge_cycles = row["Charge Cycles"]
if penalty not in penalty_profits:
penalty_profits[penalty] = 0
penalty_charge_cycles[penalty] = 0
if penalty_charge_cycles[penalty] < usable_charge_cycles:
penalty_profits[penalty] += profit
penalty_charge_cycles[penalty] += charge_cycles
df = pd.DataFrame(
list(
zip(
penalty_profits.keys(),
penalty_profits.values(),
penalty_charge_cycles.values(),
)
),
columns=["Penalty", "Profit_till_400", "Cycles_till_400"],
)
return df
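Note: both plot_profits_table and get_profits_as_scalars annualize by scaling day totals with 365 / Num_Days. A quick standalone check of that aggregation (the column names mirror the DataFrame built in evaluate_test_set; the numbers are made up):

import pandas as pd

profits = pd.DataFrame({
    "Date": pd.to_datetime(["2023-01-01", "2023-01-02"] * 2),
    "Penalty": [0, 0, 100, 100],
    "Profit": [10.0, 20.0, 8.0, 12.0],
    "Charge Cycles": [1.0, 0.5, 0.8, 0.4],
})
agg = profits.groupby("Penalty").agg(
    Total_Profit=("Profit", "sum"),
    Total_Charge_Cycles=("Charge Cycles", "sum"),
    Num_Days=("Date", "nunique"),
)
# Scale the day totals up to a 365-day year, as in plot_profits_table.
agg["Profit_Per_Year"] = agg["Total_Profit"] / agg["Num_Days"] * 365
agg["Charge_Cycles_Per_Year"] = agg["Total_Charge_Cycles"] / agg["Num_Days"] * 365
print(agg)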

View File

@@ -0,0 +1,200 @@
from clearml import Task
from src.policies.simple_baseline import BaselinePolicy
from src.policies.PolicyEvaluator import PolicyEvaluator
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
class BaselinePolicyEvaluator(PolicyEvaluator):
def __init__(self, baseline_policy: BaselinePolicy, task: Task = None):
super(BaselinePolicyEvaluator, self).__init__(baseline_policy, task)
self.train_profits = []
def determine_thresholds_for_date(self, date):
charge_thresholds = np.arange(-100, 250, 25)
discharge_thresholds = np.arange(-100, 250, 25)
real_imbalance_prices = self.get_imbalance_prices_for_date(date.date())
for penalty in self.penalties:
found_charge_thresholds, found_discharge_thresholds = (
self.baseline_policy.get_optimal_thresholds(
torch.tensor([real_imbalance_prices]),
charge_thresholds,
discharge_thresholds,
penalty,
)
)
best_charge_threshold = found_charge_thresholds
best_discharge_threshold = found_discharge_thresholds
simulated_profit, simulated_charge_cycles = self.baseline_policy.simulate(
torch.tensor([[real_imbalance_prices]]),
torch.tensor([best_charge_threshold]),
torch.tensor([best_discharge_threshold]),
)
self.train_profits.append(
[
date,
penalty,
simulated_profit[0][0].item(),
simulated_charge_cycles[0][0].item(),
best_charge_threshold.item(),
best_discharge_threshold.item(),
]
)
def determine_best_thresholds(self):
self.train_profits = []
dates = self.baseline_policy.train_data["DateTime"].dt.date.unique()
dates = pd.to_datetime(dates)
try:
for date in tqdm(dates):
self.determine_thresholds_for_date(date)
except Exception as e:
print(e)
pass
self.train_profits = pd.DataFrame(
self.train_profits,
columns=[
"Date",
"Penalty",
"Profit",
"Charge Cycles",
"Charge Threshold",
"Discharge Threshold",
],
)
number_of_days = len(self.train_profits["Date"].unique())
usable_charge_cycles = (400 / 365) * number_of_days
intermediate_values = {penalty: {} for penalty in self.penalties}
# find the best threshold combination for each penalty based on the total profit on the data
for penalty in self.penalties:
profits_for_penalty = self.train_profits[
self.train_profits["Penalty"] == penalty
]
for index, row in profits_for_penalty.iterrows():
charge_threshold = row["Charge Threshold"]
discharge_threshold = row["Discharge Threshold"]
if (charge_threshold, discharge_threshold) not in intermediate_values[
penalty
]:
intermediate_values[penalty][
(charge_threshold, discharge_threshold)
] = (0, 0)
new_charge_cycles = (
intermediate_values[penalty][
(charge_threshold, discharge_threshold)
][1]
+ row["Charge Cycles"]
)
new_profit = (
intermediate_values[penalty][
(charge_threshold, discharge_threshold)
][0]
+ row["Profit"]
)
if new_charge_cycles <= usable_charge_cycles:
intermediate_values[penalty][
(charge_threshold, discharge_threshold)
] = (new_profit, new_charge_cycles)
best_thresholds = {penalty: [0, 0, 0, 0] for penalty in self.penalties}
for penalty in self.penalties:
best_profit = 0
for threshold, values in intermediate_values[penalty].items():
if values[0] > best_profit:
best_profit = values[0]
best_thresholds[penalty][0] = threshold[0]
best_thresholds[penalty][1] = threshold[1]
best_thresholds[penalty][2] = best_profit
best_thresholds[penalty][3] = values[1]
# create dataframe from best_thresholds with columns, Penalty, Charge Threshold, Discharge Threshold, Profit
data = [
(penalty, values[0], values[1], values[2], values[3])
for penalty, values in best_thresholds.items()
]
best_thresholds_df = pd.DataFrame(
data,
columns=[
"Penalty",
"Charge Threshold",
"Discharge Threshold",
"Profit (training data)",
f"Charge Cycles (training data: max {usable_charge_cycles})",
],
)
if self.task:
self.task.get_logger().report_table(
"Baseline Train Data",
"Best Thresholds for each Penalty on Training Data (up to 400 cycles / year)",
iteration=0,
table_plot=best_thresholds_df,
)
return best_thresholds
def evaluate_test_set(self, thresholds: dict):
"""Evaluate the test set using the given thresholds (multiple penalties)
Args:
thresholds (dict): Dictionary with penalties as keys and the corresponding thresholds tuple as values
"""
self.profits = []
try:
for date in tqdm(self.dates):
real_imbalance_prices = self.get_imbalance_prices_for_date(date.date())
for penalty in thresholds.keys():
charge_threshold = thresholds[penalty][0]
discharge_threshold = thresholds[penalty][1]
simulated_profit, simulated_charge_cycles = (
self.baseline_policy.simulate(
torch.tensor([[real_imbalance_prices]]),
torch.tensor([charge_threshold]),
torch.tensor([discharge_threshold]),
)
)
self.profits.append(
[
date,
penalty,
simulated_profit[0][0].item(),
simulated_charge_cycles[0][0].item(),
charge_threshold,
discharge_threshold,
]
)
self.profits = pd.DataFrame(
self.profits,
columns=[
"Date",
"Penalty",
"Profit",
"Charge Cycles",
"Charge Threshold",
"Discharge Threshold",
],
)
except Exception as e:
print(e)
pass
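Note: determine_best_thresholds accumulates profit and cycles per (charge, discharge) pair and only keeps accumulating while the pro-rated 400-cycles-per-year budget holds, then picks the most profitable pair per penalty. A compact sketch of that selection with invented numbers:

# (charge_threshold, discharge_threshold) -> per-day (profit, cycles); assumed toy data.
days = [
    ((0, 50), 12.0, 0.9),
    ((0, 50), 9.0, 0.7),
    ((25, 75), 15.0, 1.4),
]
budget = (400 / 365) * 2  # usable cycles over a 2-day window

totals = {}
for pair, profit, cycles in days:
    p, c = totals.get(pair, (0.0, 0.0))
    if c + cycles <= budget:  # accumulate only while under the cycle budget
        totals[pair] = (p + profit, c + cycles)

best = max(totals, key=lambda k: totals[k][0])
print(best, totals[best])  # (0, 50) (21.0, 1.6)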

View File

@@ -0,0 +1,76 @@
from datetime import timedelta
from clearml import Task
from src.policies.simple_baseline import BaselinePolicy
from src.policies.PolicyEvaluator import PolicyEvaluator
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
class YesterdayBaselinePolicyEvaluator(PolicyEvaluator):
def __init__(self, baseline_policy: BaselinePolicy, task: Task = None):
super(YesterdayBaselinePolicyEvaluator, self).__init__(baseline_policy, task)
def evaluate_for_date(
self,
date,
charge_thresholds=np.arange(-100, 250, 25),
discharge_thresholds=np.arange(-100, 250, 25),
):
real_imbalance_prices = self.get_imbalance_prices_for_date(date.date())
yesterday_imbalance_prices = self.get_imbalance_prices_for_date(
date.date() - timedelta(days=1)
)
yesterday_imbalance_prices = torch.tensor(
np.array([yesterday_imbalance_prices]), device="cpu"
)
for penalty in self.penalties:
yesterday_charge_thresholds, yesterday_discharge_thresholds = (
self.baseline_policy.get_optimal_thresholds(
yesterday_imbalance_prices,
charge_thresholds,
discharge_thresholds,
penalty,
)
)
yesterday_profit, yesterday_charge_cycles = self.baseline_policy.simulate(
torch.tensor([[real_imbalance_prices]]),
torch.tensor([yesterday_charge_thresholds.mean(axis=0)]),
torch.tensor([yesterday_discharge_thresholds.mean(axis=0)]),
)
self.profits.append(
[
date,
penalty,
yesterday_profit[0][0].item(),
yesterday_charge_cycles[0][0].item(),
yesterday_charge_thresholds.mean(axis=0).item(),
yesterday_discharge_thresholds.mean(axis=0).item(),
]
)
def evaluate_test_set(self):
self.profits = []
try:
for date in tqdm(self.dates):
self.evaluate_for_date(date)
except Exception as e:
print(e)
pass
self.profits = pd.DataFrame(
self.profits,
columns=[
"Date",
"Penalty",
"Profit",
"Charge Cycles",
"Charge Threshold",
"Discharge Threshold",
],
)
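Note: this baseline reuses the thresholds that were optimal on day d-1 to trade day d. Schematically, with a stub standing in for baseline_policy.get_optimal_thresholds:

from datetime import date, timedelta

def optimal_thresholds(prices):  # stub for baseline_policy.get_optimal_thresholds
    return min(prices), max(prices)

prices_by_day = {
    date(2023, 1, 1): [10, 40, 90],
    date(2023, 1, 2): [20, 60, 80],
}
d = date(2023, 1, 2)
charge_t, discharge_t = optimal_thresholds(prices_by_day[d - timedelta(days=1)])
print(charge_t, discharge_t)  # thresholds from yesterday, applied to today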

View File

@@ -0,0 +1,21 @@
from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="Global Thresholds Baseline")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.baselines.BaselinePolicyEvaluator import BaselinePolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
### Policy Evaluator ###
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = BaselinePolicyEvaluator(baseline_policy, task)
thresholds = policy_evaluator.determine_best_thresholds()
policy_evaluator.evaluate_test_set(thresholds)
policy_evaluator.plot_profits_table()
task.close()
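Note: the runner scripts create the ClearML task and call execute_remotely before the heavy imports, so the local process only enqueues the job and the agent re-runs the script from the top. A minimal sketch of that pattern with the plain clearml API (ClearMLHelper is the repository's own wrapper):

from clearml import Task

task = Task.init(project_name="Thesis/NrvForecast", task_name="Example")
task.execute_remotely(queue_name="default", exit_process=True)
# Everything below only runs on the remote agent.
import torch  # heavy imports deferred until after enqueueing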

View File

@@ -0,0 +1,22 @@
from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="Global Thresholds Baseline")
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.baselines.BaselinePolicyEvaluator import BaselinePolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.policies.baselines.YesterdayBaselinePolicyExecutor import (
YesterdayBaselinePolicyEvaluator,
)
### Policy Evaluator ###
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = YesterdayBaselinePolicyEvaluator(baseline_policy, task)
policy_evaluator.evaluate_test_set()
policy_evaluator.plot_profits_table()
task.close()

View File

@@ -8,7 +8,8 @@ import pandas as pd
import datetime
from tqdm import tqdm
from src.utils.imbalance_price_calculator import ImbalancePriceCalculator
import time
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
### import functions ###
@@ -16,7 +17,7 @@ from src.trainers.quantile_trainer import auto_regressive as quantile_auto_regre
from src.trainers.diffusion_trainer import sample_diffusion
from src.utils.clearml import ClearMLHelper
# argparse to parse task id and model type
### Arguments ###
parser = argparse.ArgumentParser()
parser.add_argument('--task_id', type=str, default=None)
parser.add_argument('--model_type', type=str, default=None)
@@ -27,6 +28,7 @@ assert args.task_id is not None, "Please specify task id"
assert args.model_type is not None, "Please specify model type"
assert args.model_name is not None, "Please specify model name"
### Baseline Policy ###
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
@@ -163,20 +165,17 @@ def get_next_day_profits_for_date(model, data_processor, test_loader, date, ipc,
return predicted_nrv_profits_cycles, baseline_profits_cycles, _charge_thresholds, _discharge_thresholds
def next_day_test_set(model, data_processor, test_loader, ipc, predict_NRV: callable):
penalties = [0, 10, 50, 150, 300, 500, 600, 800, 1000, 1500, 2000, 2500]
penalties = [0, 50, 250, 500, 1000, 1500]
predicted_nrv_profits_cycles = {i: [0, 0] for i in penalties}
baseline_profits_cycles = {i: [0, 0] for i in penalties}
charge_thresholds = {}
discharge_thresholds = {}
# get all dates in test set
dates = baseline_policy.test_data["DateTime"].dt.date.unique()
# dates back to datetime
dates = pd.to_datetime(dates)
for date in tqdm(dates[:10]):
for date in tqdm(dates):
try:
new_predicted_nrv_profits_cycles, new_baseline_profits_cycles, new_charge_thresholds, new_discharge_thresholds = get_next_day_profits_for_date(model, data_processor, test_loader, date, ipc, predict_NRV, penalties)
@@ -191,8 +190,7 @@ def next_day_test_set(model, data_processor, test_loader, ipc, predict_NRV: call
baseline_profits_cycles[penalty][1] += new_baseline_profits_cycles[penalty][1]
except Exception as e:
# print(f"Error for date {date}")
raise e
print(f"Error for date {date}")
return predicted_nrv_profits_cycles, baseline_profits_cycles, charge_thresholds, discharge_thresholds
@@ -222,9 +220,6 @@ def main():
# the charge_thresholds is a dictionary with date as key. The values of the dictionary is another dictionary with keys as penalties and values as the charge thresholds
# create density plot that shows a density plot of the charge thresholds for each penalty (use seaborn displot) (One plot with a different color for each penalty)
import seaborn as sns
import matplotlib.pyplot as plt
charge_thresholds_for_penalty = {}
for d in charge_thresholds.values():
for penalty, thresholds in d.items():
@@ -239,47 +234,73 @@ def main():
discharge_thresholds_for_penalty[penalty] = []
discharge_thresholds_for_penalty[penalty].extend(thresholds)
def plot_threshold_distribution(thresholds: dict, title: str):
data_to_plot = []
for penalty, values in thresholds.items():
for value in values:
data_to_plot.append({'Penalty': penalty, 'Value': value.item()})
df = pd.DataFrame(data_to_plot)
palette = sns.color_palette("bright", len(thresholds.keys()))
fig = sns.displot(data=df, x="Value", hue="Penalty", kind="kde", palette=palette)
plt.title(f'Density of {title} by Penalty')
plt.xlabel('Threshold')
plt.ylabel('Density')
plt.legend(title='Penalty')
task.get_logger().report_matplotlib_figure(
"Policy Results",
title,
iteration=0,
figure=fig
)
plt.close()
### Plot charge thresholds distribution ###
data_to_plot = []
for penalty, values in charge_thresholds_for_penalty.items():
for value in values:
data_to_plot.append({'Penalty': penalty, 'Value': value.item()})
df = pd.DataFrame(data_to_plot)
print(df.head())
palette = sns.color_palette("bright", len(charge_thresholds.keys()))
fig = sns.displot(data=df, x="Value", hue="Penalty", kind="kde", palette=palette)
plt.title('Density of Charge Thresholds by Penalty')
plt.xlabel('Charge Threshold')
plt.ylabel('Density')
plt.legend(title='Penalty')
task.get_logger().report_matplotlib_figure(
"Policy Results",
"Charge Thresholds",
iteration=0,
figure=fig
)
plt.close()
plot_threshold_distribution(charge_thresholds_for_penalty, "Charge Thresholds")
### Plot discharge thresholds distribution ###
data_to_plot = []
for penalty, values in discharge_thresholds_for_penalty.items():
for value in values:
data_to_plot.append({'Penalty': penalty, 'Value': value.item()})
df = pd.DataFrame(data_to_plot)
palette = sns.color_palette("bright", len(discharge_thresholds.keys()))
fig = sns.displot(data=df, x="Value", hue="Penalty", kind="kde", palette=palette)
plt.title('Density of Charge Thresholds by Penalty')
plt.xlabel('Charge Threshold')
plt.ylabel('Density')
plt.legend(title='Penalty')
task.get_logger().report_matplotlib_figure(
"Policy Results",
"Discharge Thresholds",
iteration=0,
figure=fig
)
plt.close()
plot_threshold_distribution(discharge_thresholds_for_penalty, "Discharge Thresholds")
def plot_thresholds_per_day(thresholds: dict, title: str):
# plot mean charge threshold per day (per penalty (other color))
data_to_plot = []
for date, values in thresholds.items():
for penalty, value in values.items():
mean_val = value.mean().item()
std_val = value.std().item() # Calculate standard deviation
data_to_plot.append({'Date': date, 'Penalty': penalty, 'Mean': mean_val, 'StdDev': std_val})
print(f"Date: {date}, Penalty: {penalty}, Mean: {mean_val}, StdDev: {std_val}")
df = pd.DataFrame(data_to_plot)
df["Date"] = pd.to_datetime(df["Date"])
fig = px.line(
df,
x="Date",
y="Mean",
color="Penalty",
title=title,
labels={"Mean": "Threshold", "Date": "Date"},
markers=True, # Adds markers to the lines
hover_data=["Penalty"], # Adds additional hover information
)
fig.update_layout(
width=1000, # Set the width of the figure
height=600, # Set the height of the figure
title_x=0.5, # Center the title horizontally
)
task.get_logger().report_plotly(
"Thresholds per Day",
title,
iteration=0,
figure=fig
)
### Plot mean charge thresholds per day ###
plot_thresholds_per_day(charge_thresholds, "Mean Charge Thresholds per Day")
### Plot mean discharge thresholds per day ###
plot_thresholds_per_day(discharge_thresholds, "Mean Discharge Thresholds per Day")
# create dataframe with columns "name", "penalty", "profit", "cycles"

View File

@@ -33,67 +33,29 @@ class AutoRegressiveTrainer(Trainer):
self.model.output_size = 1
def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
num_samples = len(sample_indices)
rows = num_samples # One row per sample since we only want one column
# check if self has get_plot_error
if hasattr(self, "get_plot_error"):
cols = 2
print("Using get_plot_error")
else:
cols = 1
print("Using get_plot")
fig = make_subplots(
rows=rows,
cols=cols,
subplot_titles=[f"Sample {i+1}" for i in range(num_samples)],
)
for i, idx in enumerate(sample_indices):
auto_regressive_output = self.auto_regressive(data_loader.dataset, [idx])
for actual_idx, idx in sample_indices.items():
auto_regressive_output = self.auto_regressive(data_loader.dataset, [idx]*1000)
if len(auto_regressive_output) == 3:
initial, predictions, target = auto_regressive_output
else:
initial, predictions, _, target = auto_regressive_output
initial, _, predictions, target = auto_regressive_output
initial = initial.squeeze(0)
predictions = predictions.squeeze(0)
target = target.squeeze(0)
# keep one initial
initial = initial[0]
target = target[0]
sub_fig = self.get_plot(initial, target, predictions, show_legend=(i == 0))
predictions = predictions
row = i + 1
col = 1
fig = self.get_plot(initial, target, predictions, show_legend=True)
for trace in sub_fig.data:
fig.add_trace(trace, row=row, col=col)
if cols == 2:
error_sub_fig = self.get_plot_error(
target, predictions
)
for trace in error_sub_fig.data:
fig.add_trace(trace, row=row, col=col + 1)
loss = self.criterion(
predictions.to(self.device), target.to(self.device)
).item()
fig["layout"]["annotations"][i].update(
text=f"{self.criterion.__class__.__name__}: {loss:.6f}"
task.get_logger().report_matplotlib_figure(
title="Training" if train else "Testing",
series=f'Sample {actual_idx}',
iteration=epoch,
figure=fig,
)
# y axis same for all plots
# fig.update_yaxes(range=[-1, 1], col=1)
fig.update_layout(height=1000 * rows)
task.get_logger().report_plotly(
title=f"{'Training' if train else 'Test'} Samples",
series="full_day",
iteration=epoch,
figure=fig,
)
def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
self.model.eval()
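Note: debug_plots now calls auto_regressive with [idx] * 1000, i.e. one start index replicated a thousand times, so the stochastic sampler produces 1000 trajectories for the same day and the plot can show quantile bands. The idea in isolation, with a stub rollout:

import numpy as np

def sample_trajectory(rng):  # stub for one stochastic autoregressive rollout
    return np.cumsum(rng.normal(size=96))

rng = np.random.default_rng(0)
trajectories = np.stack([sample_trajectory(rng) for _ in range(1000)])  # like [idx] * 1000
ci_low, ci_high = np.quantile(trajectories, [0.025, 0.975], axis=0)  # 95% band per time step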

View File

@@ -1,6 +1,7 @@
from clearml import Task
import torch
import torch.nn as nn
from src.policies.PolicyEvaluator import PolicyEvaluator
from torchinfo import summary
from src.losses.crps_metric import crps_from_samples
from src.data.preprocessing import DataProcessor
@@ -13,14 +14,20 @@ import seaborn as sns
import matplotlib.patches as mpatches
def sample_diffusion(model: DiffusionModel, n: int, inputs: torch.tensor, noise_steps=1000, beta_start=1e-4, beta_end=0.02, ts_length=96):
def sample_diffusion(
model: DiffusionModel,
n: int,
inputs: torch.tensor,
noise_steps=1000,
beta_start=1e-4,
beta_end=0.02,
ts_length=96,
):
device = next(model.parameters()).device
beta = torch.linspace(beta_start, beta_end, noise_steps).to(device)
alpha = 1. - beta
alpha = 1.0 - beta
alpha_hat = torch.cumprod(alpha, dim=0)
# inputs: (num_features) -> (batch_size, num_features)
# inputs: (time_steps, num_features) -> (batch_size, time_steps, num_features)
if len(inputs.shape) == 2:
inputs = inputs.repeat(n, 1)
elif len(inputs.shape) == 3:
@@ -41,53 +48,74 @@ def sample_diffusion(model: DiffusionModel, n: int, inputs: torch.tensor, noise_
else:
noise = torch.zeros_like(x)
x = 1/torch.sqrt(_alpha) * (x-((1-_alpha) / (torch.sqrt(1 - _alpha_hat))) * predicted_noise) + torch.sqrt(_beta) * noise
x = (
1
/ torch.sqrt(_alpha)
* (x - ((1 - _alpha) / (torch.sqrt(1 - _alpha_hat))) * predicted_noise)
+ torch.sqrt(_beta) * noise
)
x = torch.clamp(x, -1.0, 1.0)
return x
class DiffusionTrainer:
def __init__(self, model: nn.Module, data_processor: DataProcessor, device: torch.device):
def __init__(
self,
model: nn.Module,
data_processor: DataProcessor,
device: torch.device,
policy_evaluator: PolicyEvaluator = None,
):
self.model = model
self.device = device
self.noise_steps = 20
self.beta_start = 1e-4
self.noise_steps = 30
self.beta_start = 0.0001
self.beta_end = 0.02
self.ts_length = 96
self.data_processor = data_processor
self.beta = torch.linspace(self.beta_start, self.beta_end, self.noise_steps).to(self.device)
self.alpha = 1. - self.beta
self.beta = torch.linspace(self.beta_start, self.beta_end, self.noise_steps).to(
self.device
)
self.alpha = 1.0 - self.beta
self.alpha_hat = torch.cumprod(self.alpha, dim=0)
self.best_score = None
self.policy_evaluator = policy_evaluator
def noise_time_series(self, x: torch.tensor, t: int):
""" Add noise to time series
"""Add noise to time series
Args:
x (torch.tensor): shape (batch_size, time_steps)
t (int): index of time step
"""
sqrt_alpha_hat = torch.sqrt(self.alpha_hat[t])[:, None]
sqrt_one_minus_alpha_hat = torch.sqrt(1. - self.alpha_hat[t])[:, None]
sqrt_one_minus_alpha_hat = torch.sqrt(1.0 - self.alpha_hat[t])[:, None]
noise = torch.randn_like(x)
return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * noise, noise
def sample_timesteps(self, n: int):
""" Sample timesteps for noise
"""Sample timesteps for noise
Args:
n (int): number of samples
"""
return torch.randint(low=1, high=self.noise_steps, size=(n,))
def sample(self, model: DiffusionModel, n: int, inputs: torch.tensor):
x = sample_diffusion(model, n, inputs, self.noise_steps, self.beta_start, self.beta_end, self.ts_length)
x = sample_diffusion(
model,
n,
inputs,
self.noise_steps,
self.beta_start,
self.beta_end,
self.ts_length,
)
model.train()
return x
def random_samples(self, train: bool = True, num_samples: int = 10):
train_loader, test_loader = self.data_processor.get_dataloaders(
predict_sequence_length=96
@@ -98,9 +126,20 @@ class DiffusionTrainer:
else:
loader = test_loader
indices = np.random.randint(0, len(loader.dataset) - 1, size=num_samples)
# set seed
np.random.seed(42)
actual_indices = np.random.choice(
loader.dataset.full_day_valid_indices, num_samples, replace=False
)
indices = {}
for i in actual_indices:
indices[i] = loader.dataset.valid_indices.index(i)
print(actual_indices)
return indices
def init_clearml_task(self, task):
task.add_tags(self.model.__class__.__name__)
task.add_tags(self.__class__.__name__)
@@ -110,13 +149,24 @@ class DiffusionTrainer:
if self.data_processor.lstm:
inputDim = self.data_processor.get_input_size()
other_input_data = torch.randn(1024, inputDim[1], self.model.other_inputs_dim).to(self.device)
other_input_data = torch.randn(
1024, inputDim[1], self.model.other_inputs_dim
).to(self.device)
else:
other_input_data = torch.randn(1024, self.model.other_inputs_dim).to(self.device)
task.set_configuration_object("model", str(summary(self.model, input_data=[input_data, time_steps, other_input_data])))
other_input_data = torch.randn(1024, self.model.other_inputs_dim).to(
self.device
)
task.set_configuration_object(
"model",
str(
summary(
self.model, input_data=[input_data, time_steps, other_input_data]
)
),
)
self.data_processor = task.connect(self.data_processor, name="data_processor")
def train(self, epochs: int, learning_rate: float, task: Task = None):
self.best_score = None
optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
@@ -150,7 +200,7 @@ class DiffusionTrainer:
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss /= len(train_loader.dataset)
if epoch % 40 == 0 and epoch != 0:
@@ -159,21 +209,24 @@ class DiffusionTrainer:
if task:
task.get_logger().report_scalar(
title=criterion.__class__.__name__,
series='train',
series="train",
iteration=epoch,
value=loss.item(),
)
if epoch % 150 == 0 and epoch != 0:
self.debug_plots(task, True, train_loader, train_sample_indices, epoch)
self.debug_plots(task, False, test_loader, test_sample_indices, epoch)
self.debug_plots(
task, True, train_loader, train_sample_indices, epoch
)
self.debug_plots(
task, False, test_loader, test_sample_indices, epoch
)
if task:
task.close()
def debug_plots(self, task, training: bool, data_loader, sample_indices, epoch):
for i, idx in enumerate(sample_indices):
for actual_idx, idx in sample_indices.items():
features, target, _ = data_loader.dataset[idx]
features = features.to(self.device)
@@ -182,61 +235,115 @@ class DiffusionTrainer:
self.model.eval()
with torch.no_grad():
samples = self.sample(self.model, 100, features).cpu().numpy()
ci_99_upper = np.quantile(samples, 0.99, axis=0)
ci_99_lower = np.quantile(samples, 0.01, axis=0)
samples = self.data_processor.inverse_transform(samples)
target = self.data_processor.inverse_transform(target)
ci_95_upper = np.quantile(samples, 0.95, axis=0)
ci_95_lower = np.quantile(samples, 0.05, axis=0)
ci_99_upper = np.quantile(samples, 0.995, axis=0)
ci_99_lower = np.quantile(samples, 0.005, axis=0)
ci_90_upper = np.quantile(samples, 0.9, axis=0)
ci_90_lower = np.quantile(samples, 0.1, axis=0)
ci_95_upper = np.quantile(samples, 0.975, axis=0)
ci_95_lower = np.quantile(samples, 0.025, axis=0)
ci_50_upper = np.quantile(samples, 0.5, axis=0)
ci_50_lower = np.quantile(samples, 0.5, axis=0)
ci_90_upper = np.quantile(samples, 0.95, axis=0)
ci_90_lower = np.quantile(samples, 0.05, axis=0)
ci_50_lower = np.quantile(samples, 0.25, axis=0)
ci_50_upper = np.quantile(samples, 0.75, axis=0)
sns.set_theme()
time_steps = np.arange(0, 96)
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(time_steps, samples.mean(axis=0), label="Mean of NRV samples", linewidth=3)
ax.plot(
time_steps,
samples.mean(axis=0),
label="Mean of NRV samples",
linewidth=3,
)
# ax.fill_between(time_steps, ci_lower, ci_upper, color='b', alpha=0.2, label='Full Interval')
ax.fill_between(time_steps, ci_99_lower, ci_99_upper, color='b', alpha=0.2, label='99% Interval')
ax.fill_between(time_steps, ci_95_lower, ci_95_upper, color='b', alpha=0.2, label='95% Interval')
ax.fill_between(time_steps, ci_90_lower, ci_90_upper, color='b', alpha=0.2, label='90% Interval')
ax.fill_between(time_steps, ci_50_lower, ci_50_upper, color='b', alpha=0.2, label='50% Interval')
ax.fill_between(
time_steps,
ci_99_lower,
ci_99_upper,
color="b",
alpha=0.2,
label="99% Interval",
)
ax.fill_between(
time_steps,
ci_95_lower,
ci_95_upper,
color="b",
alpha=0.2,
label="95% Interval",
)
ax.fill_between(
time_steps,
ci_90_lower,
ci_90_upper,
color="b",
alpha=0.2,
label="90% Interval",
)
ax.fill_between(
time_steps,
ci_50_lower,
ci_50_upper,
color="b",
alpha=0.2,
label="50% Interval",
)
ax.plot(target, label="Real NRV", linewidth=3)
# full_interval_patch = mpatches.Patch(color='b', alpha=0.2, label='Full Interval')
ci_99_patch = mpatches.Patch(color='b', alpha=0.3, label='99% Interval')
ci_95_patch = mpatches.Patch(color='b', alpha=0.4, label='95% Interval')
ci_90_patch = mpatches.Patch(color='b', alpha=0.5, label='90% Interval')
ci_50_patch = mpatches.Patch(color='b', alpha=0.6, label='50% Interval')
ci_99_patch = mpatches.Patch(color="b", alpha=0.3, label="99% Interval")
ci_95_patch = mpatches.Patch(color="b", alpha=0.4, label="95% Interval")
ci_90_patch = mpatches.Patch(color="b", alpha=0.5, label="90% Interval")
ci_50_patch = mpatches.Patch(color="b", alpha=0.6, label="50% Interval")
ax.legend(handles=[ci_99_patch, ci_95_patch, ci_90_patch, ci_50_patch, ax.lines[0], ax.lines[1]])
ax.legend(
handles=[
ci_99_patch,
ci_95_patch,
ci_90_patch,
ci_50_patch,
ax.lines[0],
ax.lines[1],
]
)
task.get_logger().report_matplotlib_figure(
title="Training" if training else "Testing",
series=f'Sample {i}',
series=f"Sample {actual_idx}",
iteration=epoch,
figure=fig,
)
plt.close()
def test(self, data_loader: torch.utils.data.DataLoader, epoch: int, task: Task = None):
def test(
self, data_loader: torch.utils.data.DataLoader, epoch: int, task: Task = None
):
all_crps = []
for inputs, targets, _ in data_loader:
generated_samples = {}
for inputs, targets, idx_batch in data_loader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
print(inputs.shape, targets.shape)
number_of_samples = 100
sample = self.sample(self.model, number_of_samples, inputs)
# reduce samples from (batch_size*number_of_samples, time_steps) to (batch_size, number_of_samples, time_steps)
samples_batched = sample.reshape(inputs.shape[0], number_of_samples, 96)
# add samples to generated_samples generated_samples[idx.item()] = (initial, samples)
for i, (idx, samples) in enumerate(zip(idx_batch, samples_batched)):
generated_samples[idx.item()] = (
self.data_processor.inverse_transform(inputs[i][:96]),
self.data_processor.inverse_transform(samples),
)
# calculate crps
crps = crps_from_samples(samples_batched, targets)
crps_mean = crps.mean(axis=1)
@@ -252,16 +359,38 @@ class DiffusionTrainer:
if task:
task.get_logger().report_scalar(
title="CRPS",
series='test',
value=mean_crps,
iteration=epoch
title="CRPS", series="test", value=mean_crps, iteration=epoch
)
if self.policy_evaluator:
_, test_loader = self.data_processor.get_dataloaders(
predict_sequence_length=self.ts_length, full_day_skip=True
)
self.policy_evaluator.evaluate_test_set(generated_samples, test_loader)
df = self.policy_evaluator.get_profits_as_scalars()
for idx, row in df.iterrows():
task.get_logger().report_scalar(
title="Profit",
series=f"penalty_{row['Penalty']}",
value=row["Total Profit"],
iteration=epoch,
)
df = self.policy_evaluator.get_profits_till_400()
for idx, row in df.iterrows():
task.get_logger().report_scalar(
title="Profit_till_400",
series=f"penalty_{row['Penalty']}",
value=row["Profit_till_400"],
iteration=epoch,
)
def save_checkpoint(self, val_loss, task, iteration: int):
torch.save(self.model, "checkpoint.pt")
task.update_output_model(
model_path="checkpoint.pt", iteration=iteration, auto_delete_file=False
)
self.best_score = val_loss
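Note: noise_time_series and sample_diffusion implement the standard DDPM forward-noising and reverse-sampling rules (Ho et al., 2020), with $\alpha_t = 1 - \beta_t$ and $\bar{\alpha}_t = \prod_{s \le t} \alpha_s$:

$$x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon, \qquad \epsilon \sim \mathcal{N}(0, I)$$

$$x_{t-1} = \frac{1}{\sqrt{\alpha_t}} \left( x_t - \frac{1 - \alpha_t}{\sqrt{1 - \bar{\alpha}_t}}\, \epsilon_\theta(x_t, t) \right) + \sqrt{\beta_t}\, z, \qquad z \sim \mathcal{N}(0, I)$$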

View File

@@ -1,5 +1,6 @@
import torch
from tqdm import tqdm
from src.policies.PolicyEvaluator import PolicyEvaluator
from src.losses.crps_metric import crps_from_samples
from src.trainers.trainer import Trainer
from src.trainers.autoregressive_trainer import AutoRegressiveTrainer
@@ -10,6 +11,9 @@ import plotly.graph_objects as go
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.patches as mpatches
def sample_from_dist(quantiles, preds):
@@ -28,10 +32,11 @@ def sample_from_dist(quantiles, preds):
# random probabilities of (1000, 1)
import random
probs = np.array([random.random() for _ in range(1000)])
spline = CubicSpline(quantiles, preds, axis=1)
samples = spline(probs)
# get the diagonal
@@ -39,6 +44,7 @@ def sample_from_dist(quantiles, preds):
return samples
def auto_regressive(dataset, model, quantiles, idx_batch, sequence_length: int = 96):
device = next(model.parameters()).device
prev_features, targets = dataset.get_batch(idx_batch)
@@ -62,7 +68,7 @@ def auto_regressive(dataset, model, quantiles, idx_batch, sequence_length: int =
predictions_full = new_predictions_full.unsqueeze(1)
for i in range(sequence_length - 1):
if len(list(prev_features.shape)) == 2:
new_features = torch.cat(
(prev_features[:, 1:96], samples), dim=1
) # (batch_size, 96)
@@ -99,9 +105,7 @@ def auto_regressive(dataset, model, quantiles, idx_batch, sequence_length: int =
) # (batch_size, sequence_length)
with torch.no_grad():
new_predictions_full = model(
prev_features
) # (batch_size, quantiles)
new_predictions_full = model(prev_features) # (batch_size, quantiles)
predictions_full = torch.cat(
(predictions_full, new_predictions_full.unsqueeze(1)), dim=1
) # (batch_size, sequence_length, quantiles)
@@ -120,6 +124,7 @@ def auto_regressive(dataset, model, quantiles, idx_batch, sequence_length: int =
target_full.unsqueeze(-1),
)
class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
def __init__(
self,
@@ -129,10 +134,13 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
data_processor: DataProcessor,
quantiles: list,
device: torch.device,
policy_evaluator: PolicyEvaluator = None,
debug: bool = True,
):
self.quantiles = quantiles
self.test_set_samples = {}
self.policy_evaluator = policy_evaluator
criterion = PinballLoss(quantiles=quantiles)
super().__init__(
@@ -147,6 +155,7 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
def calculate_crps_from_samples(self, task, dataloader, epoch: int):
crps_from_samples_metric = []
generated_samples = {}
with torch.no_grad():
total_samples = len(dataloader.dataset) - 96
@@ -155,24 +164,59 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
if len(idx_batch) == 0:
continue
for idx in tqdm(idx_batch):
computed_idx_batch = [idx] * 100
_, _, samples, targets = self.auto_regressive(
initial, _, samples, targets = self.auto_regressive(
dataloader.dataset, idx_batch=computed_idx_batch
)
generated_samples[idx.item()] = (
self.data_processor.inverse_transform(initial),
self.data_processor.inverse_transform(samples),
)
samples = samples.unsqueeze(0)
targets = targets.squeeze(-1)
targets = targets[0].unsqueeze(0)
crps = crps_from_samples(samples, targets)
crps_from_samples_metric.append(crps[0].mean().item())
task.get_logger().report_scalar(
title="CRPS_from_samples", series="test", value=np.mean(crps_from_samples_metric), iteration=epoch
title="CRPS_from_samples",
series="test",
value=np.mean(crps_from_samples_metric),
iteration=epoch,
)
# using the policy evaluator, evaluate the policy with the generated samples
if self.policy_evaluator is not None:
_, test_loader = self.data_processor.get_dataloaders(
predict_sequence_length=self.model.output_size, full_day_skip=True
)
self.policy_evaluator.evaluate_test_set(generated_samples, test_loader)
df = self.policy_evaluator.get_profits_as_scalars()
# for each row, report the profits
for idx, row in df.iterrows():
task.get_logger().report_scalar(
title="Profit",
series=f"penalty_{row['Penalty']}",
value=row["Total Profit"],
iteration=epoch,
)
df = self.policy_evaluator.get_profits_till_400()
for idx, row in df.iterrows():
task.get_logger().report_scalar(
title="Profit_till_400",
series=f"penalty_{row['Penalty']}",
value=row["Profit_till_400"],
iteration=epoch,
)
def log_final_metrics(self, task, dataloader, train: bool = True):
metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
transformed_metrics = {
@@ -192,19 +236,25 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
if train == False:
for idx in tqdm(idx_batch):
computed_idx_batch = [idx] * 100
_, outputs, samples, targets = self.auto_regressive(
computed_idx_batch = [idx] * 250
initial, outputs, samples, targets = self.auto_regressive(
dataloader.dataset, idx_batch=computed_idx_batch
)
# save the samples for the idx, these will be used for evaluating the policy
self.test_set_samples[idx.item()] = (
self.data_processor.inverse_transform(initial),
self.data_processor.inverse_transform(samples),
)
samples = samples.unsqueeze(0)
targets = targets.squeeze(-1)
targets = targets[0].unsqueeze(0)
crps = crps_from_samples(samples, targets)
crps_from_samples_metric.append(crps[0].mean().item())
_, outputs, samples, targets = self.auto_regressive(
dataloader.dataset, idx_batch=idx_batch
)
@@ -258,39 +308,39 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
if train == False:
task.get_logger().report_single_value(
name="test_CRPS_from_samples_transformed", value=np.mean(crps_from_samples_metric)
name="test_CRPS_from_samples_transformed",
value=np.mean(crps_from_samples_metric),
)
def get_plot_error(
self,
next_day,
predictions,
):
metric = PinballLoss(quantiles=self.quantiles)
fig = go.Figure()
# def get_plot_error(
# self,
# next_day,
# predictions,
# ):
# metric = PinballLoss(quantiles=self.quantiles)
# fig = go.Figure()
next_day_np = next_day.view(-1).cpu().numpy()
predictions_np = predictions.cpu().numpy()
# next_day_np = next_day.view(-1).cpu().numpy()
# predictions_np = predictions.cpu().numpy()
if True:
next_day_np = self.data_processor.inverse_transform(next_day_np)
predictions_np = self.data_processor.inverse_transform(predictions_np)
# if True:
# next_day_np = self.data_processor.inverse_transform(next_day_np)
# predictions_np = self.data_processor.inverse_transform(predictions_np)
# for each time step, calculate the error using the metric
errors = []
for i in range(96):
# # for each time step, calculate the error using the metric
# errors = []
# for i in range(96):
target_tensor = torch.tensor(next_day_np[i]).unsqueeze(0)
prediction_tensor = torch.tensor(predictions_np[i]).unsqueeze(0)
# target_tensor = torch.tensor(next_day_np[i]).unsqueeze(0)
# prediction_tensor = torch.tensor(predictions_np[i]).unsqueeze(0)
errors.append(metric(prediction_tensor, target_tensor))
# errors.append(metric(prediction_tensor, target_tensor))
# plot the error
fig.add_trace(go.Scatter(x=np.arange(96), y=errors, name=metric.__class__.__name__))
fig.update_layout(title=f"Error of {metric.__class__.__name__} for each time step")
return fig
# # plot the error
# fig.add_trace(go.Scatter(x=np.arange(96), y=errors, name=metric.__class__.__name__))
# fig.update_layout(title=f"Error of {metric.__class__.__name__} for each time step")
# return fig
def get_plot(
self,
@@ -312,33 +362,114 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
next_day_np = self.data_processor.inverse_transform(next_day_np)
predictions_np = self.data_processor.inverse_transform(predictions_np)
ci_99_upper = np.quantile(predictions_np, 0.995, axis=0)
ci_99_lower = np.quantile(predictions_np, 0.005, axis=0)
ci_95_upper = np.quantile(predictions_np, 0.975, axis=0)
ci_95_lower = np.quantile(predictions_np, 0.025, axis=0)
ci_90_upper = np.quantile(predictions_np, 0.95, axis=0)
ci_90_lower = np.quantile(predictions_np, 0.05, axis=0)
ci_50_lower = np.quantile(predictions_np, 0.25, axis=0)
ci_50_upper = np.quantile(predictions_np, 0.75, axis=0)
# Add traces for current and next day
fig.add_trace(go.Scatter(x=np.arange(96), y=current_day_np, name="Current Day"))
fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day_np, name="Next Day"))
# fig.add_trace(go.Scatter(x=np.arange(96), y=current_day_np, name="Current Day"))
# fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day_np, name="Next Day"))
for i, q in enumerate(self.quantiles):
fig.add_trace(
go.Scatter(
x=96 + np.arange(96),
y=predictions_np[:, i],
name=f"Prediction (Q={q})",
line=dict(dash="dash"),
)
)
# for i, q in enumerate(self.quantiles):
# fig.add_trace(
# go.Scatter(
# x=96 + np.arange(96),
# y=predictions_np[:, i],
# name=f"Prediction (Q={q})",
# line=dict(dash="dash"),
# )
# )
# Update the layout
fig.update_layout(
title="Predictions and Quantiles of the Linear Model",
showlegend=show_legend,
# # Update the layout
# fig.update_layout(
# title="Predictions and Quantiles of the Linear Model",
# showlegend=show_legend,
# )
sns.set_theme()
time_steps = np.arange(0, 96)
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(
time_steps,
predictions_np.mean(axis=0),
label="Mean of NRV samples",
linewidth=3,
)
# ax.fill_between(time_steps, ci_lower, ci_upper, color='b', alpha=0.2, label='Full Interval')
ax.fill_between(
time_steps,
ci_99_lower,
ci_99_upper,
color="b",
alpha=0.2,
label="99% Interval",
)
ax.fill_between(
time_steps,
ci_95_lower,
ci_95_upper,
color="b",
alpha=0.2,
label="95% Interval",
)
ax.fill_between(
time_steps,
ci_90_lower,
ci_90_upper,
color="b",
alpha=0.2,
label="90% Interval",
)
ax.fill_between(
time_steps,
ci_50_lower,
ci_50_upper,
color="b",
alpha=0.2,
label="50% Interval",
)
ax.plot(next_day_np, label="Real NRV", linewidth=3)
# full_interval_patch = mpatches.Patch(color='b', alpha=0.2, label='Full Interval')
ci_99_patch = mpatches.Patch(color="b", alpha=0.3, label="99% Interval")
ci_95_patch = mpatches.Patch(color="b", alpha=0.4, label="95% Interval")
ci_90_patch = mpatches.Patch(color="b", alpha=0.5, label="90% Interval")
ci_50_patch = mpatches.Patch(color="b", alpha=0.6, label="50% Interval")
ax.legend(
handles=[
ci_99_patch,
ci_95_patch,
ci_90_patch,
ci_50_patch,
ax.lines[0],
ax.lines[1],
]
)
return fig
def auto_regressive(self, dataset, idx_batch, sequence_length: int = 96):
return auto_regressive(dataset, self.model, self.quantiles, idx_batch, sequence_length)
return auto_regressive(
dataset, self.model, self.quantiles, idx_batch, sequence_length
)
def plot_quantile_percentages(
self, task, data_loader, train: bool = True, iteration: int = None, full_day: bool = False
self,
task,
data_loader,
train: bool = True,
iteration: int = None,
full_day: bool = False,
):
quantiles = self.quantiles
total = 0
@@ -368,20 +499,18 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
else:
inputs = inputs.to(self.device)
outputs = self.model(inputs).cpu().numpy() # (batch_size, num_quantiles)
targets = targets.squeeze(-1).cpu().numpy() # (batch_size, 1)
outputs = (
self.model(inputs).cpu().numpy()
) # (batch_size, num_quantiles)
targets = targets.squeeze(-1).cpu().numpy() # (batch_size, 1)
for i, q in enumerate(quantiles):
quantile_counter[q] += np.sum(
targets < outputs[:, i]
)
quantile_counter[q] += np.sum(targets < outputs[:, i])
total += len(targets)
# to numpy array of length len(quantiles)
percentages = np.array(
[quantile_counter[q] / total for q in quantiles]
)
percentages = np.array([quantile_counter[q] / total for q in quantiles])
bar_width = 0.35
index = np.arange(len(quantiles))
@@ -389,9 +518,7 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
# Plotting the bars
fig, ax = plt.subplots(figsize=(15, 10))
bar1 = ax.bar(
index, quantiles, bar_width, label="Ideal", color="brown"
)
bar1 = ax.bar(index, quantiles, bar_width, label="Ideal", color="brown")
bar2 = ax.bar(
index + bar_width, percentages, bar_width, label="NN model", color="blue"
)
@@ -441,7 +568,6 @@ class NonAutoRegressiveQuantileRegression(Trainer):
):
self.quantiles = quantiles
criterion = NonAutoRegressivePinballLoss(quantiles=quantiles)
super().__init__(
model=model,
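Note: sample_from_dist treats the predicted quantiles as points on an inverse CDF and interpolates between them with a cubic spline. A standalone sketch for a single time step (the quantile levels and values are made up; probabilities are kept inside the modelled range to avoid spline extrapolation):

import numpy as np
from scipy.interpolate import CubicSpline

quantiles = np.array([0.05, 0.25, 0.50, 0.75, 0.95])  # predicted quantile levels
preds = np.array([-1.2, -0.3, 0.1, 0.5, 1.4])         # model outputs at those levels

inverse_cdf = CubicSpline(quantiles, preds)
probs = np.random.uniform(quantiles[0], quantiles[-1], size=1000)
samples = inverse_cdf(probs)  # draws from the implied predictive distribution
print(samples.mean(), samples.std())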

View File

@@ -86,7 +86,7 @@ class Trainer:
def random_samples(self, train: bool = True, num_samples: int = 10):
train_loader, test_loader = self.data_processor.get_dataloaders(
predict_sequence_length=self.model.output_size
predict_sequence_length=96
)
if train:
@@ -94,7 +94,14 @@ class Trainer:
else:
loader = test_loader
indices = np.random.randint(0, len(loader.dataset) - 1, size=num_samples)
np.random.seed(42)
actual_indices = np.random.choice(loader.dataset.full_day_valid_indices, num_samples, replace=False)
indices = {}
for i in actual_indices:
indices[i] = loader.dataset.valid_indices.index(i)
print(actual_indices)
return indices
def train(self, epochs: int, remotely: bool = False, task: Task = None):
@@ -107,8 +114,8 @@ class Trainer:
predict_sequence_length=self.model.output_size
)
train_samples = self.random_samples(train=True)
test_samples = self.random_samples(train=False)
train_samples = self.random_samples(train=True, num_samples=5)
test_samples = self.random_samples(train=False, num_samples=5)
self.init_clearml_task(task)
@@ -189,7 +196,7 @@ class Trainer:
if task:
self.finish_training(task=task)
task.close()
# task.close()
except Exception:
if task:
task.close()
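Note: assuming the Dataset's __getitem__ indexes through valid_indices, random_samples maps each sampled full-day start (a raw frame index) to its position inside valid_indices via .index(). A toy illustration of that mapping:

valid_indices = [3, 7, 11, 15]       # all usable window starts (raw frame indices)
full_day_valid_indices = [7, 15]     # the subset that starts at midnight

indices = {i: valid_indices.index(i) for i in full_day_valid_indices}
print(indices)  # {7: 1, 15: 3}: raw index -> dataset position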

View File

@@ -1,3 +1,14 @@
from src.utils.clearml import ClearMLHelper
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(
task_name="Autoregressive Quantile Regression: Non Linear"
)
task.execute_remotely(queue_name="default", exit_process=True)
from src.policies.PolicyEvaluator import PolicyEvaluator
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.models.lstm_model import GRUModel
from src.data import DataProcessor, DataConfig
from src.trainers.quantile_trainer import AutoRegressiveQuantileTrainer
@@ -11,11 +22,6 @@ import torch.nn as nn
from src.models.time_embedding_layer import TimeEmbedding
#### ClearML ####
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="Autoregressive Quantile Regression: Linear + Quarter + DoW + Load + Wind + Net")
#### Data Processor ####
data_config = DataConfig()
@@ -32,7 +38,6 @@ data_config.DAY_OF_WEEK = True
data_config.NOMINAL_NET_POSITION = True
data_config = task.connect(data_config, name="data_features")
data_processor = DataProcessor(data_config, path="", lstm=False)
@@ -58,22 +63,35 @@ else:
model_parameters = {
"learning_rate": 0.0001,
"hidden_size": 512,
"num_layers": 2,
"hidden_size": 256,
"num_layers": 4,
"dropout": 0.2,
"time_feature_embedding": 4,
"time_feature_embedding": 16,
}
model_parameters = task.connect(model_parameters, name="model_parameters")
time_embedding = TimeEmbedding(data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"])
time_embedding = TimeEmbedding(
data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"]
)
# lstm_model = GRUModel(time_embedding.output_dim(inputDim), len(quantiles), hidden_size=model_parameters["hidden_size"], num_layers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
# non_linear_model = NonLinearRegression(time_embedding.output_dim(inputDim), len(quantiles), hiddenSize=model_parameters["hidden_size"], numLayers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
non_linear_model = NonLinearRegression(
time_embedding.output_dim(inputDim),
len(quantiles),
hiddenSize=model_parameters["hidden_size"],
numLayers=model_parameters["num_layers"],
dropout=model_parameters["dropout"],
)
# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
model = nn.Sequential(time_embedding, linear_model)
model = nn.Sequential(time_embedding, non_linear_model)
optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
### Policy Evaluator ###
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = PolicyEvaluator(baseline_policy, task)
#### Trainer ####
trainer = AutoRegressiveQuantileTrainer(
model,
@@ -82,6 +100,7 @@ trainer = AutoRegressiveQuantileTrainer(
data_processor,
quantiles,
"cuda",
policy_evaluator=policy_evaluator,
debug=False,
)
@@ -91,3 +110,15 @@ trainer.add_metrics_to_track(
trainer.early_stopping(patience=30)
trainer.plot_every(5)
trainer.train(task=task, epochs=epochs, remotely=True)
### Policy Evaluation ###
idx_samples = trainer.test_set_samples
_, test_loader = trainer.data_processor.get_dataloaders(
predict_sequence_length=trainer.model.output_size, full_day_skip=True
)
policy_evaluator.evaluate_test_set(idx_samples, test_loader)
policy_evaluator.plot_profits_table()
policy_evaluator.plot_thresholds_per_day()
task.close()
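Note: the trainer minimizes PinballLoss over the configured quantiles. For reference, a minimal pinball (quantile) loss consistent with how it is instantiated here; this is the textbook definition, not necessarily the repository's exact implementation:

import torch

def pinball_loss(preds, target, quantiles):
    # preds: (batch, num_quantiles); target: (batch, 1)
    losses = []
    for i, q in enumerate(quantiles):
        err = target[:, 0] - preds[:, i]
        losses.append(torch.max(q * err, (q - 1) * err).mean())
    return torch.stack(losses).mean()

print(pinball_loss(torch.randn(8, 3), torch.randn(8, 1), [0.1, 0.5, 0.9]))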

View File

@@ -1,25 +1,17 @@
from clearml import Task
from src.data import DataProcessor, DataConfig
from src.trainers.trainer import Trainer
from src.utils.clearml import ClearMLHelper
from src.models import *
from src.losses import *
import torch
import numpy as np
from torch.nn import MSELoss, L1Loss
from datetime import datetime
import torch.nn as nn
from src.models.time_embedding_layer import TimeEmbedding
from src.models.diffusion_model import GRUDiffusionModel, SimpleDiffusionModel
from src.trainers.diffusion_trainer import DiffusionTrainer
clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
task = clearml_helper.get_task(task_name="Diffusion Training")
# execute remotely
task.execute_remotely(queue_name="default", exit_process=True)
print("Running remotely")
from src.models import *
from src.losses import *
from src.models.time_embedding_layer import TimeEmbedding
from src.models.diffusion_model import GRUDiffusionModel, SimpleDiffusionModel
from src.trainers.diffusion_trainer import DiffusionTrainer
from src.data import DataProcessor, DataConfig
from src.policies.simple_baseline import BaselinePolicy, Battery
from src.policies.PolicyEvaluator import PolicyEvaluator
#### Data Processor ####
data_config = DataConfig()
@@ -38,7 +30,7 @@ data_config.NOMINAL_NET_POSITION = True
data_config = task.connect(data_config, name="data_features")
data_processor = DataProcessor(data_config, path="", lstm=False)
data_processor.set_batch_size(128)
data_processor.set_batch_size(64)
data_processor.set_full_day_skip(True)
inputDim = data_processor.get_input_size()
@@ -47,18 +39,28 @@ print("Input dim: ", inputDim)
model_parameters = {
"epochs": 5000,
"learning_rate": 0.0001,
"hidden_sizes": [512, 512, 512],
"time_dim": 64,
"hidden_sizes": [128, 128],
"time_dim": 8,
}
model_parameters = task.connect(model_parameters, name="model_parameters")
#### Model ####
# model = SimpleDiffusionModel(96, model_parameters["hidden_sizes"], other_inputs_dim=inputDim[1], time_dim=model_parameters["time_dim"])
model = GRUDiffusionModel(96, model_parameters["hidden_sizes"], other_inputs_dim=inputDim[2], time_dim=model_parameters["time_dim"], gru_hidden_size=256)
model = SimpleDiffusionModel(
96,
model_parameters["hidden_sizes"],
other_inputs_dim=inputDim[1],
time_dim=model_parameters["time_dim"],
)
# model = GRUDiffusionModel(96, model_parameters["hidden_sizes"], other_inputs_dim=inputDim[2], time_dim=model_parameters["time_dim"], gru_hidden_size=128)
print("Starting training ...")
### Policy Evaluator ###
battery = Battery(2, 1)
baseline_policy = BaselinePolicy(battery, data_path="")
policy_evaluator = PolicyEvaluator(baseline_policy, task)
#### Trainer ####
trainer = DiffusionTrainer(model, data_processor, "cuda")
trainer.train(model_parameters["epochs"], model_parameters["learning_rate"], task)
trainer = DiffusionTrainer(
model, data_processor, "cuda", policy_evaluator=policy_evaluator
)
trainer.train(model_parameters["epochs"], model_parameters["learning_rate"], task)