Lot of changes

Victor Mylle
2023-11-23 08:34:47 +00:00
parent 166d3967e1
commit 5de3f64a1a
9 changed files with 761 additions and 196234 deletions


@@ -7,4 +7,5 @@ seaborn
statsmodels
lightgbm
prettytable
clearml
clearml
properscoring


@@ -2,8 +2,16 @@ import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
class NrvDataset(Dataset):
def __init__(self, dataframe, data_config, full_day_skip: bool = False, sequence_length=96, predict_sequence_length=96):
def __init__(
self,
dataframe,
data_config,
full_day_skip: bool = False,
sequence_length=96,
predict_sequence_length=96,
):
self.data_config = data_config
self.dataframe = dataframe
self.full_day_skip = full_day_skip
@@ -11,26 +19,40 @@ class NrvDataset(Dataset):
# reset dataframe index
self.dataframe.reset_index(drop=True, inplace=True)
self.nrv = torch.tensor(dataframe['nrv'].to_numpy(), dtype=torch.float32)
self.load_forecast = torch.tensor(dataframe['load_forecast'].to_numpy(), dtype=torch.float32)
self.total_load = torch.tensor(dataframe['total_load'].to_numpy(), dtype=torch.float32)
self.pv_gen_forecast = torch.tensor(dataframe['pv_forecast'].to_numpy(), dtype=torch.float32)
self.wind_gen_forecast = torch.tensor(dataframe['wind_forecast'].to_numpy(), dtype=torch.float32)
self.nrv = torch.tensor(dataframe["nrv"].to_numpy(), dtype=torch.float32)
self.load_forecast = torch.tensor(
dataframe["load_forecast"].to_numpy(), dtype=torch.float32
)
self.total_load = torch.tensor(
dataframe["total_load"].to_numpy(), dtype=torch.float32
)
self.pv_gen_forecast = torch.tensor(
dataframe["pv_forecast"].to_numpy(), dtype=torch.float32
)
self.wind_gen_forecast = torch.tensor(
dataframe["wind_forecast"].to_numpy(), dtype=torch.float32
)
self.sequence_length = sequence_length
self.predict_sequence_length = predict_sequence_length
self.samples_to_skip = self.skip_samples()
total_indices = set(range(len(self.nrv) - self.sequence_length - self.predict_sequence_length))
total_indices = set(
range(len(self.nrv) - self.sequence_length - self.predict_sequence_length)
)
self.valid_indices = sorted(list(total_indices - set(self.samples_to_skip)))
### TODO: Add an option to only use full-day samples ###
### Skipping all samples in between is probably the easiest way (though not the most efficient). ###
def skip_samples(self):
nan_rows = self.dataframe[self.dataframe.isnull().any(axis=1)]
nan_indices = nan_rows.index
skip_indices = [list(range(idx-self.sequence_length-self.predict_sequence_length, idx+1)) for idx in nan_indices]
skip_indices = [
list(
range(
idx - self.sequence_length - self.predict_sequence_length, idx + 1
)
)
for idx in nan_indices
]
skip_indices = [item for sublist in skip_indices for item in sublist]
skip_indices = list(set(skip_indices))
@@ -39,7 +61,9 @@ class NrvDataset(Dataset):
# add indices that are not the start of a day (00:15) to the skip indices (use datetime column)
# get indices of all 00:15 timestamps
if self.full_day_skip:
start_of_day_indices = self.dataframe[self.dataframe['datetime'].dt.time == pd.Timestamp('00:15:00').time()].index
start_of_day_indices = self.dataframe[
self.dataframe["datetime"].dt.time == pd.Timestamp("00:15:00").time()
].index
skip_indices.extend(start_of_day_indices)
skip_indices = list(set(skip_indices))
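For reference, a small sketch (toy data, not part of this commit) of how the datetime filter used when full_day_skip is enabled picks out the 00:15 rows on quarter-hourly data:

import pandas as pd

# Illustrative only: two days of quarter-hourly timestamps
df = pd.DataFrame({"datetime": pd.date_range("2022-01-01 00:00", periods=2 * 96, freq="15min")})
start_of_day_indices = df[df["datetime"].dt.time == pd.Timestamp("00:15:00").time()].index
print(list(start_of_day_indices))  # [1, 97]: one matching row per day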
@@ -47,47 +71,75 @@ class NrvDataset(Dataset):
def __len__(self):
return len(self.valid_indices)
def __getitem__(self, idx):
actual_idx = self.valid_indices[idx]
features = []
if self.data_config.NRV_HISTORY:
nrv = self.nrv[actual_idx:actual_idx+self.sequence_length]
nrv = self.nrv[actual_idx : actual_idx + self.sequence_length]
features.append(nrv.view(-1))
if self.data_config.LOAD_HISTORY:
load_history = self.total_load[actual_idx:actual_idx+self.sequence_length]
load_history = self.total_load[
actual_idx : actual_idx + self.sequence_length
]
features.append(load_history.view(-1))
if self.data_config.PV_HISTORY:
pv_history = self.pv_gen_forecast[actual_idx:actual_idx+self.sequence_length]
pv_history = self.pv_gen_forecast[
actual_idx : actual_idx + self.sequence_length
]
features.append(pv_history.view(-1))
if self.data_config.WIND_HISTORY:
wind_history = self.wind_gen_forecast[actual_idx:actual_idx+self.sequence_length]
wind_history = self.wind_gen_forecast[
actual_idx : actual_idx + self.sequence_length
]
features.append(wind_history.view(-1))
if self.data_config.LOAD_FORECAST:
load_forecast = self.load_forecast[actual_idx+self.sequence_length:actual_idx+self.sequence_length+self.predict_sequence_length]
load_forecast = self.load_forecast[
actual_idx
+ self.sequence_length : actual_idx
+ self.sequence_length
+ self.predict_sequence_length
]
features.append(load_forecast.view(-1))
if self.data_config.PV_FORECAST:
pv_forecast = self.pv_gen_forecast[actual_idx+self.sequence_length:actual_idx+self.sequence_length+self.predict_sequence_length]
pv_forecast = self.pv_gen_forecast[
actual_idx
+ self.sequence_length : actual_idx
+ self.sequence_length
+ self.predict_sequence_length
]
features.append(pv_forecast.view(-1))
if self.data_config.WIND_FORECAST:
wind_forecast = self.wind_gen_forecast[actual_idx+self.sequence_length:actual_idx+self.sequence_length+self.predict_sequence_length]
wind_forecast = self.wind_gen_forecast[
actual_idx
+ self.sequence_length : actual_idx
+ self.sequence_length
+ self.predict_sequence_length
]
features.append(wind_forecast.view(-1))
if not features:
raise ValueError("No features are configured to be included in the dataset.")
raise ValueError(
"No features are configured to be included in the dataset."
)
# Concatenate along dimension 0 to create a one-dimensional feature vector
all_features = torch.cat(features, dim=0)
# Target sequence, flattened if necessary
nrv_target = self.nrv[actual_idx+self.sequence_length:actual_idx+self.sequence_length+self.predict_sequence_length].view(-1)
nrv_target = self.nrv[
actual_idx
+ self.sequence_length : actual_idx
+ self.sequence_length
+ self.predict_sequence_length
].view(-1)
# check if nan values are present
if torch.isnan(all_features).any():
@@ -103,35 +155,53 @@ class NrvDataset(Dataset):
# we already have the NRV history with the newly predicted values, so we don't need to include the last 96 values
if self.data_config.LOAD_HISTORY:
load_history = self.total_load[idx:idx+self.sequence_length]
load_history = self.total_load[idx : idx + self.sequence_length]
features.append(load_history.view(-1))
if self.data_config.PV_HISTORY:
pv_history = self.pv_gen_forecast[idx:idx+self.sequence_length]
pv_history = self.pv_gen_forecast[idx : idx + self.sequence_length]
features.append(pv_history.view(-1))
if self.data_config.WIND_HISTORY:
wind_history = self.wind_gen_forecast[idx:idx+self.sequence_length]
wind_history = self.wind_gen_forecast[idx : idx + self.sequence_length]
features.append(wind_history.view(-1))
if self.data_config.LOAD_FORECAST:
load_forecast = self.load_forecast[idx+self.sequence_length:idx+self.sequence_length+self.predict_sequence_length]
load_forecast = self.load_forecast[
idx
+ self.sequence_length : idx
+ self.sequence_length
+ self.predict_sequence_length
]
features.append(load_forecast.view(-1))
if self.data_config.PV_FORECAST:
pv_forecast = self.pv_gen_forecast[idx+self.sequence_length:idx+self.sequence_length+self.predict_sequence_length]
pv_forecast = self.pv_gen_forecast[
idx
+ self.sequence_length : idx
+ self.sequence_length
+ self.predict_sequence_length
]
features.append(pv_forecast.view(-1))
if self.data_config.WIND_FORECAST:
wind_forecast = self.wind_gen_forecast[idx+self.sequence_length:idx+self.sequence_length+self.predict_sequence_length]
wind_forecast = self.wind_gen_forecast[
idx
+ self.sequence_length : idx
+ self.sequence_length
+ self.predict_sequence_length
]
features.append(wind_forecast.view(-1))
target = self.nrv[idx+self.sequence_length:idx+self.sequence_length+self.predict_sequence_length]
target = self.nrv[
idx
+ self.sequence_length : idx
+ self.sequence_length
+ self.predict_sequence_length
]
if len(features) == 0:
return None, target
all_features = torch.cat(features, dim=0)
return all_features, target
return all_features, target
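A minimal usage sketch for the dataset above (illustrative only; the toy dataframe and the SimpleNamespace stand-in for DataConfig are assumptions, and NrvDataset is assumed to be importable from this module):

import pandas as pd
import torch
from types import SimpleNamespace

# Hypothetical stand-in for DataConfig with only the NRV history enabled.
data_config = SimpleNamespace(
    NRV_HISTORY=True, LOAD_HISTORY=False, PV_HISTORY=False, WIND_HISTORY=False,
    LOAD_FORECAST=False, PV_FORECAST=False, WIND_FORECAST=False,
)
n = 4 * 96  # four days of quarter-hourly data
df = pd.DataFrame({
    "datetime": pd.date_range("2022-01-01 00:15", periods=n, freq="15min", tz="UTC"),
    "nrv": torch.randn(n).numpy(),
    "load_forecast": torch.randn(n).numpy(),
    "total_load": torch.randn(n).numpy(),
    "pv_forecast": torch.randn(n).numpy(),
    "wind_forecast": torch.randn(n).numpy(),
})
dataset = NrvDataset(df, data_config)  # defaults: 96 history steps, 96 target steps
features, target = dataset[0]
print(features.shape, target.shape)  # expected: torch.Size([96]) torch.Size([96])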


@@ -12,6 +12,7 @@ forecast_data_path = "../../data/load_forecast.csv"
pv_forecast_data_path = "../../data/pv_gen_forecast.csv"
wind_forecast_data_path = "../../data/wind_gen_forecast.csv"
class DataConfig:
def __init__(self):
self.NRV_HISTORY: bool = True
@@ -28,11 +29,20 @@ class DataConfig:
self.WIND_FORECAST: bool = False
self.WIND_HISTORY: bool = False
### TIME ###
self.YEAR: bool = False
self.DAY: bool = False
self.QUARTER: bool = False
class DataProcessor:
def __init__(self, data_config: DataConfig):
self.batch_size = 2048
self.train_range = (-np.inf, datetime(year=2022, month=11, day=30, tzinfo=pytz.UTC))
self.train_range = (
-np.inf,
datetime(year=2022, month=11, day=30, tzinfo=pytz.UTC),
)
self.test_range = (datetime(year=2023, month=1, day=1, tzinfo=pytz.UTC), np.inf)
self.update_range_str()
@@ -42,9 +52,17 @@ class DataProcessor:
self.pv_forecast = self.get_pv_forecast()
self.wind_forecast = self.get_wind_forecast()
self.all_features = self.history_features.merge(self.future_features, on='datetime', how='left')
self.all_features = self.all_features.merge(self.pv_forecast, on='datetime', how='left')
self.all_features = self.all_features.merge(self.wind_forecast, on='datetime', how='left')
self.all_features = self.history_features.merge(
self.future_features, on="datetime", how="left"
)
self.all_features = self.all_features.merge(
self.pv_forecast, on="datetime", how="left"
)
self.all_features = self.all_features.merge(
self.wind_forecast, on="datetime", how="left"
)
self.output_size = 96
self.data_config = data_config
@@ -59,6 +77,9 @@ class DataProcessor:
def set_full_day_skip(self, full_day_skip: bool):
self.full_day_skip = full_day_skip
def set_output_size(self, output_size: int):
self.output_size = output_size
def set_train_range(self, train_range: tuple):
self.train_range = train_range
self.update_range_str()
@@ -68,106 +89,178 @@ class DataProcessor:
self.update_range_str()
def update_range_str(self):
self.train_range_start = str(self.train_range[0]) if self.train_range[0] != -np.inf else "-inf"
self.train_range_end = str(self.train_range[1]) if self.train_range[1] != np.inf else "inf"
self.test_range_start = str(self.test_range[0]) if self.test_range[0] != -np.inf else "-inf"
self.test_range_end = str(self.test_range[1]) if self.test_range[1] != np.inf else "inf"
self.train_range_start = (
str(self.train_range[0]) if self.train_range[0] != -np.inf else "-inf"
)
self.train_range_end = (
str(self.train_range[1]) if self.train_range[1] != np.inf else "inf"
)
self.test_range_start = (
str(self.test_range[0]) if self.test_range[0] != -np.inf else "-inf"
)
self.test_range_end = (
str(self.test_range[1]) if self.test_range[1] != np.inf else "inf"
)
def get_nrv_history(self):
df = pd.read_csv(history_data_path, delimiter=';')
df = df[['datetime', 'netregulationvolume']]
df = df.rename(columns={'netregulationvolume': 'nrv'})
df['datetime'] = pd.to_datetime(df['datetime'])
counts = df['datetime'].dt.date.value_counts().sort_index()
df = df[df['datetime'].dt.date.isin(counts[counts == 96].index)]
df = pd.read_csv(history_data_path, delimiter=";")
df = df[["datetime", "netregulationvolume"]]
df = df.rename(columns={"netregulationvolume": "nrv"})
df["datetime"] = pd.to_datetime(df["datetime"])
counts = df["datetime"].dt.date.value_counts().sort_index()
df = df[df["datetime"].dt.date.isin(counts[counts == 96].index)]
df.sort_values(by="datetime", inplace=True)
return df
def get_load_forecast(self):
df = pd.read_csv(forecast_data_path, delimiter=';')
df = df.rename(columns={'Day-ahead 6PM forecast': 'load_forecast', 'Datetime': 'datetime', 'Total Load': 'total_load'})
df = df[['datetime', 'load_forecast', 'total_load']]
df['datetime'] = pd.to_datetime(df['datetime'], utc=True)
df = pd.read_csv(forecast_data_path, delimiter=";")
df = df.rename(
columns={
"Day-ahead 6PM forecast": "load_forecast",
"Datetime": "datetime",
"Total Load": "total_load",
}
)
df = df[["datetime", "load_forecast", "total_load"]]
df["datetime"] = pd.to_datetime(df["datetime"], utc=True)
df.sort_values(by="datetime", inplace=True)
return df
def get_pv_forecast(self):
df = pd.read_csv(pv_forecast_data_path, delimiter=';')
df = pd.read_csv(pv_forecast_data_path, delimiter=";")
df = df.rename(columns={'dayahead11hforecast': 'pv_forecast', 'Datetime': 'datetime'})
df = df[['datetime', 'pv_forecast']]
df = df.rename(
columns={"dayahead11hforecast": "pv_forecast", "Datetime": "datetime"}
)
df = df[["datetime", "pv_forecast"]]
df = df.groupby('datetime').mean().reset_index()
df['datetime'] = pd.to_datetime(df['datetime'], utc=True)
df = df.groupby("datetime").mean().reset_index()
df["datetime"] = pd.to_datetime(df["datetime"], utc=True)
df.sort_values(by="datetime", inplace=True)
return df
def get_wind_forecast(self):
df = pd.read_csv(wind_forecast_data_path, delimiter=';')
df = pd.read_csv(wind_forecast_data_path, delimiter=";")
df = df.rename(columns={'dayaheadforecast': 'wind_forecast', 'datetime': 'datetime'})
df = df[['datetime', 'wind_forecast']]
df = df.rename(
columns={"dayaheadforecast": "wind_forecast", "datetime": "datetime"}
)
df = df[["datetime", "wind_forecast"]]
# remove nan rows
df = df[~df['wind_forecast'].isnull()]
df = df[~df["wind_forecast"].isnull()]
df = df.groupby('datetime').mean().reset_index()
df['datetime'] = pd.to_datetime(df['datetime'], utc=True)
df = df.groupby("datetime").mean().reset_index()
df["datetime"] = pd.to_datetime(df["datetime"], utc=True)
df.sort_values(by="datetime", inplace=True)
return df
def set_batch_size(self, batch_size: int):
self.batch_size = batch_size
def get_dataloader(self, dataset, shuffle: bool = True):
batch_size = len(dataset) if self.batch_size is None else self.batch_size
return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
return torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4
)
def get_train_dataloader(self, transform: bool = True, predict_sequence_length: int = 96):
def get_train_dataloader(
self, transform: bool = True, predict_sequence_length: int = 96
):
train_df = self.all_features.copy()
if self.train_range[0] != -np.inf:
train_df = train_df[(train_df['datetime'] >= self.train_range[0])]
if self.train_range[1] != np.inf:
train_df = train_df[(train_df['datetime'] <= self.train_range[1])]
train_df = train_df[(train_df["datetime"] >= self.train_range[0])]
if self.train_range[1] != np.inf:
train_df = train_df[(train_df["datetime"] <= self.train_range[1])]
if transform:
train_df['nrv'] = self.nrv_scaler.fit_transform(train_df['nrv'].values.reshape(-1, 1)).reshape(-1)
train_df['load_forecast'] = self.load_forecast_scaler.fit_transform(train_df['load_forecast'].values.reshape(-1, 1)).reshape(-1)
train_df['total_load'] = self.load_forecast_scaler.transform(train_df['total_load'].values.reshape(-1, 1)).reshape(-1)
train_dataset = NrvDataset(train_df, data_config=self.data_config, full_day_skip=self.full_day_skip, predict_sequence_length=predict_sequence_length)
train_df["nrv"] = self.nrv_scaler.fit_transform(
train_df["nrv"].values.reshape(-1, 1)
).reshape(-1)
train_df["load_forecast"] = self.load_forecast_scaler.fit_transform(
train_df["load_forecast"].values.reshape(-1, 1)
).reshape(-1)
train_df["total_load"] = self.load_forecast_scaler.transform(
train_df["total_load"].values.reshape(-1, 1)
).reshape(-1)
train_dataset = NrvDataset(
train_df,
data_config=self.data_config,
full_day_skip=self.full_day_skip,
predict_sequence_length=predict_sequence_length,
)
return self.get_dataloader(train_dataset)
def get_test_dataloader(self, transform: bool = True, predict_sequence_length: int = 96):
def get_test_dataloader(
self, transform: bool = True, predict_sequence_length: int = 96
):
test_df = self.all_features.copy()
if self.test_range[0] != -np.inf:
test_df = test_df[(test_df['datetime'] >= self.test_range[0])]
if self.test_range[1] != np.inf:
test_df = test_df[(test_df['datetime'] <= self.test_range[1])]
test_df = test_df[(test_df["datetime"] >= self.test_range[0])]
if self.test_range[1] != np.inf:
test_df = test_df[(test_df["datetime"] <= self.test_range[1])]
if transform:
test_df['nrv'] = self.nrv_scaler.transform(test_df['nrv'].values.reshape(-1, 1)).reshape(-1)
test_df['load_forecast'] = self.load_forecast_scaler.transform(test_df['load_forecast'].values.reshape(-1, 1)).reshape(-1)
test_df['total_load'] = self.load_forecast_scaler.transform(test_df['total_load'].values.reshape(-1, 1)).reshape(-1)
test_df["nrv"] = self.nrv_scaler.transform(
test_df["nrv"].values.reshape(-1, 1)
).reshape(-1)
test_df["load_forecast"] = self.load_forecast_scaler.transform(
test_df["load_forecast"].values.reshape(-1, 1)
).reshape(-1)
test_df["total_load"] = self.load_forecast_scaler.transform(
test_df["total_load"].values.reshape(-1, 1)
).reshape(-1)
test_dataset = NrvDataset(test_df, data_config=self.data_config, full_day_skip=self.full_day_skip, predict_sequence_length=predict_sequence_length)
test_dataset = NrvDataset(
test_df,
data_config=self.data_config,
full_day_skip=self.full_day_skip,
predict_sequence_length=predict_sequence_length,
)
return self.get_dataloader(test_dataset, shuffle=False)
def get_dataloaders(self, transform: bool = True, predict_sequence_length: int = 96):
return self.get_train_dataloader(transform=transform, predict_sequence_length=predict_sequence_length), self.get_test_dataloader(transform=transform, predict_sequence_length=predict_sequence_length)
def inverse_transform(self, tensor: torch.Tensor):
return self.nrv_scaler.inverse_transform(tensor.cpu().numpy()).reshape(-1)
def get_dataloaders(
self, transform: bool = True, predict_sequence_length: int = 96
):
return self.get_train_dataloader(
transform=transform, predict_sequence_length=predict_sequence_length
), self.get_test_dataloader(
transform=transform, predict_sequence_length=predict_sequence_length
)
def inverse_transform(self, input_data):
try:
if isinstance(input_data, torch.Tensor):
if input_data.is_cuda:
input_data = input_data.cpu()
input_np = input_data.detach().numpy() # Convert to numpy array
elif isinstance(input_data, np.ndarray):
input_np = input_data
else:
raise TypeError("Input must be a PyTorch tensor or a NumPy array")
# Store the original shape
original_shape = input_np.shape
input_2d = input_np.reshape(-1, original_shape[-1])
transformed_2d = self.nrv_scaler.inverse_transform(input_2d)
if isinstance(input_data, torch.Tensor):
return torch.from_numpy(transformed_2d).view(original_shape)
else:
return transformed_2d.reshape(original_shape)
except Exception as e:
raise RuntimeError(f"Error in inverse_transform: {e}") from e
def get_input_size(self):
data_loader = self.get_train_dataloader()
data_loader = self.get_train_dataloader(
predict_sequence_length=self.output_size
)
input, _ = next(iter(data_loader))
return input.shape[-1]
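A sketch of how DataProcessor is typically wired up (illustrative; it assumes the CSV files behind the module-level paths exist and that the scalers and full_day_skip flag are initialised in the parts of __init__ not shown in this diff):

data_config = DataConfig()
processor = DataProcessor(data_config)
processor.set_batch_size(512)
processor.set_full_day_skip(False)
processor.set_output_size(96)
train_loader, test_loader = processor.get_dataloaders(predict_sequence_length=96)
print(processor.get_input_size())  # flattened feature length fed to the model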


@@ -1 +1,2 @@
from .pinball_loss import PinballLoss, NonAutoRegressivePinballLoss
from .pinball_loss import PinballLoss, NonAutoRegressivePinballLoss
from .crps_metric import CRPSLoss

src/losses/crps_metric.py (new file)

@@ -0,0 +1,29 @@
import torch
from torch import nn
class CRPSLoss(nn.Module):
def __init__(self, quantiles):
super(CRPSLoss, self).__init__()
if not torch.is_tensor(quantiles):
quantiles = torch.tensor(quantiles, dtype=torch.float32)
self.quantiles_tensor = quantiles
def forward(self, preds, target):
# preds shape: [batch_size, num_quantiles]
# unsqueeze target
target = target.unsqueeze(-1)
mask = (preds > target).float()
test = self.quantiles_tensor - mask
# square them
test = test * test
crps = torch.trapz(test, x=preds)
# mean over batch
crps = torch.mean(crps)
return crps
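Since properscoring was added to the requirements, a rough sanity check of the new CRPSLoss against its ensemble CRPS could look like the sketch below (illustrative only; the import path mirrors the trainers in this commit, and the two numbers are only expected to agree approximately, because one integrates the predicted quantile curve while the other treats the quantile values as an ensemble):

import numpy as np
import torch
import properscoring as ps
from losses import CRPSLoss  # assumes the package layout used in this repo

quantile_levels = np.linspace(0.05, 0.95, 19)
preds = torch.sort(torch.randn(8, len(quantile_levels)), dim=-1).values  # fake quantile forecasts
target = torch.randn(8)

crps_quantile = CRPSLoss(quantile_levels)(preds, target).item()
crps_ensemble = ps.crps_ensemble(target.numpy(), preds.numpy()).mean()
print(crps_quantile, crps_ensemble)  # should be in the same ballpark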

File diff suppressed because it is too large


@@ -10,13 +10,39 @@ from plotly.subplots import make_subplots
from trainers.trainer import Trainer
from tqdm import tqdm
class AutoRegressiveTrainer(Trainer):
def __init__(
self,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
criterion: torch.nn.Module,
data_processor: DataProcessor,
device: torch.device,
clearml_helper: ClearMLHelper = None,
debug: bool = True,
):
super().__init__(
model=model,
optimizer=optimizer,
criterion=criterion,
data_processor=data_processor,
device=device,
clearml_helper=clearml_helper,
debug=debug,
)
self.model.output_size = 1
def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
num_samples = len(sample_indices)
rows = num_samples # One row per sample since we only want one column
cols = 1
fig = make_subplots(rows=rows, cols=cols, subplot_titles=[f'Sample {i+1}' for i in range(num_samples)])
fig = make_subplots(
rows=rows,
cols=cols,
subplot_titles=[f"Sample {i+1}" for i in range(num_samples)],
)
for i, idx in enumerate(sample_indices):
auto_regressive_output = self.auto_regressive(data_loader, idx)
@@ -26,27 +52,30 @@ class AutoRegressiveTrainer(Trainer):
initial, predictions, _, target = auto_regressive_output
sub_fig = self.get_plot(initial, target, predictions, show_legend=(i == 0))
row = i + 1
col = 1
for trace in sub_fig.data:
fig.add_trace(trace, row=row, col=col)
loss = self.criterion(predictions.to(self.device), target.to(self.device)).item()
loss = self.criterion(
predictions.to(self.device), target.to(self.device)
).item()
fig['layout']['annotations'][i].update(text=f"{loss.__class__.__name__}: {loss:.6f}")
fig["layout"]["annotations"][i].update(
text=f"{loss.__class__.__name__}: {loss:.6f}"
)
# y axis same for all plots
fig.update_yaxes(range=[-1, 1], col=1)
fig.update_layout(height=300 * rows)
task.get_logger().report_plotly(
title=f"{'Training' if train else 'Test'} Samples",
series="full_day",
iteration=epoch,
figure=fig
figure=fig,
)
def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
@@ -61,14 +90,25 @@ class AutoRegressiveTrainer(Trainer):
target_full.append(target)
with torch.no_grad():
print(prev_features.shape)
prediction = self.model(prev_features.unsqueeze(0))
predictions_full.append(prediction.squeeze(-1))
for i in range(sequence_length - 1):
new_features = torch.cat((prev_features[1:97].cpu(), prediction.squeeze(-1).cpu()), dim=0)
new_features = torch.cat(
(
prev_features[1:96].cpu(),
prediction.squeeze(-1).cpu(),
),
dim=0,
)
print(new_features.shape)
# get the other needed features
other_features, new_target = data_loader.dataset.random_day_autoregressive(idx + i + 1)
other_features, new_target = data_loader.dataset.random_day_autoregressive(
idx + i + 1
)
if other_features is not None:
prev_features = torch.cat((new_features, other_features), dim=0)
@@ -80,14 +120,20 @@ class AutoRegressiveTrainer(Trainer):
# predict
with torch.no_grad():
prediction = self.model(new_features.unsqueeze(0).to(self.device))
prediction = self.model(prev_features.unsqueeze(0).to(self.device))
predictions_full.append(prediction.squeeze(-1))
return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.stack(target_full).cpu()
return (
initial_sequence.cpu(),
torch.stack(predictions_full).cpu(),
torch.stack(target_full).cpu(),
)
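The rollout above advances one step at a time: the 96-step history window drops its oldest value and appends the latest prediction before the other features returned by random_day_autoregressive are concatenated back on. A minimal sketch of that window update (toy tensors, illustrative only):

import torch

window = torch.arange(96, dtype=torch.float32)  # current 96-step NRV history
prediction = torch.tensor([96.0])               # newest model output
# drop the oldest value and append the prediction so the window length stays 96
window = torch.cat((window[1:96], prediction), dim=0)
print(window.shape)  # torch.Size([96])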
def log_final_metrics(self, task, dataloader, train: bool = True):
metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
transformed_metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
transformed_metrics = {
metric.__class__.__name__: 0.0 for metric in self.metrics_to_track
}
with torch.no_grad():
# iterate idx over dataset
@@ -96,15 +142,23 @@ class AutoRegressiveTrainer(Trainer):
for idx in tqdm(range(total_amount_samples)):
_, outputs, targets = self.auto_regressive(dataloader, idx)
inversed_outputs = torch.tensor(self.data_processor.inverse_transform(outputs))
inversed_inputs = torch.tensor(self.data_processor.inverse_transform(targets))
inversed_outputs = torch.tensor(
self.data_processor.inverse_transform(outputs)
)
inversed_inputs = torch.tensor(
self.data_processor.inverse_transform(targets)
)
outputs = outputs.to(self.device)
targets = targets.to(self.device)
for metric in self.metrics_to_track:
transformed_metrics[metric.__class__.__name__] += metric(outputs, targets)
metrics[metric.__class__.__name__] += metric(inversed_outputs, inversed_inputs)
transformed_metrics[metric.__class__.__name__] += metric(
outputs, targets
)
metrics[metric.__class__.__name__] += metric(
inversed_outputs, inversed_inputs
)
for metric in self.metrics_to_track:
metrics[metric.__class__.__name__] /= total_amount_samples
@@ -112,16 +166,20 @@ class AutoRegressiveTrainer(Trainer):
for metric_name, metric_value in metrics.items():
if train:
metric_name = f'train_{metric_name}'
metric_name = f"train_{metric_name}"
else:
metric_name = f'test_{metric_name}'
task.get_logger().report_single_value(name=metric_name, value=metric_value)
metric_name = f"test_{metric_name}"
task.get_logger().report_single_value(
name=metric_name, value=metric_value
)
for metric_name, metric_value in transformed_metrics.items():
if train:
metric_name = f'train_transformed_{metric_name}'
metric_name = f"train_transformed_{metric_name}"
else:
metric_name = f'test_transformed_{metric_name}'
metric_name = f"test_transformed_{metric_name}"
task.get_logger().report_single_value(name=metric_name, value=metric_value)
task.get_logger().report_single_value(
name=metric_name, value=metric_value
)


@@ -1,11 +1,16 @@
from losses import CRPSLoss
from utils.clearml import ClearMLHelper
from data.preprocessing import DataProcessor, DataConfig
import numpy as np
import plotly.graph_objects as go
from trainers.trainer import Trainer
import torch
class ProbabilisticBaselineTrainer:
def __init__(self, quantiles, data_processor: DataProcessor, clearml_helper: ClearMLHelper):
class ProbabilisticBaselineTrainer(Trainer):
def __init__(
self, quantiles, data_processor: DataProcessor, clearml_helper: ClearMLHelper
):
self.data_processor = data_processor
data_config = DataConfig()
@@ -14,6 +19,8 @@ class ProbabilisticBaselineTrainer:
self.clearml_helper = clearml_helper
self.quantiles = quantiles
self.metrics_to_track = []
def init_clearml_task(self):
if not self.clearml_helper:
return None
@@ -37,13 +44,14 @@ class ProbabilisticBaselineTrainer:
try:
time_steps = [[] for _ in range(96)]
train_loader, test_loader = self.data_processor.get_dataloaders(predict_sequence_length=96)
train_loader, test_loader = self.data_processor.get_dataloaders(
predict_sequence_length=96
)
for inputs, _ in train_loader:
for i in range(96):
time_steps[i].extend(inputs[:, i].numpy())
all_quantiles = []
for i, time_values in enumerate(time_steps):
quantiles = np.quantile(time_values, self.quantiles)
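The baseline above collects, for each of the 96 quarter-hour slots, every training value observed in that slot and then takes empirical quantiles per slot. A small sketch of the same idea on synthetic data (the values and quantile levels below are assumptions):

import numpy as np

rng = np.random.default_rng(0)
time_steps = [rng.normal(size=500) for _ in range(96)]  # hypothetical values per quarter-hour slot
quantile_levels = [0.1, 0.5, 0.9]
all_quantiles = np.stack([np.quantile(values, quantile_levels) for values in time_steps])
print(all_quantiles.shape)  # (96, 3): one row of quantile forecasts per time step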
@@ -66,28 +74,86 @@ class ProbabilisticBaselineTrainer:
task.set_archived(True)
raise
def finish_training(self, quantile_values, task):
def log_final_metrics(self, task, dataloader, quantile_values, train: bool = True):
metric = CRPSLoss(self.quantiles)
fig = self.plot_quantiles(quantile_values)
task.get_logger().report_plotly(
title=f"Training Quantile Values",
series="Quantile Values",
figure=fig
crps_values = []
crps_inversed_values = []
# Convert quantile_values to a tensor once outside the loop
quantile_values_tensor = torch.tensor(quantile_values)
quantile_values_expanded = quantile_values_tensor.unsqueeze(0)
for _, targets in dataloader:
# Expand quantile_values for each batch
quantile_values_batch = quantile_values_expanded.repeat(
targets.size(0), 1, 1
)
# Inverse transform targets and quantile_values
inversed_targets = self.data_processor.inverse_transform(targets)
inversed_quantile_values = self.data_processor.inverse_transform(
quantile_values_batch
)
# Calculate CRPS for both original and inversed values
m = metric(quantile_values_batch, targets)
crps_values.append(
m.item()
) # Assuming m is a tensor, use .item() to get the value
m_inversed = metric(inversed_quantile_values, inversed_targets)
crps_inversed_values.append(m_inversed.item())
# Compute mean CRPS
crps_mean = np.mean(crps_values)
crps_inversed_mean = np.mean(crps_inversed_values)
metric_name_transformed = metric.__class__.__name__
metric_name = metric.__class__.__name__
if train:
metric_name = "train_" + metric_name
metric_name_transformed = "train_transformed_" + metric_name_transformed
else:
metric_name = "test_" + metric_name
metric_name_transformed = "test_transformed_" + metric_name_transformed
task.get_logger().report_single_value(
name=metric_name_transformed, value=crps_mean
)
task.get_logger().report_single_value(
name=metric_name, value=crps_inversed_mean
)
def finish_training(self, quantile_values, task):
fig = self.plot_quantiles(quantile_values)
task.get_logger().report_plotly(
title=f"Training Quantile Values", series="Quantile Values", figure=fig
)
train_loader, test_loader = self.data_processor.get_dataloaders(
predict_sequence_length=96
)
self.log_final_metrics(task, train_loader, quantile_values, train=True)
self.log_final_metrics(task, test_loader, quantile_values, train=False)
def plot_quantiles(self, quantile_values):
fig = go.Figure()
for i, q in enumerate(self.quantiles):
values_for_quantile = quantile_values[:, i]
fig.add_trace(go.Scatter(x=np.arange(96), y=values_for_quantile, name=f"Prediction (Q={q})", line=dict(dash='dash')))
fig.add_trace(
go.Scatter(
x=np.arange(96),
y=values_for_quantile,
name=f"Prediction (Q={q})",
line=dict(dash="dash"),
)
)
fig.update_layout(title="Quantile Values")
fig.update_yaxes(range=[-1, 1])
return fig


@@ -5,7 +5,7 @@ from trainers.trainer import Trainer
from trainers.autoregressive_trainer import AutoRegressiveTrainer
from data.preprocessing import DataProcessor
from utils.clearml import ClearMLHelper
from losses import PinballLoss, NonAutoRegressivePinballLoss
from losses import PinballLoss, NonAutoRegressivePinballLoss, CRPSLoss
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import numpy as np
@@ -14,18 +14,36 @@ import matplotlib.pyplot as plt
class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
def __init__(self, model: torch.nn.Module, optimizer: torch.optim.Optimizer, data_processor: DataProcessor, quantiles: list, device: torch.device, clearml_helper: ClearMLHelper = None, debug: bool = True):
def __init__(
self,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
data_processor: DataProcessor,
quantiles: list,
device: torch.device,
clearml_helper: ClearMLHelper = None,
debug: bool = True,
):
quantiles_tensor = torch.tensor(quantiles)
quantiles_tensor = quantiles_tensor.to(device)
self.quantiles = quantiles
criterion = PinballLoss(quantiles=quantiles_tensor)
super().__init__(model=model, optimizer=optimizer, criterion=criterion, data_processor=data_processor, device=device, clearml_helper=clearml_helper, debug=debug)
super().__init__(
model=model,
optimizer=optimizer,
criterion=criterion,
data_processor=data_processor,
device=device,
clearml_helper=clearml_helper,
debug=debug,
)
def log_final_metrics(self, task, dataloader, train: bool = True):
metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
transformed_metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
transformed_metrics = {
metric.__class__.__name__: 0.0 for metric in self.metrics_to_track
}
with torch.no_grad():
total_amount_samples = len(dataloader.dataset) - 95
@@ -33,20 +51,33 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
for idx in tqdm(range(total_amount_samples)):
_, outputs, samples, targets = self.auto_regressive(dataloader, idx)
inversed_samples = torch.tensor(self.data_processor.inverse_transform(samples))
inversed_targets = torch.tensor(self.data_processor.inverse_transform(targets))
inversed_samples = self.data_processor.inverse_transform(samples)
inversed_targets = self.data_processor.inverse_transform(targets)
inversed_outputs = self.data_processor.inverse_transform(outputs)
outputs = outputs.to(self.device)
targets = targets.to(self.device)
samples = samples.to(self.device)
inversed_samples = inversed_samples.to(self.device)
inversed_targets = inversed_targets.to(self.device)
inversed_outputs = inversed_outputs.to(self.device)
for metric in self.metrics_to_track:
if metric.__class__ != PinballLoss:
transformed_metrics[metric.__class__.__name__] += metric(samples, targets)
metrics[metric.__class__.__name__] += metric(inversed_samples, inversed_targets)
if metric.__class__ != PinballLoss and metric.__class__ != CRPSLoss:
transformed_metrics[metric.__class__.__name__] += metric(
samples, targets
)
metrics[metric.__class__.__name__] += metric(
inversed_samples, inversed_targets
)
else:
transformed_metrics[metric.__class__.__name__] += metric(outputs, targets)
transformed_metrics[metric.__class__.__name__] += metric(
outputs, targets
)
metrics[metric.__class__.__name__] += metric(
inversed_outputs, inversed_targets
)
for metric in self.metrics_to_track:
metrics[metric.__class__.__name__] /= total_amount_samples
@@ -55,11 +86,15 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
for metric_name, metric_value in metrics.items():
if PinballLoss.__name__ in metric_name:
continue
name = f'train_{metric_name}' if train else f'test_{metric_name}'
name = f"train_{metric_name}" if train else f"test_{metric_name}"
task.get_logger().report_single_value(name=name, value=metric_value)
for metric_name, metric_value in transformed_metrics.items():
name = f'train_transformed_{metric_name}' if train else f'test_transformed_{metric_name}'
name = (
f"train_transformed_{metric_name}"
if train
else f"test_transformed_{metric_name}"
)
task.get_logger().report_single_value(name=name, value=metric_value)
def get_plot(self, current_day, next_day, predictions, show_legend: bool = True):
@@ -75,12 +110,21 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day_np, name="Next Day"))
for i, q in enumerate(self.quantiles):
fig.add_trace(go.Scatter(x=96 + np.arange(96), y=predictions_np[:, i],
name=f"Prediction (Q={q})", line=dict(dash='dash')))
fig.add_trace(
go.Scatter(
x=96 + np.arange(96),
y=predictions_np[:, i],
name=f"Prediction (Q={q})",
line=dict(dash="dash"),
)
)
# Update the layout
fig.update_layout(title="Predictions and Quantiles of the Linear Model", showlegend=show_legend)
fig.update_layout(
title="Predictions and Quantiles of the Linear Model",
showlegend=show_legend,
)
return fig
def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
@@ -100,15 +144,22 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
predictions_full.append(prediction.squeeze(0))
# sample from the distribution
sample = self.sample_from_dist(self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy())
sample = self.sample_from_dist(
self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy()
)
predictions_sampled.append(sample)
for i in range(sequence_length - 1):
new_features = torch.cat((prev_features[1:97].cpu(), torch.tensor([predictions_sampled[-1]])), dim=0)
new_features = torch.cat(
(prev_features[1:96].cpu(), torch.tensor([predictions_sampled[-1]])),
dim=0,
)
new_features = new_features.float()
# get the other needed features
other_features, new_target = data_loader.dataset.random_day_autoregressive(idx + i + 1)
other_features, new_target = data_loader.dataset.random_day_autoregressive(
idx + i + 1
)
if other_features is not None:
prev_features = torch.cat((new_features, other_features), dim=0)
@@ -120,19 +171,32 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
# predict
with torch.no_grad():
prediction = self.model(new_features.unsqueeze(0).to(self.device))
prediction = self.model(prev_features.unsqueeze(0).to(self.device))
predictions_full.append(prediction.squeeze(0))
# sample from the distribution
sample = self.sample_from_dist(self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy())
sample = self.sample_from_dist(
self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy()
)
predictions_sampled.append(sample)
return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.tensor(predictions_sampled).reshape(-1, 1), torch.stack(target_full).cpu()
return (
initial_sequence.cpu(),
torch.stack(predictions_full).cpu(),
torch.tensor(predictions_sampled).reshape(-1, 1),
torch.stack(target_full).cpu(),
)
@staticmethod
def sample_from_dist(quantiles, output_values):
# Interpolate the inverse CDF
inverse_cdf = interp1d(quantiles, output_values, kind='linear', bounds_error=False, fill_value="extrapolate")
inverse_cdf = interp1d(
quantiles,
output_values,
kind="linear",
bounds_error=False,
fill_value="extrapolate",
)
# generate 1000 uniform random numbers to sample the inverse CDF
uniform_random_numbers = np.random.uniform(0, 1, 1000)
@@ -143,8 +207,9 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
# Return the mean of the samples
return np.mean(samples)
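sample_from_dist above estimates the mean of the forecast distribution: it interpolates the inverse CDF through the predicted quantiles, pushes uniform draws through it, and averages the results. A standalone sketch of the same estimator (the quantile levels and values are made up):

import numpy as np
from scipy.interpolate import interp1d

quantiles = np.array([0.1, 0.5, 0.9])
values = np.array([-0.2, 0.0, 0.3])  # hypothetical quantile predictions
inverse_cdf = interp1d(quantiles, values, kind="linear",
                       bounds_error=False, fill_value="extrapolate")
u = np.random.uniform(0, 1, 1000)    # uniform draws mapped through the inverse CDF
print(np.mean(inverse_cdf(u)))       # point estimate of the predictive mean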
def plot_quantile_percentages(self, task, data_loader, train: bool = True, iteration: int = None):
def plot_quantile_percentages(
self, task, data_loader, train: bool = True, iteration: int = None
):
total = 0
quantile_counter = {q: 0 for q in self.quantiles.cpu().numpy()}
@@ -156,12 +221,16 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
# output shape: (batch_size, num_quantiles)
# target shape: (batch_size, 1)
for i, q in enumerate(self.quantiles.cpu().numpy()):
quantile_counter[q] += np.sum(targets.squeeze(-1).cpu().numpy() < output[:, i].cpu().numpy())
quantile_counter[q] += np.sum(
targets.squeeze(-1).cpu().numpy() < output[:, i].cpu().numpy()
)
total += len(targets)
# to numpy array of length len(quantiles)
percentages = np.array([quantile_counter[q] / total for q in self.quantiles.cpu().numpy()])
percentages = np.array(
[quantile_counter[q] / total for q in self.quantiles.cpu().numpy()]
)
bar_width = 0.35
index = np.arange(len(self.quantiles.cpu().numpy()))
@@ -169,73 +238,132 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
# Plotting the bars
fig, ax = plt.subplots(figsize=(15, 10))
bar1 = ax.bar(index, self.quantiles.cpu().numpy(), bar_width, label='Ideal', color='brown')
bar2 = ax.bar(index + bar_width, percentages, bar_width, label='NN model', color='blue')
bar1 = ax.bar(
index, self.quantiles.cpu().numpy(), bar_width, label="Ideal", color="brown"
)
bar2 = ax.bar(
index + bar_width, percentages, bar_width, label="NN model", color="blue"
)
# Adding the percentage values above the bars for bar2
for rect in bar2:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.005 * height,
f'{height:.2}', ha='center', va='bottom') # Format the number as a percentage
ax.text(
rect.get_x() + rect.get_width() / 2.0,
1.005 * height,
f"{height:.2}",
ha="center",
va="bottom",
)  # annotate the bar with its fraction value
series_name = "Training Set" if train else "Test Set"
# Adding labels and title
ax.set_xlabel('Quantile')
ax.set_ylabel('Fraction of data under quantile forecast')
ax.set_title(f'Quantile Performance Comparison ({series_name})')
ax.set_xlabel("Quantile")
ax.set_ylabel("Fraction of data under quantile forecast")
ax.set_title(f"Quantile Performance Comparison ({series_name})")
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(self.quantiles.cpu().numpy())
ax.legend()
task.get_logger().report_matplotlib_figure(title='Quantile Performance Comparison', series=series_name, report_image=True, figure=plt, iteration=iteration)
task.get_logger().report_matplotlib_figure(
title="Quantile Performance Comparison",
series=series_name,
report_image=True,
figure=plt,
iteration=iteration,
)
plt.close()
class NonAutoRegressiveQuantileRegression(Trainer):
def __init__(self, model: torch.nn.Module, optimizer: torch.optim.Optimizer, data_processor: DataProcessor, quantiles: list, device: torch.device, clearml_helper: ClearMLHelper = None, debug: bool = True):
def __init__(
self,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
data_processor: DataProcessor,
quantiles: list,
device: torch.device,
clearml_helper: ClearMLHelper = None,
debug: bool = True,
):
quantiles_tensor = torch.tensor(quantiles)
quantiles_tensor = quantiles_tensor.to(device)
self.quantiles = quantiles
criterion = NonAutoRegressivePinballLoss(quantiles=quantiles_tensor)
super().__init__(model=model, optimizer=optimizer, criterion=criterion, data_processor=data_processor, device=device, clearml_helper=clearml_helper, debug=debug)
super().__init__(
model=model,
optimizer=optimizer,
criterion=criterion,
data_processor=data_processor,
device=device,
clearml_helper=clearml_helper,
debug=debug,
)
@staticmethod
def sample_from_dist(quantiles, output_values):
reshaped_values = output_values.reshape(-1, len(quantiles))
samples = []
for row in reshaped_values:
inverse_cdf = interp1d(quantiles, row, kind='linear', bounds_error=False, fill_value="extrapolate")
inverse_cdf = interp1d(
quantiles,
row,
kind="linear",
bounds_error=False,
fill_value="extrapolate",
)
uniform_random_numbers = np.random.uniform(0, 1, 1000)
new_samples = inverse_cdf(uniform_random_numbers)
samples.append(np.mean(new_samples))
return np.array(samples)
def log_final_metrics(self, task, dataloader, train: bool = True):
metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
transformed_metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
transformed_metrics = {
metric.__class__.__name__: 0.0 for metric in self.metrics_to_track
}
with torch.no_grad():
for inputs, targets in dataloader:
inputs, targets = inputs.to(self.device), targets
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
outputted_samples = [self.sample_from_dist(self.quantiles.cpu(), output.cpu().numpy()) for output in outputs]
# to tensor
outputted_samples = [
self.sample_from_dist(self.quantiles.cpu(), output.cpu().numpy())
for output in outputs
]
outputted_samples = torch.tensor(outputted_samples)
inversed_outputs_samples = self.data_processor.inverse_transform(
outputted_samples
)
inversed_outputs = torch.tensor(self.data_processor.inverse_transform(outputted_samples))
inversed_inputs = torch.tensor(self.data_processor.inverse_transform(targets))
outputs = outputs.reshape(inputs.shape[0], -1, len(self.quantiles))
inversed_outputs = self.data_processor.inverse_transform(outputs)
inversed_targets = self.data_processor.inverse_transform(targets)
# set on same device
inversed_outputs = inversed_outputs.to(self.device)
inversed_inputs = inversed_inputs.to(self.device)
inversed_outputs_samples = inversed_outputs_samples.to(self.device)
inversed_targets = inversed_targets.to(self.device)
outputted_samples = outputted_samples.to(self.device)
inversed_outputs = inversed_outputs.to(self.device)
for metric in self.metrics_to_track:
transformed_metrics[metric.__class__.__name__] += metric(outputted_samples, targets.to(self.device))
metrics[metric.__class__.__name__] += metric(inversed_outputs, inversed_inputs)
if metric.__class__ != PinballLoss and metric.__class__ != CRPSLoss:
transformed_metrics[metric.__class__.__name__] += metric(
outputted_samples, targets
)
metrics[metric.__class__.__name__] += metric(
inversed_outputs_samples, inversed_targets
)
else:
transformed_metrics[metric.__class__.__name__] += metric(
outputs, targets
)
metrics[metric.__class__.__name__] += metric(
inversed_outputs, inversed_targets
)
for metric in self.metrics_to_track:
metrics[metric.__class__.__name__] /= len(dataloader)
@@ -243,28 +371,31 @@ class NonAutoRegressiveQuantileRegression(Trainer):
for metric_name, metric_value in metrics.items():
if train:
metric_name = f'train_{metric_name}'
metric_name = f"train_{metric_name}"
else:
metric_name = f'test_{metric_name}'
task.get_logger().report_single_value(name=metric_name, value=metric_value)
metric_name = f"test_{metric_name}"
task.get_logger().report_single_value(
name=metric_name, value=metric_value
)
for metric_name, metric_value in transformed_metrics.items():
if train:
metric_name = f'train_transformed_{metric_name}'
metric_name = f"train_transformed_{metric_name}"
else:
metric_name = f'test_transformed_{metric_name}'
metric_name = f"test_transformed_{metric_name}"
task.get_logger().report_single_value(name=metric_name, value=metric_value)
task.get_logger().report_single_value(
name=metric_name, value=metric_value
)
def get_plot(self, current_day, next_day, predictions, show_legend: bool = True):
fig = go.Figure()
# Convert to numpy for plotting
current_day_np = current_day.view(-1).cpu().numpy()
next_day_np = next_day.view(-1).cpu().numpy()
# reshape predictions to (n, len(quantiles))
predictions_np = predictions.cpu().numpy().reshape(-1, len(self.quantiles))
@@ -273,11 +404,16 @@ class NonAutoRegressiveQuantileRegression(Trainer):
fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day_np, name="Next Day"))
for i, q in enumerate(self.quantiles):
fig.add_trace(go.Scatter(x=96 + np.arange(96), y=predictions_np[:, i],
name=f"Prediction (Q={q})", line=dict(dash='dash')))
fig.add_trace(
go.Scatter(
x=96 + np.arange(96),
y=predictions_np[:, i],
name=f"Prediction (Q={q})",
line=dict(dash="dash"),
)
)
# Update the layout
fig.update_layout(title="Predictions and Quantiles", showlegend=show_legend)
return fig
return fig