Rewrote dataset to be able to include new features

This commit is contained in:
Victor Mylle
2023-11-08 23:17:47 +00:00
parent 56c763a6f4
commit 2f48363292
10 changed files with 311218 additions and 118 deletions

View File

@@ -10,19 +10,16 @@ from plotly.subplots import make_subplots
from trainers.trainer import Trainer
class AutoRegressiveTrainer(Trainer):
def debug_plots(self, task, train: bool, samples, epoch):
X, y = samples
X = X.to(self.device)
num_samples = len(X)
def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
num_samples = len(sample_indices)
rows = num_samples # One row per sample since we only want one column
cols = 1
fig = make_subplots(rows=rows, cols=cols, subplot_titles=[f'Sample {i+1}' for i in range(num_samples)])
for i, (current_day, next_day) in enumerate(zip(X, y)):
predictions = self.predict_auto_regressive(current_day)
sub_fig = self.get_plot(current_day, next_day, predictions, show_legend=(i == 0))
for i, idx in enumerate(sample_indices):
initial, predictions, target = self.auto_regressive(data_loader, idx)
sub_fig = self.get_plot(initial, target, predictions, show_legend=(i == 0))
row = i + 1
col = 1
@@ -30,7 +27,7 @@ class AutoRegressiveTrainer(Trainer):
for trace in sub_fig.data:
fig.add_trace(trace, row=row, col=col)
loss = self.criterion(predictions.to(self.device), next_day.to(self.device)).item()
loss = self.criterion(predictions.to(self.device), target.to(self.device)).item()
fig['layout']['annotations'][i].update(text=f"{loss.__class__.__name__}: {loss:.6f}")
@@ -46,14 +43,38 @@ class AutoRegressiveTrainer(Trainer):
figure=fig
)
def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
self.model.eval()
target_full = []
predictions_full = []
def predict_auto_regressive(self, initial_sequence: torch.Tensor, sequence_length: int = 96):
initial_sequence = initial_sequence.to(self.device)
prev_features, target = data_loader.dataset[idx]
prev_features = prev_features.to(self.device)
return predict_auto_regressive(self.model, initial_sequence, sequence_length)
initial_sequence = prev_features[:96]
def random_day_prediction(self):
current_day_features, next_day_features = self.data_processor.get_random_test_day()
target_full.append(target)
with torch.no_grad():
prediction = self.model(prev_features.unsqueeze(0))
predictions_full.append(prediction.squeeze(-1))
predictions = self.predict_auto_regressive(current_day_features)
return current_day_features, next_day_features, predictions
for i in range(sequence_length - 1):
new_features = torch.cat((prev_features[1:97].cpu(), prediction.squeeze(-1).cpu()), dim=0)
# get the other needed features
other_features, new_target = data_loader.dataset.random_day_autoregressive(idx + i + 1)
if other_features is not None:
prev_features = torch.cat((new_features, other_features), dim=0)
else:
prev_features = new_features
# add target to target_full
target_full.append(new_target)
# predict
with torch.no_grad():
prediction = self.model(new_features.unsqueeze(0).to(self.device))
predictions_full.append(prediction.squeeze(-1))
return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.stack(target_full).cpu()

View File

@@ -20,10 +20,6 @@ class QuantileTrainer(AutoRegressiveTrainer):
criterion = PinballLoss(quantiles=quantiles_tensor)
super().__init__(model=model, optimizer=optimizer, criterion=criterion, data_processor=data_processor, device=device, clearml_helper=clearml_helper, debug=debug)
def predict_auto_regressive(self, initial_sequence: torch.Tensor, sequence_length: int = 96):
initial_sequence = initial_sequence.to(self.device)
return predict_auto_regressive_quantile(self.model, self.sample_from_dist, initial_sequence, self.quantiles, sequence_length)
def log_final_metrics(self, task, dataloader, train: bool = True):
metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
@@ -84,6 +80,52 @@ class QuantileTrainer(AutoRegressiveTrainer):
fig.update_layout(title="Predictions and Quantiles of the Linear Model", showlegend=show_legend)
return fig
def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
    """Roll the quantile model forward auto-regressively from dataset index `idx`.

    Starting from the sample at `idx`, predicts `sequence_length` steps.
    At each step a scalar is sampled from the predicted quantile
    distribution (via `sample_from_dist`) and fed back into the input
    window; the remaining exogenous features for the next step are
    fetched from the dataset via `random_day_autoregressive`.

    Returns a 3-tuple of CPU tensors:
    (initial input window, stacked per-step quantile predictions,
    stacked per-step targets).
    """
    self.model.eval()
    target_full = []          # ground-truth target collected for every step
    predictions_sampled = []  # scalar draws from each step's quantile prediction
    predictions_full = []     # full quantile output, one entry per step
    prev_features, target = data_loader.dataset[idx]
    prev_features = prev_features.to(self.device)
    # assumes the first 96 entries of the feature vector are the input
    # window of the target series — TODO confirm against the dataset layout
    initial_sequence = prev_features[:96]
    target_full.append(target)
    with torch.no_grad():
        prediction = self.model(prev_features.unsqueeze(0))
    predictions_full.append(prediction.squeeze(0))
    # sample from the distribution
    sample = self.sample_from_dist(self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy())
    predictions_sampled.append(sample)
    for i in range(sequence_length - 1):
        # slide the 96-step window forward by one and append the last sample
        new_features = torch.cat((prev_features[1:97].cpu(), torch.tensor([predictions_sampled[-1]])), dim=0)
        new_features = new_features.float()
        # get the other needed features
        other_features, new_target = data_loader.dataset.random_day_autoregressive(idx + i + 1)
        if other_features is not None:
            prev_features = torch.cat((new_features, other_features), dim=0)
        else:
            prev_features = new_features
        # add target to target_full
        target_full.append(new_target)
        # predict
        with torch.no_grad():
            prediction = self.model(new_features.unsqueeze(0).to(self.device))
        predictions_full.append(prediction.squeeze(0))
        # sample from the distribution
        sample = self.sample_from_dist(self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy())
        predictions_sampled.append(sample)
    return initial_sequence.cpu(), torch.stack(predictions_full).cpu(), torch.stack(target_full).cpu()
@staticmethod
def sample_from_dist(quantiles, output_values):

View File

@@ -70,23 +70,22 @@ class Trainer:
return task
def random_samples(self, train: bool = True, num_samples: int = 10):
random_X = []
random_Y = []
train_loader, test_loader = self.data_processor.get_dataloaders(predict_sequence_length=self.model.output_size)
for _ in range(num_samples):
X, y = self.data_processor.get_random_day(train=train)
random_X.append(X)
random_Y.append(y)
if train:
loader = train_loader
else:
loader = test_loader
indices = np.random.randint(0, len(loader.dataset) - 1, size=num_samples)
return indices
random_X = torch.stack(random_X)
random_Y = torch.stack(random_Y)
return random_X, random_Y
def train(self, epochs: int):
train_loader, test_loader = self.data_processor.get_dataloaders(predict_sequence_length=self.model.output_size)
train_random_X, train_random_y = self.random_samples(train=True)
test_random_X, test_random_y = self.random_samples(train=False)
train_samples = self.random_samples(train=True)
test_samples = self.random_samples(train=False)
task = self.init_clearml_task()
@@ -129,8 +128,8 @@ class Trainer:
if epoch % self.plot_every_n_epochs == 0:
self.debug_plots(task, True, (train_random_X, train_random_y), epoch)
self.debug_plots(task, False, (test_random_X, test_random_y), epoch)
self.debug_plots(task, True, train_loader, train_samples, epoch)
self.debug_plots(task, False, test_loader, test_samples, epoch)
if task:
self.finish_training(task=task)
@@ -144,6 +143,7 @@ class Trainer:
with torch.no_grad():
for inputs, targets in dataloader:
inputs, targets = inputs.to(self.device), targets
outputs = self.model(inputs)
inversed_outputs = torch.tensor(self.data_processor.inverse_transform(outputs))
@@ -215,22 +215,25 @@ class Trainer:
return fig
def debug_plots(self, task, train: bool, samples, epoch):
X, y = samples
X = X.to(self.device)
num_samples = len(X)
def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
num_samples = len(sample_indices)
rows = num_samples # One row per sample since we only want one column
cols = 1
fig = make_subplots(rows=rows, cols=cols, subplot_titles=[f'Sample {i+1}' for i in range(num_samples)])
for i, (current_day, next_day) in enumerate(zip(X, y)):
for i, idx in enumerate(sample_indices):
features, target = data_loader.dataset[idx]
features = features.to(self.device)
target = target.to(self.device)
self.model.eval()
with torch.no_grad():
predictions = self.model(current_day).cpu()
predictions = self.model(features).cpu()
sub_fig = self.get_plot(current_day, next_day, predictions, show_legend=(i == 0))
sub_fig = self.get_plot(features[:96], target, predictions, show_legend=(i == 0))
row = i + 1
col = 1
@@ -239,7 +242,7 @@ class Trainer:
fig.add_trace(trace, row=row, col=col)
loss = self.criterion(predictions.to(self.device), next_day.squeeze(-1).to(self.device)).item()
loss = self.criterion(predictions.to(self.device), target.squeeze(-1).to(self.device)).item()
fig['layout']['annotations'][i].update(text=f"{loss.__class__.__name__}: {loss:.6f}")