Added trainer for Diffusion model
This commit is contained in:
12
Reports/Presentatie_December/feedback.md
Normal file
12
Reports/Presentatie_December/feedback.md
Normal file
@@ -0,0 +1,12 @@
|
||||
Compare bid ladders -> Why use the one from yesterday? Is it a lot worse to use? Plot the reconstructed price once with yesterday's ladder and once with today's.
|
||||
|
||||
Introduction about electricity market more visual and explain better. Also like the reconstruction of the CRPS
|
||||
|
||||
Show formula of CRPS and explain it better. (Root squared error !!!!)
|
||||
|
||||
Quantiles are always overconfident, but just show that the lower bound (ondergrens) is too high and the upper bound (bovengrens) is too low.
|
||||
|
||||
Determine thresholds for each sample and then take the mean.
|
||||
|
||||
Showing CDF of the quantiles -> retransform to MWh
|
||||
|
||||
@@ -118,5 +118,6 @@ Test data: 01-01-2023 until 08-10–2023
|
||||
| Policy | Threshold Step Size (€) | Total Profit (€) | Charge Cycles |
|
||||
|--------|---------------------|--------------|---------------|
|
||||
| Baseline (charge: 150, discharge: 175) | 25 | 251202.59 | 725 |
|
||||
| Policy using predicted NRV | 50 | 325362.81 | 856 |
|
||||
| Policy using predicted NRV | 25 | 334058.65 | 862 |
|
||||
| Policy using predicted NRV (mean reconstructed imbalance price) | 50 | 325362.81 | 856 |
|
||||
| Policy using predicted NRV (mean reconstructed imbalance price) | 25 | 334058.65 | 862 |
|
||||
| Policy using predicted NRV (mean thresholds) | 25 | 339846.9062 | 842 |
|
||||
496
src/models/diffusion.ipynb
Normal file
496
src/models/diffusion.ipynb
Normal file
File diff suppressed because one or more lines are too long
47
src/models/diffusion_model.py
Normal file
47
src/models/diffusion_model.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
class DiffusionModel(nn.Module):
    """Base noise-prediction network for a DDPM-style diffusion model.

    Subclasses populate ``self.layers`` (alternating Linear/ReLU). The
    timestep embedding and the conditioning inputs are re-concatenated to
    the activations after every non-ReLU layer except the final one, so
    each linear layer sees ``(hidden, t_embedding, inputs)``.
    """

    def __init__(self, time_dim: int = 64):
        super().__init__()
        # Dimensionality of the sinusoidal timestep embedding.
        self.time_dim = time_dim
        # Left empty here; concrete architectures append layers.
        self.layers = nn.ModuleList()

    def pos_encoding(self, t, channels):
        """Sinusoidal positional encoding of timesteps.

        Args:
            t: float tensor of shape (batch, 1).
            channels: width of the returned embedding (assumed even).

        Returns:
            Tensor of shape (batch, channels): [sin terms | cos terms].
        """
        freqs = 1.0 / (
            10000 ** (torch.arange(0, channels, 2).float() / channels)
        )
        angles = t.repeat(1, channels // 2) * freqs.to(t.device)
        return torch.cat((torch.sin(angles), torch.cos(angles)), dim=-1)

    def forward(self, x, t, inputs):
        """Predict the noise in ``x`` at timestep ``t`` given ``inputs``."""
        t = self.pos_encoding(t.unsqueeze(-1).float(), self.time_dim)

        h = torch.cat((x, t, inputs), dim=-1)
        for layer in self.layers[:-1]:
            h = layer(h)
            # Re-attach the conditioning after every layer except the
            # activations (ReLU output is already the concatenated block).
            if not isinstance(layer, nn.ReLU):
                h = torch.cat((h, t, inputs), dim=-1)

        return self.layers[-1](h)
|
||||
|
||||
class SimpleDiffusionModel(DiffusionModel):
    """MLP denoiser whose every linear layer also consumes the timestep
    embedding and the extra conditioning features of width
    ``other_inputs_dim``; the output head maps back to ``input_size``."""

    def __init__(self, input_size: int, hidden_sizes: list, other_inputs_dim: int, time_dim: int = 64):
        super().__init__(time_dim)

        # Width of the conditioning vector concatenated before each layer.
        self.other_inputs_dim = other_inputs_dim

        # Every Linear receives its activations plus (time_dim +
        # other_inputs_dim) extra features from the re-concatenation
        # performed in DiffusionModel.forward.
        extra = time_dim + other_inputs_dim
        widths = [input_size] + list(hidden_sizes)
        for prev, cur in zip(widths[:-1], widths[1:]):
            self.layers.append(nn.Linear(prev + extra, cur))
            self.layers.append(nn.ReLU())

        # Output head: back to the series length, no activation.
        self.layers.append(nn.Linear(hidden_sizes[-1] + extra, input_size))
|
||||
2160
src/notebooks/diffusion-training.ipynb
Normal file
2160
src/notebooks/diffusion-training.ipynb
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -2,6 +2,7 @@ import pandas as pd
|
||||
import numpy as np
|
||||
import itertools
|
||||
from tqdm import tqdm
|
||||
import torch
|
||||
|
||||
imbalance_prices = "data/imbalance_prices.csv"
|
||||
|
||||
@@ -126,98 +127,65 @@ class BaselinePolicy():
|
||||
|
||||
return df, df_test
|
||||
|
||||
def get_optimal_thresholds(self, imbalance_prices, charge_thresholds, discharge_thresholds, tqdm=True):
|
||||
df = None
|
||||
|
||||
def get_optimal_thresholds(self, imbalance_prices, charge_thresholds, discharge_thresholds):
|
||||
threshold_pairs = itertools.product(charge_thresholds, discharge_thresholds)
|
||||
threshold_pairs = filter(lambda x: x[0] < x[1], threshold_pairs)
|
||||
|
||||
# disable tqdm if necessary
|
||||
if tqdm:
|
||||
threshold_pairs = tqdm(threshold_pairs)
|
||||
threshold_pairs = list(threshold_pairs)
|
||||
|
||||
for charge_threshold, discharge_threshold in threshold_pairs:
|
||||
self.battery.reset()
|
||||
total_charging_cost = 0
|
||||
total_discharging_profit = 0
|
||||
number_of_charges = 0
|
||||
number_of_discharges = 0
|
||||
mean_charging_price = 0
|
||||
mean_discharging_price = 0
|
||||
charge_thresholds = torch.tensor([x[0] for x in threshold_pairs])
|
||||
discharge_thresholds = torch.tensor([x[1] for x in threshold_pairs])
|
||||
|
||||
for index, price in enumerate(imbalance_prices):
|
||||
if price < charge_threshold:
|
||||
total_charging_cost += self.battery.charge() * price
|
||||
mean_charging_price += price
|
||||
number_of_charges += 1
|
||||
elif price > discharge_threshold:
|
||||
total_discharging_profit += self.battery.discharge() * price
|
||||
mean_discharging_price += price
|
||||
number_of_discharges += 1
|
||||
next_day_charge_thresholds, next_day_discharge_thresholds = [], []
|
||||
|
||||
if number_of_charges == 0:
|
||||
mean_charging_price = 0
|
||||
else:
|
||||
mean_charging_price /= number_of_charges
|
||||
for ip in imbalance_prices:
|
||||
new_imbalance_prices = ip.repeat(len(charge_thresholds), 1)
|
||||
|
||||
if number_of_discharges == 0:
|
||||
mean_discharging_price = 0
|
||||
else:
|
||||
mean_discharging_price /= number_of_discharges
|
||||
profits, charge_cycles = self.simulate(new_imbalance_prices, charge_thresholds, discharge_thresholds)
|
||||
|
||||
new_df = pd.DataFrame([[charge_threshold, discharge_threshold, total_charging_cost, total_discharging_profit, total_discharging_profit - total_charging_cost, self.battery.charge_cycles, mean_charging_price, mean_discharging_price]], columns=["Charge threshold", "Discharge threshold", "Charging Cost", "Discharging Profit", "Total Profit", "Charge cycles", "Mean charging price", "Mean discharging price"])
|
||||
if df is None:
|
||||
df = new_df
|
||||
else:
|
||||
df = pd.concat([df, new_df])
|
||||
sorted_profits, sorted_indices = torch.sort(profits, descending=True)
|
||||
|
||||
df = df.sort_values(by=['Total Profit'], ascending=False)
|
||||
# Reorder other tensors based on sorted indices
|
||||
sorted_charge_thresholds = charge_thresholds[sorted_indices]
|
||||
sorted_discharge_thresholds = discharge_thresholds[sorted_indices]
|
||||
|
||||
return df
|
||||
# get the optimal thresholds
|
||||
next_day_charge_threshold = sorted_charge_thresholds[0]
|
||||
next_day_discharge_threshold = sorted_discharge_thresholds[0]
|
||||
|
||||
def simulate(self, imbalance_prices, charge_threshold, discharge_threshold):
|
||||
self.battery.reset()
|
||||
total_charging_cost = 0
|
||||
total_discharging_profit = 0
|
||||
number_of_charges = 0
|
||||
number_of_discharges = 0
|
||||
mean_charging_price = 0
|
||||
mean_discharging_price = 0
|
||||
next_day_charge_thresholds.append(next_day_charge_threshold)
|
||||
next_day_discharge_thresholds.append(next_day_discharge_threshold)
|
||||
|
||||
# for each timestep also print what is happening
|
||||
for i, price in enumerate(imbalance_prices):
|
||||
if price < charge_threshold:
|
||||
charge = self.battery.charge()
|
||||
total_charging_cost += charge * price
|
||||
mean_charging_price += price
|
||||
number_of_charges += 1
|
||||
# print(f"{i}: Charging battery with {charge}MWh at {price}€/MWh")
|
||||
# return float tensors
|
||||
return torch.tensor(next_day_charge_thresholds, dtype=torch.float), torch.tensor(next_day_discharge_thresholds, dtype=torch.float)
|
||||
|
||||
elif price > discharge_threshold:
|
||||
discharge = self.battery.discharge()
|
||||
total_discharging_profit += discharge * price
|
||||
mean_discharging_price += price
|
||||
number_of_discharges += 1
|
||||
# print(f"{i}: Discharging battery with {discharge}MWh at {price}€/MWh")
|
||||
def simulate(self, price_matrix, charge_thresholds: torch.tensor, discharge_thresholds: torch.tensor):
|
||||
batch_size = price_matrix.shape[0]
|
||||
|
||||
if number_of_charges == 0:
|
||||
mean_charging_price = 0
|
||||
else:
|
||||
mean_charging_price /= number_of_charges
|
||||
charge_matrix = torch.zeros(price_matrix.shape)
|
||||
|
||||
if number_of_discharges == 0:
|
||||
mean_discharging_price = 0
|
||||
else:
|
||||
mean_discharging_price /= number_of_discharges
|
||||
charge_thresholds_reshaped = charge_thresholds.repeat(price_matrix.shape[1], 1).T
|
||||
discharge_thresholds_reshaped = discharge_thresholds.repeat(price_matrix.shape[1], 1).T
|
||||
|
||||
# print(f"Total charging cost: {total_charging_cost}")
|
||||
# print(f"Total discharging profit: {total_discharging_profit}")
|
||||
# print(f"Total profit: {total_discharging_profit - total_charging_cost}")
|
||||
# print(f"Charge cycles: {self.battery.charge_cycles}")
|
||||
# print(f"Mean charging price: {mean_charging_price}")
|
||||
# print(f"Mean discharging price: {mean_discharging_price}")
|
||||
charge_matrix[price_matrix < charge_thresholds_reshaped] = 1
|
||||
|
||||
return total_discharging_profit - total_charging_cost, self.battery.charge_cycles
|
||||
charge_matrix[price_matrix > discharge_thresholds_reshaped] = -1
|
||||
|
||||
battery_states = torch.zeros(batch_size)
|
||||
profits = torch.zeros(batch_size)
|
||||
charge_cycles = torch.zeros(batch_size)
|
||||
|
||||
for i in range(price_matrix.shape[1]):
|
||||
discharge_mask = ~((charge_matrix[:, i] == -1) & (battery_states == 0))
|
||||
charge_mask = ~((charge_matrix[:, i] == 1) & (battery_states == self.battery.capacity))
|
||||
|
||||
mask = discharge_mask & charge_mask
|
||||
|
||||
battery_states[mask] += charge_matrix[:, i][mask] * self.battery.power / 4
|
||||
profits[mask] += -charge_matrix[:, i][mask] * price_matrix[:, i][mask] * self.battery.power / 4
|
||||
charge_cycles[mask] += torch.abs(charge_matrix[:, i][mask]) * (self.battery.power / 4) / self.battery.capacity
|
||||
|
||||
return profits, charge_cycles
|
||||
|
||||
|
||||
|
||||
|
||||
207
src/trainers/diffusion_trainer.py
Normal file
207
src/trainers/diffusion_trainer.py
Normal file
@@ -0,0 +1,207 @@
|
||||
from clearml import Task
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torchinfo import summary
|
||||
from tqdm import tqdm
|
||||
from src.data.preprocessing import DataProcessor
|
||||
|
||||
from src.models.diffusion_model import DiffusionModel
|
||||
import numpy as np
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import seaborn as sns
|
||||
import matplotlib.patches as mpatches
|
||||
|
||||
class DiffusionTrainer:
    """Train and evaluate a DDPM-style diffusion model on daily time series.

    Implements the standard denoising-diffusion training loop (noise a
    series at a random timestep, regress the noise with MSE) and ancestral
    sampling with a linear beta schedule. Optionally logs losses and
    quantile-band debug plots to a ClearML task.
    """

    def __init__(self, model: nn.Module, data_processor: DataProcessor, device: torch.device):
        """
        Args:
            model: noise-prediction network, called as ``model(x, t, inputs)``.
            data_processor: supplies the train/test dataloaders.
            device: device used for training and sampling.
        """
        self.model = model
        self.device = device

        # Linear beta schedule hyperparameters (standard DDPM values).
        self.noise_steps = 1000
        self.beta_start = 1e-4
        self.beta_end = 0.02
        # Length of one generated series; 96 steps (presumably a day of
        # quarter-hours -- TODO confirm against the data processor).
        self.ts_length = 96

        self.data_processor = data_processor

        # Precomputed schedule: beta_t, alpha_t = 1 - beta_t, and the
        # cumulative product alpha_hat_t used by the closed-form noising.
        self.beta = torch.linspace(self.beta_start, self.beta_end, self.noise_steps).to(self.device)
        self.alpha = 1. - self.beta
        self.alpha_hat = torch.cumprod(self.alpha, dim=0)

    def noise_time_series(self, x: torch.Tensor, t: torch.Tensor):
        """Noise ``x`` forward to timestep ``t`` in closed form.

        Args:
            x (torch.Tensor): clean series, shape (batch_size, time_steps).
            t (torch.Tensor): integer timestep index per sample, shape (batch_size,).

        Returns:
            (noised_x, noise): both shaped like ``x``.
        """
        sqrt_alpha_hat = torch.sqrt(self.alpha_hat[t])[:, None]
        sqrt_one_minus_alpha_hat = torch.sqrt(1. - self.alpha_hat[t])[:, None]
        noise = torch.randn_like(x)
        return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * noise, noise

    def sample_timesteps(self, n: int):
        """Draw ``n`` timesteps uniformly from [1, noise_steps).

        Args:
            n (int): number of samples
        """
        return torch.randint(low=1, high=self.noise_steps, size=(n,))

    def sample(self, model: DiffusionModel, n: int, inputs: torch.Tensor):
        """Generate series conditioned on ``inputs`` by reverse diffusion.

        ``inputs`` is tiled ``n`` times along the batch axis: a 1-D
        conditioning vector yields ``n`` rows, a (B, F) batch yields
        ``n * B`` rows (the whole batch repeated n times). The model is
        switched back to train mode before returning.
        """
        inputs = inputs.repeat(n, 1).to(self.device)
        model.eval()
        with torch.no_grad():
            # Start from pure Gaussian noise and denoise step by step.
            x = torch.randn(inputs.shape[0], self.ts_length).to(self.device)
            for i in tqdm(reversed(range(1, self.noise_steps)), position=0):
                t = (torch.ones(inputs.shape[0]) * i).long().to(self.device)
                predicted_noise = model(x, t, inputs)
                alpha = self.alpha[t][:, None]
                alpha_hat = self.alpha_hat[t][:, None]
                beta = self.beta[t][:, None]

                # No extra noise is injected on the final (t == 1) step.
                noise = torch.randn_like(x) if i > 1 else torch.zeros_like(x)

                x = 1 / torch.sqrt(alpha) * (x - ((1 - alpha) / (torch.sqrt(1 - alpha_hat))) * predicted_noise) + torch.sqrt(beta) * noise
        model.train()
        return x

    def random_samples(self, train: bool = True, num_samples: int = 10):
        """Pick ``num_samples`` random dataset indices for debug plotting."""
        train_loader, test_loader = self.data_processor.get_dataloaders(
            predict_sequence_length=96
        )
        loader = train_loader if train else test_loader

        # np.random.randint's `high` is already exclusive; the original
        # `len(dataset) - 1` could never select the last sample.
        return np.random.randint(0, len(loader.dataset), size=num_samples)

    def init_clearml_task(self, task):
        """Tag ``task``, attach a model summary and connect the data processor."""
        task.add_tags(self.model.__class__.__name__)
        task.add_tags(self.__class__.__name__)

        # Dummy batch used only to render the torchinfo summary.
        input_data = torch.randn(1024, 96).to(self.device)
        time_steps = torch.randn(1024).long().to(self.device)
        other_input_data = torch.randn(1024, self.model.other_inputs_dim).to(self.device)

        task.set_configuration_object("model", str(summary(self.model, input_data=[input_data, time_steps, other_input_data])))

        self.data_processor = task.connect(self.data_processor, name="data_processor")

    def train(self, epochs: int, learning_rate: float, task: Task = None):
        """Run the training loop.

        Args:
            epochs: number of passes over the training set.
            learning_rate: Adam learning rate.
            task: optional ClearML task; when None nothing is logged and
                no debug plots are produced.
        """
        optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
        criterion = nn.MSELoss()
        self.model.to(self.device)

        if task:
            self.init_clearml_task(task)

        train_loader, test_loader = self.data_processor.get_dataloaders(
            predict_sequence_length=self.ts_length
        )

        train_sample_indices = self.random_samples(train=True, num_samples=10)
        test_sample_indices = self.random_samples(train=False, num_samples=10)

        for epoch in range(epochs):
            running_loss = 0.0
            for batch in train_loader:
                base_pattern, time_series = batch[0], batch[1]
                time_series = time_series.to(self.device)
                base_pattern = base_pattern.to(self.device)

                # Noise the batch at random timesteps and regress the noise.
                t = self.sample_timesteps(time_series.shape[0]).to(self.device)
                x_t, noise = self.noise_time_series(time_series, t)
                predicted_noise = self.model(x_t, t, base_pattern)
                loss = criterion(predicted_noise, noise)

                running_loss += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # Mean per-batch loss for the epoch. The original divided by
            # len(dataset) and then logged only the LAST batch's loss.
            running_loss /= len(train_loader)

            if task:
                task.get_logger().report_scalar(
                    title=criterion.__class__.__name__,
                    series='train',
                    iteration=epoch,
                    value=running_loss,
                )

            # Guard on `task`: debug_plots reports to ClearML and crashed
            # with AttributeError when task was None in the original.
            if task and epoch % 100 == 0 and epoch != 0:
                self.debug_plots(task, True, train_loader, train_sample_indices, epoch)
                self.debug_plots(task, False, test_loader, test_sample_indices, epoch)

        if task:
            task.close()

    def debug_plots(self, task, training: bool, data_loader, sample_indices, epoch):
        """Plot empirical quantile bands of generated samples vs. the real
        series for each selected dataset index and log the figure to ClearML."""
        for i, idx in enumerate(sample_indices):
            features, target, _ = data_loader.dataset[idx]

            features = features.to(self.device)

            self.model.eval()
            with torch.no_grad():
                samples = self.sample(self.model, 100, features).cpu().numpy()

            # Central-interval bounds from the empirical sample quantiles.
            ci_99_upper = np.quantile(samples, 0.99, axis=0)
            ci_99_lower = np.quantile(samples, 0.01, axis=0)

            ci_95_upper = np.quantile(samples, 0.95, axis=0)
            ci_95_lower = np.quantile(samples, 0.05, axis=0)

            ci_90_upper = np.quantile(samples, 0.9, axis=0)
            ci_90_lower = np.quantile(samples, 0.1, axis=0)

            # 50% central interval is (0.25, 0.75); the original used 0.5
            # for both bounds, which draws an empty band.
            ci_50_upper = np.quantile(samples, 0.75, axis=0)
            ci_50_lower = np.quantile(samples, 0.25, axis=0)

            sns.set_theme()
            time_steps = np.arange(0, 96)

            fig, ax = plt.subplots(figsize=(20, 10))
            ax.plot(time_steps, samples.mean(axis=0), label="Mean of NRV samples", linewidth=3)

            ax.fill_between(time_steps, ci_99_lower, ci_99_upper, color='b', alpha=0.2, label='99% Interval')
            ax.fill_between(time_steps, ci_95_lower, ci_95_upper, color='b', alpha=0.2, label='95% Interval')
            ax.fill_between(time_steps, ci_90_lower, ci_90_upper, color='b', alpha=0.2, label='90% Interval')
            ax.fill_between(time_steps, ci_50_lower, ci_50_upper, color='b', alpha=0.2, label='50% Interval')

            ax.plot(target, label="Real NRV", linewidth=3)

            # Legend patches mimic the stacked band opacities.
            ci_99_patch = mpatches.Patch(color='b', alpha=0.3, label='99% Interval')
            ci_95_patch = mpatches.Patch(color='b', alpha=0.4, label='95% Interval')
            ci_90_patch = mpatches.Patch(color='b', alpha=0.5, label='90% Interval')
            ci_50_patch = mpatches.Patch(color='b', alpha=0.6, label='50% Interval')

            ax.legend(handles=[ci_99_patch, ci_95_patch, ci_90_patch, ci_50_patch, ax.lines[0], ax.lines[1]])

            task.get_logger().report_matplotlib_figure(
                title="Training" if training else "Testing",
                series=f'Sample {i}',
                iteration=epoch,
                figure=fig,
            )

            # Close this figure explicitly instead of whatever is "current".
            plt.close(fig)

    def test(self, data_loader: torch.utils.data.DataLoader):
        """Draw 10 trajectories per input row and average them per input.

        NOTE(review): the averaged sample is computed and then discarded
        each iteration -- presumably metric computation was still to be
        added; verify before relying on this method.
        """
        for inputs, targets, _ in data_loader:
            inputs, targets = inputs.to(self.device), targets.to(self.device)

            sample = self.sample(self.model, 10, inputs)

            # sample() tiles the WHOLE batch 10 times, so rows are ordered
            # [batch, batch, ...]; group by draw first. The original
            # view(-1, 10, T) averaged 10 DIFFERENT inputs together.
            sample = sample.view(10, -1, self.ts_length)
            sample = torch.mean(sample, dim=0)
|
||||
@@ -48,17 +48,30 @@ class ImbalancePriceCalculator:
|
||||
return datetime in self.bid_ladder.index
|
||||
|
||||
|
||||
def get_imbalance_price(self, datetime, volume):
|
||||
if volume < 0:
|
||||
volume = -volume
|
||||
bid_ladder = self.bid_ladder.loc[self.bid_ladder.index == datetime]["bid_ladder_dec"].values[0]
|
||||
else:
|
||||
bid_ladder = self.bid_ladder.loc[self.bid_ladder.index == datetime]["bid_ladder_inc"].values[0]
|
||||
def get_imbalance_price_vectorized(self, datetimes, volumes):
|
||||
# Convert inputs to numpy arrays
|
||||
datetimes = np.array(datetimes)
|
||||
|
||||
for bid in bid_ladder:
|
||||
if bid[0] > volume:
|
||||
return bid[1]
|
||||
return bid_ladder[-1][1]
|
||||
|
||||
# Fetch bid ladders for each datetime and determine if it's inc or dec based on original volume sign
|
||||
bid_ladders = np.where(np.array(volumes) < 0,
|
||||
self.bid_ladder.loc[datetimes, "bid_ladder_dec"].values,
|
||||
self.bid_ladder.loc[datetimes, "bid_ladder_inc"].values)
|
||||
|
||||
volumes = np.abs(np.array(volumes)) # Make all volumes positive
|
||||
|
||||
# Vectorized operation to get prices from bid ladders
|
||||
prices = []
|
||||
for i, bid_ladder in enumerate(bid_ladders):
|
||||
volume = volumes[i]
|
||||
for bid in bid_ladder:
|
||||
if bid[0] > volume:
|
||||
prices.append(bid[1])
|
||||
break
|
||||
else:
|
||||
prices.append(bid_ladder[-1][1]) # Append last price if no break occurred
|
||||
|
||||
return np.array(prices)
|
||||
|
||||
|
||||
def plot(self, datetime):
|
||||
@@ -88,58 +101,70 @@ class ImbalancePriceCalculator:
|
||||
|
||||
fig.show()
|
||||
|
||||
def get_imbalance_prices_2023_for_date(self, date, NRV_predictions):
|
||||
imbalance_prices = []
|
||||
for i in range(1, len(NRV_predictions)):
|
||||
datetime = date + pd.Timedelta(hours=i-1)
|
||||
datetime = pytz.utc.localize(datetime)
|
||||
imbalance_prices.append(self.get_imbalance_price_2023(datetime, NRV_predictions[i-1], NRV_predictions[i]))
|
||||
return [x[1] for x in imbalance_prices]
|
||||
def get_imbalance_prices_2023_for_date_vectorized(self, date, NRV_predictions_matrix):
|
||||
# Initialize an empty list for the 2D results
|
||||
all_imbalance_prices = []
|
||||
|
||||
def get_imbalance_price_2023(self, datetime, NRV_PREV, NRV):
|
||||
MIP = self.get_imbalance_price(datetime, abs(NRV))
|
||||
MDP = self.get_imbalance_price(datetime, -abs(NRV))
|
||||
# Iterate over each row in the NRV_predictions matrix
|
||||
for NRV_predictions in NRV_predictions_matrix:
|
||||
# Create a range of datetimes for the entire day
|
||||
hours = pd.TimedeltaIndex(range(len(NRV_predictions) - 1), 'H')
|
||||
datetimes = date + hours
|
||||
datetimes = datetimes.tz_localize(pytz.utc)
|
||||
|
||||
return calculate_imbalance_price_2023(-NRV_PREV, -NRV, MIP, MDP)
|
||||
# Apply the vectorized imbalance price function to each row
|
||||
imbalance_prices = self.get_imbalance_price_2023(datetimes, NRV_predictions[:-1], NRV_predictions[1:])
|
||||
|
||||
# Extract the imbalance prices and add to the 2D list
|
||||
all_imbalance_prices.append(imbalance_prices[1])
|
||||
|
||||
return all_imbalance_prices
|
||||
|
||||
def get_imbalance_price_2023(self, datetimes, NRV_PREVS, NRVS):
|
||||
# create numpy arrays
|
||||
datetimes = np.array(datetimes)
|
||||
NRV_PREVS = np.array(NRV_PREVS)
|
||||
NRVS = np.array(NRVS)
|
||||
|
||||
MIPS = self.get_imbalance_price_vectorized(datetimes, np.abs(NRVS))
|
||||
MDPS = self.get_imbalance_price_vectorized(datetimes, -np.abs(NRVS))
|
||||
|
||||
return calculate_imbalance_price(-NRV_PREVS, -NRVS, MIPS, MDPS)
|
||||
|
||||
def calculate_imbalance_price(SI_PREV, SI, MIP, MDP):
|
||||
# Convert parameters to numpy arrays for vectorized operations
|
||||
SI_PREV = np.array(SI_PREV)
|
||||
SI = np.array(SI)
|
||||
MIP = np.array(MIP)
|
||||
MDP = np.array(MDP)
|
||||
|
||||
# parameters a, b, c, d
|
||||
a = 0 # EUR/MWh
|
||||
b = 200 # EUR/MWh
|
||||
c = 450 # MW
|
||||
d = 65 # MW
|
||||
|
||||
# SI = -NRV
|
||||
def calculate_imbalance_price_2023(SI_PREV: float, SI: float, MIP: float, MDP: float):
|
||||
# parameters a, b, c, d, x
|
||||
a = 0 # EUR/MWh
|
||||
b = 200 # EUR/MWh
|
||||
c = 450 # MW
|
||||
d = 65 # MW
|
||||
# x = average abs(SI) and abs(SI_PREV)
|
||||
x = (abs(SI_PREV) + abs(SI)) / 2
|
||||
x = (np.abs(SI_PREV) + np.abs(SI)) / 2
|
||||
|
||||
cp = 0
|
||||
if SI <= 0:
|
||||
if MIP > 200 and MIP <= 400:
|
||||
cp = (400-MIP)/200
|
||||
else:
|
||||
cp = 1
|
||||
else:
|
||||
if MDP >= 0:
|
||||
cp = 1
|
||||
elif MDP >= -200 and MDP < 0:
|
||||
cp = (MDP+200)/200
|
||||
# Calculate cp
|
||||
cp = np.zeros_like(SI)
|
||||
cp[(SI <= 0) & ((MIP > 200) & (MIP <= 400))] = (400 - MIP[(SI <= 0) & ((MIP > 200) & (MIP <= 400))]) / 200
|
||||
cp[(SI <= 0) & ~((MIP > 200) & (MIP <= 400))] = 1
|
||||
cp[(SI > 0) & (MDP >= 0)] = 1
|
||||
cp[(SI > 0) & (MDP >= -200) & (MDP < 0)] = (MDP[(SI > 0) & (MDP >= -200) & (MDP < 0)] + 200) / 200
|
||||
|
||||
# Calculate alpha
|
||||
alpha = np.zeros_like(SI)
|
||||
alpha[np.abs(SI) > 150] = (a + b / (1 + np.exp((c - x[np.abs(SI) > 150]) / d))) * cp[np.abs(SI) > 150]
|
||||
|
||||
alpha = 0
|
||||
if abs(SI) > 150:
|
||||
alpha = (a + b/(1+np.exp((c-x)/d))) * cp
|
||||
|
||||
# Calculate imbalance prices
|
||||
pos_imbalance_price = MIP + alpha
|
||||
neg_imbalance_price = MDP - alpha
|
||||
|
||||
# imbalance_price based on SI
|
||||
imbalance_price = 0
|
||||
if SI < 0:
|
||||
imbalance_price = pos_imbalance_price
|
||||
elif SI > 0:
|
||||
imbalance_price = neg_imbalance_price
|
||||
else:
|
||||
imbalance_price = (pos_imbalance_price + neg_imbalance_price) / 2
|
||||
imbalance_price = np.zeros_like(SI)
|
||||
imbalance_price[SI < 0] = pos_imbalance_price[SI < 0]
|
||||
imbalance_price[SI > 0] = neg_imbalance_price[SI > 0]
|
||||
imbalance_price[SI == 0] = (pos_imbalance_price[SI == 0] + neg_imbalance_price[SI == 0]) / 2
|
||||
|
||||
# return alpha, imbalance price
|
||||
return alpha, imbalance_price
|
||||
Reference in New Issue
Block a user