other changes

This commit is contained in:
Victor Mylle
2024-01-15 12:31:56 +00:00
parent 5f2418a205
commit 67cc6d4bb9
7 changed files with 855 additions and 482 deletions

View File

@@ -9,4 +9,3 @@ Overconfidence of quantiles throughout, but just show that the lower bound (ondergrens) is too high
Determine thresholds for each sample and take mean then. Determine thresholds for each sample and take mean then.
Showing CDF of the quantiles -> retransform to MWh Showing CDF of the quantiles -> retransform to MWh

View File

@@ -117,7 +117,32 @@ Test data: 01-01-2023 until 08-10-2023
| Policy | Threshold Step Size (€) | Total Profit (€) | Charge Cycles | | Policy | Threshold Step Size (€) | Total Profit (€) | Charge Cycles |
|--------|---------------------|--------------|---------------| |--------|---------------------|--------------|---------------|
| Baseline (charge: 150, discharge: 175) | 25 | 251202.59 | 725 | | Baseline (charge: 150, discharge: 175) | 25 | 251202.59 |  725 |
| Policy using predicted NRV (mean reconstructed imbalance price) | 50 | 325362.81 | 856 | | Baseline (yesterday imbalance price) | 25 | 342980.0938 | 903 |
| Policy using predicted NRV (mean reconstructed imbalance price) | 25 | 334058.65 | 862 | | GRU Predicted NRV (mean reconstructed imbalance price) | 50 | 325362.81 |  856 |
| Policy using predicted NRV (mean thresholds) | 25 | 339846.9062 | 842 | | GRU Predicted NRV (mean reconstructed imbalance price) | 25 | 334058.65 | 862 |
| GRU Predicted NRV (mean thresholds) | 25 | 339846.9062 | 842 |
| Diffusion Predicted NRV (mean thresholds) | 25 | 338168.0312 | 886 |
# Penalty parameter for charge cycles
| Policy | Penalty parameter | Total Profit (€) | Charge Cycles |
|--------|---------------------|--------------|-----------------|
| Baseline (yesterday imbalance price) | 10 | 335968.9062 | 895.4375 |
| Baseline (yesterday imbalance price) | 20 | 328577.2812 | 886.5625 |
| Baseline (yesterday imbalance price) | 50 | 306301.9688 | 850.9375 |
| Baseline (yesterday imbalance price) | 150 | 251367.0469 | 749.7500 |
| Diffusion Predicted NRV | 10 | 331288.7812 | 880.3750 |
| Diffusion Predicted NRV | 20 | 324568.7812 | 871.5000 |
| Diffusion Predicted NRV | 50 | 304761.1875 | 837.3125 |
| Diffusion Predicted NRV | 150 | 238441.5625 | 670.1875 |
# TODO
- [ ] Baseline
- [ ] Profit penalty parameter als over charge cycles voor een dag -> parameter bepalen op training data (convex probleem) (< 400 charge cycles per jaar) (over een dag kijken hoeveel charge cycles -> profit - penalty * charge cycles erover, (misschien belonen als eronder charge cycles))
- [ ] Meer verschil bekijken tussen GRU en diffusion
- [ ] Andere lagen voor diffusion model (GRU, kijken naar TSDiff)
- [ ] Policies met andere modellen (Linear, Non Linear)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,53 @@
import argparse
from clearml import Task, Model
from src.data import DataProcessor, DataConfig
import torch
# CLI: identify the ClearML task whose trained model should be loaded.
parser = argparse.ArgumentParser(description="Load a trained model from a ClearML task")
# ClearML task ids are hex strings (e.g. '67cc6d4bb9'), not integers, and
# load_model() below annotates task_id as str — so the argument must be str.
# required=True replaces the post-parse asserts: argparse reports a proper
# usage error, and asserts are stripped under `python -O`.
parser.add_argument('--task_id', type=str, required=True, help="ClearML task id of the training run")
parser.add_argument('--model_type', type=str, required=True, help="model type identifier")
args = parser.parse_args()
def load_model(task_id: str):
    """Load a trained model and its test dataloader from a ClearML task.

    Args:
        task_id: ClearML task id (hex string) of the training run whose
            configuration and checkpoint should be restored.

    Returns:
        Tuple ``(configuration, model, test_loader)`` where ``configuration``
        is the task's parameter dict, ``model`` is the restored network moved
        to CUDA (if available) and set to eval mode, and ``test_loader`` is
        the test dataloader built with a 96-step prediction horizon.
    """
    task = Task.get_task(task_id=task_id)
    configuration = task.get_parameters_as_dict()
    data_features = configuration['data_features']

    ### Data Config ###
    data_config = DataConfig()
    for key, value in data_features.items():
        # ClearML serializes parameters as strings, and bool("False") is
        # True — so parse the text explicitly instead of calling bool().
        setattr(data_config, key, str(value).strip().lower() in ("true", "1"))
    # These features are always disabled for this evaluation script,
    # regardless of what the training task used.
    data_config.PV_FORECAST = False
    data_config.PV_HISTORY = False
    data_config.QUARTER = False
    data_config.DAY_OF_WEEK = False

    ### Data Processor ###
    data_processor = DataProcessor(data_config, path="../../", lstm=False)
    data_processor.set_batch_size(8192)
    data_processor.set_full_day_skip(True)

    ### Model ###
    output_model_id = task.output_models_id["checkpoint"]
    clearml_model = Model(model_id=output_model_id)
    filename = clearml_model.get_weights()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(review): torch.load here assumes the checkpoint stores the full
    # pickled model object, not a state_dict — confirm against the training
    # script (the notebook diff in this commit switched between the two).
    model = torch.load(filename)
    model.to(device)
    model.eval()

    _, test_loader = data_processor.get_dataloaders(
        predict_sequence_length=96
    )
    return configuration, model, test_loader

View File

@@ -11,21 +11,6 @@
"text": [ "text": [
"InsecureRequestWarning: Certificate verification is disabled! Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\n" "InsecureRequestWarning: Certificate verification is disabled! Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\n"
] ]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Index(['datetime', 'nrv', 'load_forecast', 'total_load', 'wind_forecast',\n",
" 'wind_history', 'nominal_net_position', 'quarter', 'day_of_week'],\n",
" dtype='object')\n",
"Index(['datetime', 'nrv', 'load_forecast', 'total_load', 'wind_forecast',\n",
" 'wind_history', 'nominal_net_position', 'quarter', 'day_of_week'],\n",
" dtype='object')\n",
"Index(['datetime', 'nrv', 'load_forecast', 'total_load', 'wind_forecast',\n",
" 'wind_history', 'nominal_net_position', 'quarter', 'day_of_week'],\n",
" dtype='object')\n"
]
} }
], ],
"source": [ "source": [
@@ -84,19 +69,13 @@
"\n", "\n",
"model_parameters = configuration['model_parameters']\n", "model_parameters = configuration['model_parameters']\n",
"\n", "\n",
"# time_embedding = TimeEmbedding(data_processor.get_time_feature_size(), int(model_parameters[\"time_feature_embedding\"]))\n",
"# lstm_model = GRUModel(time_embedding.output_dim(inputDim), len(quantiles), hidden_size=int(model_parameters[\"hidden_size\"]), num_layers=int(model_parameters[\"num_layers\"]), dropout=float(model_parameters[\"dropout\"]))\n",
"# model = nn.Sequential(time_embedding, lstm_model)\n",
"\n",
"model = SimpleDiffusionModel(96, list(map(int, model_parameters[\"hidden_sizes\"].strip('[]').split(','))), other_inputs_dim=int(inputDim[1]), time_dim=int(model_parameters[\"time_dim\"]))\n",
"\n",
"output_model_id = task.output_models_id[\"checkpoint\"]\n", "output_model_id = task.output_models_id[\"checkpoint\"]\n",
"clearml_model = Model(model_id=output_model_id)\n", "clearml_model = Model(model_id=output_model_id)\n",
"filename = clearml_model.get_weights()\n", "filename = clearml_model.get_weights()\n",
"\n", "\n",
"model.load_state_dict(torch.load(filename))\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"model = torch.load(\"model.pt\")\n",
"\n",
"model.to(device)\n", "model.to(device)\n",
"model.eval()\n", "model.eval()\n",
"\n", "\n",
@@ -232,7 +211,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": null,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@@ -512,7 +491,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": 6,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@@ -562,7 +541,18 @@
" return reconstructed_profit, real_profit\n", " return reconstructed_profit, real_profit\n",
"\n", "\n",
"def get_next_day_profits_for_date(date, global_charging_threshold: float = None, global_discharging_threshold:float = None, print_results=True):\n", "def get_next_day_profits_for_date(date, global_charging_threshold: float = None, global_discharging_threshold:float = None, print_results=True):\n",
" charge_thresholds = np.arange(-100, 250, 25)\n",
" discharge_thresholds = np.arange(-100, 250, 25)\n",
"\n",
" \n", " \n",
" # yesterday_imbalance_prices = get_imbalance_prices(date.date() - datetime.timedelta(days=1))\n",
" # real_imbalance_prices = get_imbalance_prices(date.date())\n",
"\n",
" # yesterday_charge_thresholds, yesterday_discharge_thresholds = baseline_policy.get_optimal_thresholds(torch.tensor([yesterday_imbalance_prices]*1000), charge_thresholds, discharge_thresholds)\n",
" # yesterday_profit, yesterday_charge_cycles = baseline_policy.simulate(torch.tensor([[real_imbalance_prices]]), torch.tensor([yesterday_charge_thresholds.mean(axis=0)]), torch.tensor([yesterday_discharge_thresholds.mean(axis=0)]))\n",
"\n",
" # return (0, 0), (yesterday_profit, yesterday_charge_cycles)\n",
"\n",
" # start_time = time.time()\n", " # start_time = time.time()\n",
" initial, nrvs, target = get_predicted_NRV(date)\n", " initial, nrvs, target = get_predicted_NRV(date)\n",
" # print(f\"Time to get predicted NRV: {time.time() - start_time} seconds\")\n", " # print(f\"Time to get predicted NRV: {time.time() - start_time} seconds\")\n",
@@ -590,6 +580,7 @@
" # determine day ahead thresholds\n", " # determine day ahead thresholds\n",
" charge_thresholds, discharge_thresholds = baseline_policy.get_optimal_thresholds(reconstructed_imbalance_prices, charge_thresholds, discharge_thresholds)\n", " charge_thresholds, discharge_thresholds = baseline_policy.get_optimal_thresholds(reconstructed_imbalance_prices, charge_thresholds, discharge_thresholds)\n",
"\n", "\n",
" \n",
" # print(f\"Time to determine optimal thresholds: {time.time() - start_time} seconds\")\n", " # print(f\"Time to determine optimal thresholds: {time.time() - start_time} seconds\")\n",
"\n", "\n",
" # get the optimal thresholds\n", " # get the optimal thresholds\n",
@@ -597,9 +588,16 @@
" next_day_discharge_threshold = discharge_thresholds.mean(axis=0)\n", " next_day_discharge_threshold = discharge_thresholds.mean(axis=0)\n",
"\n", "\n",
" # get the profit for the day ahead thresholds on the real imbalance prices\n", " # get the profit for the day ahead thresholds on the real imbalance prices\n",
" next_day_profit, next_day_charge_cycles = baseline_policy.simulate(torch.tensor([real_imbalance_prices]), torch.tensor([next_day_charge_threshold]), torch.tensor([next_day_discharge_threshold]))\n", " next_day_profit, next_day_charge_cycles = baseline_policy.simulate(torch.tensor([[real_imbalance_prices]]), torch.tensor([next_day_charge_threshold]), torch.tensor([next_day_discharge_threshold]))\n",
" \n",
" yesterday_imbalance_prices = get_imbalance_prices(date.date() - datetime.timedelta(days=1))\n",
" yesterday_charge_thresholds, yesterday_discharge_thresholds = baseline_policy.get_optimal_thresholds(torch.tensor([yesterday_imbalance_prices]), charge_thresholds, discharge_thresholds)\n",
" yesterday_profit, yesterday_charge_cycles = baseline_policy.simulate(torch.tensor([[real_imbalance_prices]]), torch.tensor([yesterday_charge_thresholds.mean(axis=0)]), torch.tensor([yesterday_discharge_thresholds.mean(axis=0)]))\n",
"\n",
" return (next_day_profit, next_day_charge_cycles), (yesterday_profit, yesterday_charge_cycles)\n",
"\n",
" if global_charging_threshold is not None and global_discharging_threshold is not None:\n", " if global_charging_threshold is not None and global_discharging_threshold is not None:\n",
" global_profit, global_charge_cycles = baseline_policy.simulate(torch.tensor([real_imbalance_prices]), torch.tensor([global_charging_threshold]), torch.tensor([global_discharging_threshold]))\n", " global_profit, global_charge_cycles = baseline_policy.simulate(torch.tensor([[real_imbalance_prices]]), torch.tensor([global_charging_threshold]), torch.tensor([global_discharging_threshold]))\n",
" else:\n", " else:\n",
" return next_day_profit, next_day_charge_cycles\n", " return next_day_profit, next_day_charge_cycles\n",
" return (next_day_profit, next_day_charge_cycles), (global_profit, global_charge_cycles)\n", " return (next_day_profit, next_day_charge_cycles), (global_profit, global_charge_cycles)\n",
@@ -626,6 +624,7 @@
" global_total_profit += global_profit\n", " global_total_profit += global_profit\n",
" global_total_charge_cycles += global_charge_cycles\n", " global_total_charge_cycles += global_charge_cycles\n",
" except Exception as e:\n", " except Exception as e:\n",
" raise e\n",
" print(f\"Error for date {date}\")\n", " print(f\"Error for date {date}\")\n",
" continue\n", " continue\n",
"\n", "\n",
@@ -634,7 +633,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 5, "execution_count": 7,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
@@ -644,246 +643,28 @@
" 0%| | 0/346 [00:00<?, ?it/s]" " 0%| | 0/346 [00:00<?, ?it/s]"
] ]
}, },
{
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_453602/2967650124.py:57: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /opt/conda/conda-bld/pytorch_1682343967769/work/torch/csrc/utils/tensor_new.cpp:245.)\n",
" reconstructed_imbalance_prices = torch.tensor(reconstructed_imbalance_prices)\n",
" 27%|██▋ | 94/346 [03:17<08:49, 2.10s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-04-05 00:00:00\n",
"Error for date 2023-04-06 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 49%|████▉ | 170/346 [05:51<06:07, 2.09s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-06-20 00:00:00\n",
"Error for date 2023-06-21 00:00:00\n",
"Error for date 2023-06-22 00:00:00\n",
"Error for date 2023-06-23 00:00:00\n",
"Error for date 2023-06-24 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 64%|██████▎ | 220/346 [07:26<04:23, 2.09s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-08-09 00:00:00\n",
"Error for date 2023-08-10 00:00:00\n",
"Error for date 2023-08-11 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 65%|██████▍ | 224/346 [07:28<02:14, 1.10s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-08-13 00:00:00\n",
"Error for date 2023-08-14 00:00:00\n",
"Error for date 2023-08-15 00:00:00\n",
"Error for date 2023-08-16 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 71%|███████ | 246/346 [08:06<03:28, 2.08s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-09-04 00:00:00\n",
"Error for date 2023-09-05 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 73%|███████▎ | 252/346 [08:14<02:42, 1.73s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-09-10 00:00:00\n",
"Error for date 2023-09-11 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 77%|███████▋ | 266/346 [08:39<02:44, 2.05s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-09-24 00:00:00\n",
"Error for date 2023-09-25 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 78%|███████▊ | 270/346 [08:43<01:51, 1.46s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-09-28 00:00:00\n",
"Error for date 2023-09-29 00:00:00\n",
"Error for date 2023-09-30 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 81%|████████ | 280/346 [08:58<02:00, 1.83s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-10-08 00:00:00\n",
"Error for date 2023-10-09 00:00:00\n",
"Error for date 2023-10-10 00:00:00\n",
"Error for date 2023-10-11 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 94%|█████████▎| 324/346 [09:02<00:02, 10.02it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-10-14 00:00:00\n",
"Error for date 2023-10-15 00:00:00\n",
"Error for date 2023-10-16 00:00:00\n",
"Error for date 2023-10-17 00:00:00\n",
"Error for date 2023-10-18 00:00:00\n",
"Error for date 2023-10-19 00:00:00\n",
"Error for date 2023-10-20 00:00:00\n",
"Error for date 2023-10-21 00:00:00\n",
"Error for date 2023-10-22 00:00:00\n",
"Error for date 2023-10-23 00:00:00\n",
"Error for date 2023-10-24 00:00:00\n",
"Error for date 2023-10-25 00:00:00\n",
"Error for date 2023-10-26 00:00:00\n",
"Error for date 2023-10-27 00:00:00\n",
"Error for date 2023-10-28 00:00:00\n",
"Error for date 2023-10-29 00:00:00\n",
"Error for date 2023-10-30 00:00:00\n",
"Error for date 2023-10-31 00:00:00\n",
"Error for date 2023-11-01 00:00:00\n",
"Error for date 2023-11-02 00:00:00\n",
"Error for date 2023-11-03 00:00:00\n",
"Error for date 2023-11-04 00:00:00\n",
"Error for date 2023-11-05 00:00:00\n",
"Error for date 2023-11-06 00:00:00\n",
"Error for date 2023-11-07 00:00:00\n",
"Error for date 2023-11-08 00:00:00\n",
"Error for date 2023-11-09 00:00:00\n",
"Error for date 2023-11-10 00:00:00\n",
"Error for date 2023-11-11 00:00:00\n",
"Error for date 2023-11-12 00:00:00\n",
"Error for date 2023-11-13 00:00:00\n",
"Error for date 2023-11-14 00:00:00\n",
"Error for date 2023-11-15 00:00:00\n",
"Error for date 2023-11-16 00:00:00\n",
"Error for date 2023-11-17 00:00:00\n",
"Error for date 2023-11-18 00:00:00\n",
"Error for date 2023-11-19 00:00:00\n",
"Error for date 2023-11-20 00:00:00\n",
"Error for date 2023-11-21 00:00:00\n",
"Error for date 2023-11-22 00:00:00\n",
"Error for date 2023-11-23 00:00:00\n",
"Error for date 2023-11-24 00:00:00\n",
"Error for date 2023-11-25 00:00:00\n",
"Error for date 2023-11-26 00:00:00\n",
"Error for date 2023-11-27 00:00:00\n",
"Error for date 2023-11-28 00:00:00\n",
"Error for date 2023-11-29 00:00:00\n",
"Error for date 2023-11-30 00:00:00\n",
"Error for date 2023-12-01 00:00:00\n",
"Error for date 2023-12-02 00:00:00\n",
"Error for date 2023-12-03 00:00:00\n",
"Error for date 2023-12-04 00:00:00\n",
"Error for date 2023-12-05 00:00:00\n",
"Error for date 2023-12-06 00:00:00\n",
"Error for date 2023-12-07 00:00:00\n",
"Error for date 2023-12-08 00:00:00\n",
"Error for date 2023-12-09 00:00:00\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 346/346 [09:02<00:00, 1.57s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error for date 2023-12-10 00:00:00\n",
"Error for date 2023-12-11 00:00:00\n",
"Error for date 2023-12-12 00:00:00\n",
"Next day profit: (tensor([339174.7500]), tensor([1780.]))\n",
"Global profit: (tensor([251202.5781]), tensor([1448.2500]))\n"
]
},
{ {
"name": "stderr", "name": "stderr",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"\n" "\n"
] ]
},
{
"ename": "AttributeError",
"evalue": "'collections.OrderedDict' object has no attribute 'eval'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m next_day_profit, global_profit \u001b[38;5;241m=\u001b[39m \u001b[43mnext_day_test_set\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mNext day profit: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnext_day_profit\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGlobal profit: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mglobal_profit\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
"Cell \u001b[0;32mIn[6], line 129\u001b[0m, in \u001b[0;36mnext_day_test_set\u001b[0;34m()\u001b[0m\n\u001b[1;32m 127\u001b[0m global_total_charge_cycles \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m global_charge_cycles\n\u001b[1;32m 128\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m--> 129\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 130\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError for date \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdate\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 131\u001b[0m \u001b[38;5;28;01mcontinue\u001b[39;00m\n",
"Cell \u001b[0;32mIn[6], line 122\u001b[0m, in \u001b[0;36mnext_day_test_set\u001b[0;34m()\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m date \u001b[38;5;129;01min\u001b[39;00m tqdm(dates):\n\u001b[1;32m 121\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 122\u001b[0m (next_day_profit, charge_cycles), (global_profit, global_charge_cycles) \u001b[38;5;241m=\u001b[39m \u001b[43mget_next_day_profits_for_date\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m150\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m175\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprint_results\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 123\u001b[0m next_day_total_profit \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m next_day_profit\n\u001b[1;32m 124\u001b[0m next_day_total_charge_cycles \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m charge_cycles\n",
"Cell \u001b[0;32mIn[6], line 59\u001b[0m, in \u001b[0;36mget_next_day_profits_for_date\u001b[0;34m(date, global_charging_threshold, global_discharging_threshold, print_results)\u001b[0m\n\u001b[1;32m 47\u001b[0m discharge_thresholds \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marange(\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m100\u001b[39m, \u001b[38;5;241m250\u001b[39m, \u001b[38;5;241m25\u001b[39m)\n\u001b[1;32m 50\u001b[0m \u001b[38;5;66;03m# yesterday_imbalance_prices = get_imbalance_prices(date.date() - datetime.timedelta(days=1))\u001b[39;00m\n\u001b[1;32m 51\u001b[0m \u001b[38;5;66;03m# real_imbalance_prices = get_imbalance_prices(date.date())\u001b[39;00m\n\u001b[1;32m 52\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 57\u001b[0m \n\u001b[1;32m 58\u001b[0m \u001b[38;5;66;03m# start_time = time.time()\u001b[39;00m\n\u001b[0;32m---> 59\u001b[0m initial, nrvs, target \u001b[38;5;241m=\u001b[39m \u001b[43mget_predicted_NRV\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdate\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 60\u001b[0m \u001b[38;5;66;03m# print(f\"Time to get predicted NRV: {time.time() - start_time} seconds\")\u001b[39;00m\n\u001b[1;32m 61\u001b[0m \n\u001b[1;32m 62\u001b[0m \u001b[38;5;66;03m# start_time = time.time()\u001b[39;00m\n\u001b[1;32m 63\u001b[0m \u001b[38;5;66;03m# repeat initial value by nrv.shape[0] times\u001b[39;00m\n\u001b[1;32m 64\u001b[0m initial \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrepeat(initial, nrvs\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m])\n",
"Cell \u001b[0;32mIn[2], line 59\u001b[0m, in \u001b[0;36mget_predicted_NRV_diffusion\u001b[0;34m(date)\u001b[0m\n\u001b[1;32m 56\u001b[0m prev_features \u001b[38;5;241m=\u001b[39m prev_features\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[1;32m 57\u001b[0m targets \u001b[38;5;241m=\u001b[39m targets\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m---> 59\u001b[0m samples \u001b[38;5;241m=\u001b[39m \u001b[43msample_diffusion\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1000\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprev_features\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 61\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m initial_sequence\u001b[38;5;241m.\u001b[39mcpu()\u001b[38;5;241m.\u001b[39mnumpy()[\u001b[38;5;241m0\u001b[39m][\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m], samples\u001b[38;5;241m.\u001b[39mcpu()\u001b[38;5;241m.\u001b[39mnumpy(), targets\u001b[38;5;241m.\u001b[39mcpu()\u001b[38;5;241m.\u001b[39mnumpy()\n",
"Cell \u001b[0;32mIn[2], line 28\u001b[0m, in \u001b[0;36msample_diffusion\u001b[0;34m(model, n, inputs)\u001b[0m\n\u001b[1;32m 25\u001b[0m alpha_hat \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcumprod(alpha, dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 27\u001b[0m inputs \u001b[38;5;241m=\u001b[39m inputs\u001b[38;5;241m.\u001b[39mrepeat(n, \u001b[38;5;241m1\u001b[39m)\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m---> 28\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43meval\u001b[49m()\n\u001b[1;32m 29\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mno_grad():\n\u001b[1;32m 30\u001b[0m x \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mrandn(inputs\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m], ts_length)\u001b[38;5;241m.\u001b[39mto(device)\n",
"\u001b[0;31mAttributeError\u001b[0m: 'collections.OrderedDict' object has no attribute 'eval'"
]
} }
], ],
"source": [ "source": [

View File

@@ -138,52 +138,52 @@ class BaselinePolicy():
next_day_charge_thresholds, next_day_discharge_thresholds = [], [] next_day_charge_thresholds, next_day_discharge_thresholds = [], []
for ip in imbalance_prices: # imbalance_prices: (1000, 96) -> (1000, threshold_pairs, 96)
new_imbalance_prices = ip.repeat(len(charge_thresholds), 1) imbalance_prices = imbalance_prices.unsqueeze(1).repeat(1, charge_thresholds.shape[0], 1)
profits, charge_cycles = self.simulate(new_imbalance_prices, charge_thresholds, discharge_thresholds) profits, charge_cycles = self.simulate(imbalance_prices, charge_thresholds, discharge_thresholds)
sorted_profits, sorted_indices = torch.sort(profits, descending=True) # get the index of the best threshold pair for each day (1000, 96) -> (1000)
best_threshold_indices = torch.argmax(profits, dim=1)
# Reorder other tensors based on sorted indices # get the best threshold pair for each day (1000)
sorted_charge_thresholds = charge_thresholds[sorted_indices] next_day_charge_thresholds = charge_thresholds[best_threshold_indices]
sorted_discharge_thresholds = discharge_thresholds[sorted_indices] next_day_discharge_thresholds = discharge_thresholds[best_threshold_indices]
# get the optimal thresholds next_day_charge_thresholds = next_day_charge_thresholds.float()
next_day_charge_threshold = sorted_charge_thresholds[0] next_day_discharge_thresholds = next_day_discharge_thresholds.float()
next_day_discharge_threshold = sorted_discharge_thresholds[0]
next_day_charge_thresholds.append(next_day_charge_threshold) return next_day_charge_thresholds, next_day_discharge_thresholds
next_day_discharge_thresholds.append(next_day_discharge_threshold)
# return float tensors def simulate(self, price_matrix, charge_thresholds: torch.tensor, discharge_thresholds: torch.tensor, charge_cycles_penalty: float = 250):
return torch.tensor(next_day_charge_thresholds, dtype=torch.float), torch.tensor(next_day_discharge_thresholds, dtype=torch.float) batch_size, num_thresholds, num_time_steps = price_matrix.shape
def simulate(self, price_matrix, charge_thresholds: torch.tensor, discharge_thresholds: torch.tensor): # Reshape thresholds for broadcasting
batch_size = price_matrix.shape[0] charge_thresholds = charge_thresholds.view(1, num_thresholds, 1).expand(batch_size, -1, num_time_steps)
discharge_thresholds = discharge_thresholds.view(1, num_thresholds, 1).expand(batch_size, -1, num_time_steps)
charge_matrix = torch.zeros(price_matrix.shape) charge_matrix = torch.zeros_like(price_matrix)
charge_thresholds_reshaped = charge_thresholds.repeat(price_matrix.shape[1], 1).T charge_matrix[price_matrix < charge_thresholds] = 1
discharge_thresholds_reshaped = discharge_thresholds.repeat(price_matrix.shape[1], 1).T charge_matrix[price_matrix > discharge_thresholds] = -1
charge_matrix[price_matrix < charge_thresholds_reshaped] = 1 battery_states = torch.zeros(batch_size, num_thresholds)
profits = torch.zeros_like(battery_states)
charge_cycles = torch.zeros_like(battery_states)
charge_matrix[price_matrix > discharge_thresholds_reshaped] = -1 for i in range(num_time_steps):
discharge_mask = ~((charge_matrix[:, :, i] == -1) & (battery_states == 0))
battery_states = torch.zeros(batch_size) charge_mask = ~((charge_matrix[:, :, i] == 1) & (battery_states == self.battery.capacity))
profits = torch.zeros(batch_size)
charge_cycles = torch.zeros(batch_size)
for i in range(price_matrix.shape[1]):
discharge_mask = ~((charge_matrix[:, i] == -1) & (battery_states == 0))
charge_mask = ~((charge_matrix[:, i] == 1) & (battery_states == self.battery.capacity))
mask = discharge_mask & charge_mask mask = discharge_mask & charge_mask
battery_states[mask] += charge_matrix[:, i][mask] * self.battery.power / 4 battery_states[mask] += charge_matrix[:, :, i][mask] * self.battery.power / 4
profits[mask] += -charge_matrix[:, i][mask] * price_matrix[:, i][mask] * self.battery.power / 4 profits[mask] += -charge_matrix[:, :, i][mask] * price_matrix[:, :, i][mask] * self.battery.power / 4
charge_cycles[mask] += torch.abs(charge_matrix[:, i][mask]) * (self.battery.power / 4) / self.battery.capacity charge_cycles[mask] += torch.abs(charge_matrix[:, :, i][mask]) * (self.battery.power / 4) / self.battery.capacity / 2
# penalize for excess charge cycles
excess_charge_cycles = (charge_cycles - 400/365).clamp(min=0)
profits -= excess_charge_cycles * charge_cycles_penalty
return profits, charge_cycles return profits, charge_cycles

View File

@@ -87,7 +87,10 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
crps_from_samples_metric = [] crps_from_samples_metric = []
with torch.no_grad(): with torch.no_grad():
total_samples = len(dataloader.dataset) - 96
for _, _, idx_batch in tqdm(dataloader): for _, _, idx_batch in tqdm(dataloader):
idx_batch = [idx for idx in idx_batch if idx < total_samples]
if len(idx_batch) == 0: if len(idx_batch) == 0:
continue continue