diff --git a/Result-Reports/November_1.md b/Result-Reports/November_1.md
index 11587e2..8271bba 100644
--- a/Result-Reports/November_1.md
+++ b/Result-Reports/November_1.md
@@ -17,12 +17,18 @@
- [x] Non autoregressive Quantile Regression
- [x] Fix debug plots for quantile regression -> predict quantiles and look if true value is below a quantile, if so 1 else 0 and average these over all samples
- [ ] Full day debug plots for quantile regression
-- [ ] CPRS Metrics
+- [x] CRPS Metrics
- [ ] Time as input parameter:
-- [ ] Cosine per year, day,
-- [ ] 4 Quarter features
-- [ ] Probabilistic Baseline -> Quantiles on Training Data -> Breedte bekijken -> Gebruiken voor CPRS en plotjes
- - Day-ahead implicit net position( ())
+- [x] Probabilistic Baseline -> Quantiles on Training Data -> Breedte bekijken -> Gebruiken voor CRPS en plotjes
+- [ ] Day-ahead implicit net position
+
+- [x] Faster sampling for quantile regression
+- [ ] Quantile plots for other model (Linear, GRU) (Check if better)
+- [ ] Check example plots to see if metrics correspond with what seen on plots
+- [ ] Time step (96 values) to embedding layer
+- [x] Mean of nrv per time step plotten (done for probabilistic baseline)
+- [x] Convert back to MW on plots
+
## 2. Autoregressive vs Non-Autoregressive
Training data: 2015 - 2022 \
@@ -146,3 +152,9 @@ Hidden Units: 1024
| Quantile Regression | [Link](https://clearml.victormylle.be/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/df5669968cf64c42ba7a97fc2d745b76/info-output/metrics/scalar?columns=selected&columns=type&columns=name&columns=tags&columns=status&columns=project.name&columns=users&columns=started&columns=last_update&columns=last_iteration&columns=parent.name&order=-last_update&filter=) | - | - | 105.53107468002209 | 21656.24950570062 |
+# CRPS Metric
+| Model | Experiment | Train-MAE | Train-MSE | Train-CRPS | Test-MAE | Test-MSE | Test-CRPS |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| Probabilistic Baseline | [Link](https://clearml.victormylle.be/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/599152a9e44d4ba6a5bcb603e5041b01/info-output/metrics/scalar?columns=selected&columns=type&columns=name&columns=tags&columns=status&columns=project.name&columns=users&columns=started&columns=last_update&columns=last_iteration&columns=parent.name&order=-last_update&filter=) | - | - | 72.78830217810247 | - | - | 75.9605281456783 |
+| Non-Autoregressive Quantile | [Link](https://clearml.victormylle.be/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/c50a62963dd649c387f1122ccee61d2f/info-output/metrics/scalar?columns=selected&columns=type&columns=name&columns=tags&columns=status&columns=project.name&columns=users&columns=started&columns=last_update&columns=last_iteration&columns=parent.name&order=-last_update&filter=) | 98.43774474341907 | 17433.701092295152 | 70.63047790527344 | 104.28421422336042 | 20851.083458159148 | 74.81269836425781 |
+| Auto-Regressive Quantile | [Link](https://clearml.victormylle.be/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/8db35a590cfa46f081c7f4caf93d711d/info-output/metrics/scalar?columns=selected&columns=type&columns=name&columns=tags&columns=status&columns=project.name&columns=users&columns=started&columns=last_update&columns=last_iteration&columns=parent.name&order=-last_update&filter=) | - | - | - | 107.24027992397264 | 22016.697427833686 | 68.19192504882812 |
\ No newline at end of file
diff --git a/src/data/dataset.py b/src/data/dataset.py
index 68271d1..0b45a09 100644
--- a/src/data/dataset.py
+++ b/src/data/dataset.py
@@ -62,7 +62,7 @@ class NrvDataset(Dataset):
# get indices of all 00:15 timestamps
if self.full_day_skip:
start_of_day_indices = self.dataframe[
- self.dataframe["datetime"].dt.time == pd.Timestamp("00:15:00").time()
+ self.dataframe["datetime"].dt.time != pd.Timestamp("00:15:00").time()
].index
skip_indices.extend(start_of_day_indices)
skip_indices = list(set(skip_indices))
@@ -147,7 +147,7 @@ class NrvDataset(Dataset):
print(f"Actual index: {actual_idx}")
raise ValueError("There are nan values in the features.")
- return all_features, nrv_target
+ return all_features, nrv_target, idx
def random_day_autoregressive(self, idx: int):
idx = self.valid_indices[idx]
@@ -205,3 +205,26 @@ class NrvDataset(Dataset):
all_features = torch.cat(features, dim=0)
return all_features, target
+
+ def get_batch(self, idx: list):
+ features = []
+ targets = []
+ for i in idx:
+ f, t, _ = self.__getitem__(i)
+ features.append(f)
+ targets.append(t)
+
+ return torch.stack(features), torch.stack(targets)
+
+ def get_batch_autoregressive(self, idx: list):
+ features = []
+ targets = []
+ for i in idx:
+ f, t = self.random_day_autoregressive(i)
+ features.append(f)
+ targets.append(t)
+
+ if None in features:
+ return None, torch.stack(targets)
+
+ return torch.stack(features), torch.stack(targets)
diff --git a/src/data/preprocessing.py b/src/data/preprocessing.py
index 8bab7e8..6aaeaa0 100644
--- a/src/data/preprocessing.py
+++ b/src/data/preprocessing.py
@@ -167,7 +167,10 @@ class DataProcessor:
)
def get_train_dataloader(
- self, transform: bool = True, predict_sequence_length: int = 96
+ self,
+ transform: bool = True,
+ predict_sequence_length: int = 96,
+ shuffle: bool = True,
):
train_df = self.all_features.copy()
@@ -194,7 +197,7 @@ class DataProcessor:
full_day_skip=self.full_day_skip,
predict_sequence_length=predict_sequence_length,
)
- return self.get_dataloader(train_dataset)
+ return self.get_dataloader(train_dataset, shuffle=shuffle)
def get_test_dataloader(
self, transform: bool = True, predict_sequence_length: int = 96
@@ -262,5 +265,5 @@ class DataProcessor:
data_loader = self.get_train_dataloader(
predict_sequence_length=self.output_size
)
- input, _ = next(iter(data_loader))
+ input, _, _ = next(iter(data_loader))
return input.shape[-1]
diff --git a/src/losses/crps_metric.py b/src/losses/crps_metric.py
index c8f3055..c010a78 100644
--- a/src/losses/crps_metric.py
+++ b/src/losses/crps_metric.py
@@ -15,7 +15,7 @@ class CRPSLoss(nn.Module):
# preds shape: [batch_size, num_quantiles]
# unsqueeze target
- target = target.unsqueeze(-1)
+ # target = target.unsqueeze(-1)
mask = (preds > target).float()
test = self.quantiles_tensor - mask
diff --git a/src/losses/pinball_loss.py b/src/losses/pinball_loss.py
index b48f5c4..8660d86 100644
--- a/src/losses/pinball_loss.py
+++ b/src/losses/pinball_loss.py
@@ -1,24 +1,27 @@
import torch
from torch import nn
+
class PinballLoss(nn.Module):
def __init__(self, quantiles):
super(PinballLoss, self).__init__()
self.quantiles_tensor = torch.tensor(quantiles, dtype=torch.float32)
-
+ self.quantiles = self.quantiles_tensor.tolist()
+
def forward(self, pred, target):
error = target - pred
upper = self.quantiles_tensor * error
- lower = (self.quantiles_tensor - 1) * error
+ lower = (self.quantiles_tensor - 1) * error
losses = torch.max(lower, upper)
loss = torch.mean(torch.mean(losses, dim=0))
return loss
-
+
class NonAutoRegressivePinballLoss(nn.Module):
def __init__(self, quantiles):
super(NonAutoRegressivePinballLoss, self).__init__()
self.quantiles_tensor = torch.tensor(quantiles, dtype=torch.float32)
+ self.quantiles = self.quantiles_tensor.tolist()
def forward(self, pred, target):
pred = pred.reshape(-1, 96, len(self.quantiles_tensor))
diff --git a/src/notebooks/training.ipynb b/src/notebooks/training.ipynb
index bafe8d3..cd8a587 100644
--- a/src/notebooks/training.ipynb
+++ b/src/notebooks/training.ipynb
@@ -33,15 +33,15 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"#### Data Processor ####\n",
"data_config = DataConfig()\n",
"data_config.NRV_HISTORY = True\n",
- "data_config.LOAD_HISTORY = True\n",
- "data_config.LOAD_FORECAST = True\n",
+ "data_config.LOAD_HISTORY = False\n",
+ "data_config.LOAD_FORECAST = False\n",
"\n",
"data_config.WIND_FORECAST = False\n",
"data_config.WIND_HISTORY = False\n",
@@ -60,35 +60,33 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "Can't get url information for git repo in /workspaces/Thesis/src/notebooks\n"
+ "InsecureRequestWarning: Certificate verification is disabled! Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
- "ClearML Task: created new task id=599152a9e44d4ba6a5bcb603e5041b01\n",
- "ClearML results page: http://192.168.1.182:8080/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/599152a9e44d4ba6a5bcb603e5041b01/output/log\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "JSON serialization of artifact 'dictionary' failed, reverting to pickle\n"
+ "ClearML Task: created new task id=8423d146953041eba8d7b4c27d7ed6a5\n",
+ "ClearML results page: http://192.168.1.182:8080/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/8423d146953041eba8d7b4c27d7ed6a5/output/log\n",
+ "2023-11-23 23:07:35,461 - clearml.Task - INFO - Storing jupyter notebook directly as code\n",
+ "2023-11-23 23:07:39,250 - clearml - WARNING - JSON serialization of artifact 'dictionary' failed, reverting to pickle\n"
]
}
],
"source": [
+ "data_processor.set_full_day_skip(True)\n",
"quantiles = [0.01, 0.05, 0.1, 0.15, 0.4, 0.5, 0.6, 0.85, 0.9, 0.95, 0.99]\n",
- "trainer = ProbabilisticBaselineTrainer(quantiles=quantiles, data_processor=data_processor, clearml_helper=clearml_helper)\n",
+ "trainer = ProbabilisticBaselineTrainer(\n",
+ " quantiles=quantiles, data_processor=data_processor, clearml_helper=clearml_helper\n",
+ ")\n",
"trainer.add_metrics_to_track([CRPSLoss(quantiles=quantiles)])\n",
"trainer.train()"
]
@@ -160,9 +158,32 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 5,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "ename": "ParserError",
+ "evalue": "Error tokenizing data. C error: Calling read(nbytes) on source failed. Try engine='python'.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mParserError\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;32m/workspaces/Thesis/src/notebooks/training.ipynb Cell 8\u001b[0m line \u001b[0;36m1\n\u001b[1;32m 11\u001b[0m data_config \u001b[39m=\u001b[39m DataConfig()\n\u001b[1;32m 12\u001b[0m data_config\u001b[39m.\u001b[39mLOAD_FORECAST \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m\n\u001b[0;32m---> 13\u001b[0m data_processor \u001b[39m=\u001b[39m DataProcessor(data_config)\n\u001b[1;32m 14\u001b[0m data_processor\u001b[39m.\u001b[39mset_batch_size(\u001b[39m1024\u001b[39m)\n\u001b[1;32m 17\u001b[0m data_processor\u001b[39m.\u001b[39mset_train_range((datetime(year\u001b[39m=\u001b[39m\u001b[39m2015\u001b[39m, month\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m, day\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m, tzinfo\u001b[39m=\u001b[39mpytz\u001b[39m.\u001b[39mUTC), datetime(year\u001b[39m=\u001b[39m\u001b[39m2022\u001b[39m, month\u001b[39m=\u001b[39m\u001b[39m11\u001b[39m, day\u001b[39m=\u001b[39m\u001b[39m30\u001b[39m, tzinfo\u001b[39m=\u001b[39mpytz\u001b[39m.\u001b[39mUTC)))\n",
+ "File \u001b[0;32m/workspaces/Thesis/src/notebooks/../data/preprocessing.py:52\u001b[0m, in \u001b[0;36mDataProcessor.__init__\u001b[0;34m(self, data_config)\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhistory_features \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mget_nrv_history()\n\u001b[1;32m 51\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfuture_features \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mget_load_forecast()\n\u001b[0;32m---> 52\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpv_forecast \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mget_pv_forecast()\n\u001b[1;32m 53\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mwind_forecast \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mget_wind_forecast()\n\u001b[1;32m 55\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mall_features \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhistory_features\u001b[39m.\u001b[39mmerge(\n\u001b[1;32m 56\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfuture_features, on\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mdatetime\u001b[39m\u001b[39m\"\u001b[39m, how\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mleft\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 57\u001b[0m )\n",
+ "File \u001b[0;32m/workspaces/Thesis/src/notebooks/../data/preprocessing.py:132\u001b[0m, in \u001b[0;36mDataProcessor.get_pv_forecast\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mget_pv_forecast\u001b[39m(\u001b[39mself\u001b[39m):\n\u001b[0;32m--> 132\u001b[0m df \u001b[39m=\u001b[39m pd\u001b[39m.\u001b[39;49mread_csv(pv_forecast_data_path, delimiter\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39m;\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n\u001b[1;32m 134\u001b[0m df \u001b[39m=\u001b[39m df\u001b[39m.\u001b[39mrename(\n\u001b[1;32m 135\u001b[0m columns\u001b[39m=\u001b[39m{\u001b[39m\"\u001b[39m\u001b[39mdayahead11hforecast\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mpv_forecast\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mDatetime\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mdatetime\u001b[39m\u001b[39m\"\u001b[39m}\n\u001b[1;32m 136\u001b[0m )\n\u001b[1;32m 137\u001b[0m df \u001b[39m=\u001b[39m df[[\u001b[39m\"\u001b[39m\u001b[39mdatetime\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mpv_forecast\u001b[39m\u001b[39m\"\u001b[39m]]\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/io/parsers/readers.py:912\u001b[0m, in \u001b[0;36mread_csv\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, date_format, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options, dtype_backend)\u001b[0m\n\u001b[1;32m 899\u001b[0m kwds_defaults \u001b[39m=\u001b[39m _refine_defaults_read(\n\u001b[1;32m 900\u001b[0m dialect,\n\u001b[1;32m 901\u001b[0m delimiter,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 908\u001b[0m dtype_backend\u001b[39m=\u001b[39mdtype_backend,\n\u001b[1;32m 909\u001b[0m )\n\u001b[1;32m 910\u001b[0m kwds\u001b[39m.\u001b[39mupdate(kwds_defaults)\n\u001b[0;32m--> 912\u001b[0m \u001b[39mreturn\u001b[39;00m _read(filepath_or_buffer, kwds)\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/io/parsers/readers.py:583\u001b[0m, in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 580\u001b[0m \u001b[39mreturn\u001b[39;00m parser\n\u001b[1;32m 582\u001b[0m \u001b[39mwith\u001b[39;00m parser:\n\u001b[0;32m--> 583\u001b[0m \u001b[39mreturn\u001b[39;00m parser\u001b[39m.\u001b[39;49mread(nrows)\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/io/parsers/readers.py:1704\u001b[0m, in \u001b[0;36mTextFileReader.read\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1697\u001b[0m nrows \u001b[39m=\u001b[39m validate_integer(\u001b[39m\"\u001b[39m\u001b[39mnrows\u001b[39m\u001b[39m\"\u001b[39m, nrows)\n\u001b[1;32m 1698\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1699\u001b[0m \u001b[39m# error: \"ParserBase\" has no attribute \"read\"\u001b[39;00m\n\u001b[1;32m 1700\u001b[0m (\n\u001b[1;32m 1701\u001b[0m index,\n\u001b[1;32m 1702\u001b[0m columns,\n\u001b[1;32m 1703\u001b[0m col_dict,\n\u001b[0;32m-> 1704\u001b[0m ) \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_engine\u001b[39m.\u001b[39;49mread( \u001b[39m# type: ignore[attr-defined]\u001b[39;49;00m\n\u001b[1;32m 1705\u001b[0m nrows\n\u001b[1;32m 1706\u001b[0m )\n\u001b[1;32m 1707\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m:\n\u001b[1;32m 1708\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mclose()\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py:234\u001b[0m, in \u001b[0;36mCParserWrapper.read\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 232\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 233\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlow_memory:\n\u001b[0;32m--> 234\u001b[0m chunks \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_reader\u001b[39m.\u001b[39;49mread_low_memory(nrows)\n\u001b[1;32m 235\u001b[0m \u001b[39m# destructive to chunks\u001b[39;00m\n\u001b[1;32m 236\u001b[0m data \u001b[39m=\u001b[39m _concatenate_chunks(chunks)\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/_libs/parsers.pyx:814\u001b[0m, in \u001b[0;36mpandas._libs.parsers.TextReader.read_low_memory\u001b[0;34m()\u001b[0m\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/_libs/parsers.pyx:875\u001b[0m, in \u001b[0;36mpandas._libs.parsers.TextReader._read_rows\u001b[0;34m()\u001b[0m\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/_libs/parsers.pyx:850\u001b[0m, in \u001b[0;36mpandas._libs.parsers.TextReader._tokenize_rows\u001b[0;34m()\u001b[0m\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/_libs/parsers.pyx:861\u001b[0m, in \u001b[0;36mpandas._libs.parsers.TextReader._check_tokenize_status\u001b[0;34m()\u001b[0m\n",
+ "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/pandas/_libs/parsers.pyx:2029\u001b[0m, in \u001b[0;36mpandas._libs.parsers.raise_parser_error\u001b[0;34m()\u001b[0m\n",
+ "\u001b[0;31mParserError\u001b[0m: Error tokenizing data. C error: Calling read(nbytes) on source failed. Try engine='python'."
+ ]
+ }
+ ],
"source": [
"#### Hyperparameters ####\n",
"inputDim = 96\n",
@@ -203,18 +224,18 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "/workspaces/Thesis/src/notebooks/../trainers/quantile_trainer.py:27: UserWarning:\n",
+ "/workspaces/Thesis/src/notebooks/../trainers/quantile_trainer.py:70: UserWarning:\n",
"\n",
"To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
"\n",
- "/workspaces/Thesis/src/notebooks/../losses/pinball_loss.py:7: UserWarning:\n",
+ "/workspaces/Thesis/src/notebooks/../losses/pinball_loss.py:8: UserWarning:\n",
"\n",
"To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
"\n"
@@ -224,30 +245,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "ClearML Task: created new task id=b2fd376e79b14ba4b26b0436cb130cfe\n",
- "ClearML results page: http://192.168.1.182:8080/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/b2fd376e79b14ba4b26b0436cb130cfe/output/log\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Can't get url information for git repo in /workspaces/Thesis/src/notebooks\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
+ "ClearML Task: created new task id=215dd7634cf2475693ea6081e2ab7559\n",
+ "ClearML results page: http://192.168.1.182:8080/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/215dd7634cf2475693ea6081e2ab7559/output/log\n",
"Early stopping triggered\n"
]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 25804/25804 [20:36<00:00, 20.87it/s]\n"
- ]
}
],
"source": [
@@ -259,9 +260,10 @@
"\n",
"# quantiles = torch.tensor([0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]).to(\"cuda\")\n",
"quantiles = torch.tensor(\n",
- " [0.01, 0.05, 0.005, 0.1, 0.15, 0.4, 0.5, 0.6, 0.85, 0.9, 0.95, 0.99]\n",
+ " [0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]\n",
").to(\"cuda\")\n",
"\n",
+ "# model = LinearRegression(inputDim, len(quantiles))\n",
"model = NonLinearRegression(inputDim, len(quantiles), hiddenSize=1024, numLayers=5)\n",
"optimizer = torch.optim.Adam(model.parameters(), lr=learningRate)\n",
"\n",
@@ -292,18 +294,18 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "/workspaces/Thesis/src/notebooks/../trainers/quantile_trainer.py:290: UserWarning:\n",
+ "/workspaces/Thesis/src/notebooks/../trainers/quantile_trainer.py:335: UserWarning:\n",
"\n",
"To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
"\n",
- "/workspaces/Thesis/src/notebooks/../losses/pinball_loss.py:21: UserWarning:\n",
+ "/workspaces/Thesis/src/notebooks/../losses/pinball_loss.py:23: UserWarning:\n",
"\n",
"To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
"\n"
@@ -313,21 +315,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "ClearML Task: created new task id=c50a62963dd649c387f1122ccee61d2f\n",
- "ClearML results page: http://192.168.1.182:8080/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/c50a62963dd649c387f1122ccee61d2f/output/log\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Can't get url information for git repo in /workspaces/Thesis/src/notebooks\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
+ "ClearML Task: created new task id=160b4938ae3145db9ef8b55e71452987\n",
+ "ClearML results page: http://192.168.1.182:8080/projects/2e46d4af6f1e4c399cf9f5aa30bc8795/experiments/160b4938ae3145db9ef8b55e71452987/output/log\n",
"Early stopping triggered\n"
]
},
@@ -335,7 +324,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "/workspaces/Thesis/src/notebooks/../trainers/quantile_trainer.py:338: UserWarning:\n",
+ "/workspaces/Thesis/src/notebooks/../trainers/quantile_trainer.py:366: UserWarning:\n",
"\n",
"Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /opt/conda/conda-bld/pytorch_1682343967769/work/torch/csrc/utils/tensor_new.cpp:245.)\n",
"\n"
@@ -377,12 +366,111 @@
"trainer.train(epochs=epochs)"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "torch.Size([3, 192])\n",
+ "torch.Size([3, 96])\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "\n",
+ "dataset = data_processor.get_train_dataloader().dataset\n",
+ "dataset.predict_sequence_length = 1\n",
+ "dataset.data_config.LOAD_HISTORY = True\n",
+ "\n",
+ "\n",
+ "def auto_regressive_batch(dataset, idx_batch, sequence_length):\n",
+ " target_full = [] # (batch_size, sequence_length)\n",
+ " predictions_samples = [] # (batch_size, sequence_length)\n",
+ " predictions_full = [] # (batch_size, sequence_length, quantiles)\n",
+ "\n",
+ " prev_features, targets = dataset.get_batch(idx_batch)\n",
+ "\n",
+ " initial_sequence = prev_features[:, :96]\n",
+ "\n",
+ " target_full = targets[:, 0]\n",
    "    # TODO: finish batched autoregressive sampling (dangling `self.` removed)\n",
+ "\n",
+ "\n",
+ "\n",
+ "auto_regressive_batch(dataset, [0, 1, 2], 50)"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "def auto_regressive(self, data_loader, idx, sequence_length: int = 96):\n",
+ " self.model.eval()\n",
+ " target_full = []\n",
+ " predictions_sampled = []\n",
+ " predictions_full = []\n",
+ "\n",
+ " prev_features, target = data_loader.dataset[idx]\n",
+ " prev_features = prev_features.to(self.device)\n",
+ "\n",
+ " initial_sequence = prev_features[:96]\n",
+ "\n",
+ " target_full.append(target)\n",
+ " with torch.no_grad():\n",
+ " prediction = self.model(prev_features.unsqueeze(0))\n",
+ " predictions_full.append(prediction.squeeze(0))\n",
+ "\n",
+ " # sample from the distribution\n",
+ " sample = self.sample_from_dist(\n",
+ " self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy()\n",
+ " )\n",
+ " predictions_sampled.append(sample)\n",
+ "\n",
+ " for i in range(sequence_length - 1):\n",
+ " new_features = torch.cat(\n",
+ " (prev_features[1:96].cpu(), torch.tensor([predictions_sampled[-1]])),\n",
+ " dim=0,\n",
+ " )\n",
+ " new_features = new_features.float()\n",
+ "\n",
+ " # get the other needed features\n",
+ " other_features, new_target = data_loader.dataset.random_day_autoregressive(\n",
+ " idx + i + 1\n",
+ " )\n",
+ "\n",
+ " if other_features is not None:\n",
+ " prev_features = torch.cat((new_features, other_features), dim=0)\n",
+ " else:\n",
+ " prev_features = new_features\n",
+ "\n",
+ " # add target to target_full\n",
+ " target_full.append(new_target)\n",
+ "\n",
+ " # predict\n",
+ " with torch.no_grad():\n",
+ " prediction = self.model(prev_features.unsqueeze(0).to(self.device))\n",
+ " predictions_full.append(prediction.squeeze(0))\n",
+ "\n",
+ " # sample from the distribution\n",
+ " sample = self.sample_from_dist(\n",
+ " self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy()\n",
+ " )\n",
+ " predictions_sampled.append(sample)\n",
+ "\n",
+ " return (\n",
+ " initial_sequence.cpu(),\n",
+ " torch.stack(predictions_full).cpu(),\n",
+ " torch.tensor(predictions_sampled).reshape(-1, 1),\n",
+ " torch.stack(target_full).cpu(),\n",
+ " )"
+ ]
}
],
"metadata": {
diff --git a/src/trainers/autoregressive_trainer.py b/src/trainers/autoregressive_trainer.py
index 930b3d2..8c57158 100644
--- a/src/trainers/autoregressive_trainer.py
+++ b/src/trainers/autoregressive_trainer.py
@@ -45,12 +45,16 @@ class AutoRegressiveTrainer(Trainer):
)
for i, idx in enumerate(sample_indices):
- auto_regressive_output = self.auto_regressive(data_loader, idx)
+ auto_regressive_output = self.auto_regressive(data_loader.dataset, [idx])
if len(auto_regressive_output) == 3:
initial, predictions, target = auto_regressive_output
else:
initial, predictions, _, target = auto_regressive_output
+ initial = initial.squeeze(0)
+ predictions = predictions.squeeze(0)
+ target = target.squeeze(0)
+
sub_fig = self.get_plot(initial, target, predictions, show_legend=(i == 0))
row = i + 1
@@ -64,13 +68,13 @@ class AutoRegressiveTrainer(Trainer):
).item()
fig["layout"]["annotations"][i].update(
- text=f"{loss.__class__.__name__}: {loss:.6f}"
+ text=f"{self.criterion.__class__.__name__}: {loss:.6f}"
)
# y axis same for all plots
- fig.update_yaxes(range=[-1, 1], col=1)
+ # fig.update_yaxes(range=[-1, 1], col=1)
- fig.update_layout(height=300 * rows)
+ fig.update_layout(height=1000 * rows)
task.get_logger().report_plotly(
title=f"{'Training' if train else 'Test'} Samples",
series="full_day",
@@ -140,7 +144,7 @@ class AutoRegressiveTrainer(Trainer):
total_amount_samples = len(dataloader.dataset) - 95
for idx in tqdm(range(total_amount_samples)):
- _, outputs, targets = self.auto_regressive(dataloader, idx)
+ _, outputs, targets = self.auto_regressive(dataloader.dataset, idx)
inversed_outputs = torch.tensor(
self.data_processor.inverse_transform(outputs)
diff --git a/src/trainers/probabilistic_baseline.py b/src/trainers/probabilistic_baseline.py
index 6836f3e..aaefbf0 100644
--- a/src/trainers/probabilistic_baseline.py
+++ b/src/trainers/probabilistic_baseline.py
@@ -52,6 +52,11 @@ class ProbabilisticBaselineTrainer(Trainer):
for i in range(96):
time_steps[i].extend(inputs[:, i].numpy())
+ mean_fig = self.plot_mean_nrv(time_steps)
+ task.get_logger().report_plotly(
+ title=f"Train NRV", series="Mean NRV", figure=mean_fig
+ )
+
all_quantiles = []
for i, time_values in enumerate(time_steps):
quantiles = np.quantile(time_values, self.quantiles)
@@ -84,7 +89,7 @@ class ProbabilisticBaselineTrainer(Trainer):
quantile_values_tensor = torch.tensor(quantile_values)
quantile_values_expanded = quantile_values_tensor.unsqueeze(0)
- for _, targets in dataloader:
+ for _, targets, _ in dataloader:
# Expand quantile_values for each batch
quantile_values_batch = quantile_values_expanded.repeat(
targets.size(0), 1, 1
@@ -157,3 +162,19 @@ class ProbabilisticBaselineTrainer(Trainer):
fig.update_yaxes(range=[-1, 1])
return fig
+
+ def plot_mean_nrv(self, timesteps):
+ # create ndarray of time steps
+ timesteps = np.array(timesteps)
+
+ timesteps = self.data_processor.inverse_transform(timesteps)
+
+ # for every row calculate mean
+ mean = np.mean(timesteps, axis=1)
+
+ # plot mean
+ fig = go.Figure()
+ fig.add_trace(go.Scatter(x=np.arange(96), y=mean, name="Mean"))
+ fig.update_layout(title="Mean NRV")
+
+ return fig
diff --git a/src/trainers/quantile_trainer.py b/src/trainers/quantile_trainer.py
index 72eaf3e..2e000d2 100644
--- a/src/trainers/quantile_trainer.py
+++ b/src/trainers/quantile_trainer.py
@@ -13,6 +13,49 @@ from tqdm import tqdm
import matplotlib.pyplot as plt
+def sample_from_dist(quantiles, output_values):
+ # both to numpy
+ quantiles = quantiles.cpu().numpy()
+
+ if isinstance(output_values, torch.Tensor):
+ output_values = output_values.cpu().numpy()
+
+ reshaped_values = output_values.reshape(-1, len(quantiles))
+
+ uniform_random_numbers = np.random.uniform(0, 1, (reshaped_values.shape[0], 1000))
+
+ idx_below = np.searchsorted(quantiles, uniform_random_numbers, side="right") - 1
+ idx_above = np.clip(idx_below + 1, 0, len(quantiles) - 1)
+
+ # handle edge case where idx_below is -1
+ idx_below = np.clip(idx_below, 0, len(quantiles) - 1)
+
+ y_below = reshaped_values[np.arange(reshaped_values.shape[0])[:, None], idx_below]
+ y_above = reshaped_values[np.arange(reshaped_values.shape[0])[:, None], idx_above]
+
+ # Calculate the slopes for interpolation
+ x_below = quantiles[idx_below]
+ x_above = quantiles[idx_above]
+
+ # Interpolate
+ # Ensure all variables are NumPy arrays
+ x_below_np = x_below.cpu().numpy() if isinstance(x_below, torch.Tensor) else x_below
+ x_above_np = x_above.cpu().numpy() if isinstance(x_above, torch.Tensor) else x_above
+ y_below_np = y_below.cpu().numpy() if isinstance(y_below, torch.Tensor) else y_below
+ y_above_np = y_above.cpu().numpy() if isinstance(y_above, torch.Tensor) else y_above
+
+ # Compute slopes for interpolation
+ slopes_np = (y_above_np - y_below_np) / (
+ np.clip(x_above_np - x_below_np, 1e-6, np.inf)
+ )
+
+ # Perform the interpolation
+ new_samples = y_below_np + slopes_np * (uniform_random_numbers - x_below_np)
+
+ # Return the mean of the samples
+ return np.mean(new_samples, axis=1)
+
+
class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
def __init__(
self,
@@ -46,19 +89,26 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
}
with torch.no_grad():
- total_amount_samples = len(dataloader.dataset) - 95
+ total_samples = len(dataloader.dataset) - 96
+ batches = 0
+ for _, _, idx_batch in dataloader:
+ idx_batch = [idx for idx in idx_batch if idx < total_samples]
- for idx in tqdm(range(total_amount_samples)):
- _, outputs, samples, targets = self.auto_regressive(dataloader, idx)
+ if len(idx_batch) == 0:
+ continue
+
+ _, outputs, samples, targets = self.auto_regressive(
+ dataloader.dataset, idx_batch=idx_batch
+ )
+
+ samples = samples.to(self.device)
+ outputs = outputs.to(self.device)
+ targets = targets.to(self.device)
inversed_samples = self.data_processor.inverse_transform(samples)
inversed_targets = self.data_processor.inverse_transform(targets)
inversed_outputs = self.data_processor.inverse_transform(outputs)
- outputs = outputs.to(self.device)
- targets = targets.to(self.device)
- samples = samples.to(self.device)
-
inversed_samples = inversed_samples.to(self.device)
inversed_targets = inversed_targets.to(self.device)
inversed_outputs = inversed_outputs.to(self.device)
@@ -66,10 +116,10 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
for metric in self.metrics_to_track:
if metric.__class__ != PinballLoss and metric.__class__ != CRPSLoss:
transformed_metrics[metric.__class__.__name__] += metric(
- samples, targets
+ samples, targets.squeeze(-1)
)
metrics[metric.__class__.__name__] += metric(
- inversed_samples, inversed_targets
+ inversed_samples, inversed_targets.squeeze(-1)
)
else:
transformed_metrics[metric.__class__.__name__] += metric(
@@ -78,10 +128,11 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
metrics[metric.__class__.__name__] += metric(
inversed_outputs, inversed_targets
)
+ batches += 1
for metric in self.metrics_to_track:
- metrics[metric.__class__.__name__] /= total_amount_samples
- transformed_metrics[metric.__class__.__name__] /= total_amount_samples
+ metrics[metric.__class__.__name__] /= batches
+ transformed_metrics[metric.__class__.__name__] /= batches
for metric_name, metric_value in metrics.items():
if PinballLoss.__name__ in metric_name:
@@ -97,7 +148,14 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
)
task.get_logger().report_single_value(name=name, value=metric_value)
- def get_plot(self, current_day, next_day, predictions, show_legend: bool = True):
+ def get_plot(
+ self,
+ current_day,
+ next_day,
+ predictions,
+ show_legend: bool = True,
+ retransform: bool = True,
+ ):
fig = go.Figure()
# Convert to numpy for plotting
@@ -105,6 +163,11 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
next_day_np = next_day.view(-1).cpu().numpy()
predictions_np = predictions.cpu().numpy()
+ if retransform:
+ current_day_np = self.data_processor.inverse_transform(current_day_np)
+ next_day_np = self.data_processor.inverse_transform(next_day_np)
+ predictions_np = self.data_processor.inverse_transform(predictions_np)
+
# Add traces for current and next day
fig.add_trace(go.Scatter(x=np.arange(96), y=current_day_np, name="Current Day"))
fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day_np, name="Next Day"))
@@ -127,86 +190,68 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
return fig
- def auto_regressive(self, data_loader, idx, sequence_length: int = 96):
- self.model.eval()
- target_full = []
- predictions_sampled = []
- predictions_full = []
-
- prev_features, target = data_loader.dataset[idx]
+ def auto_regressive(self, dataset, idx_batch, sequence_length: int = 96):
+ prev_features, targets = dataset.get_batch(idx_batch)
prev_features = prev_features.to(self.device)
+ targets = targets.to(self.device)
- initial_sequence = prev_features[:96]
+ initial_sequence = prev_features[:, :96]
- target_full.append(target)
+ target_full = targets[:, 0].unsqueeze(1) # (batch_size, 1)
with torch.no_grad():
- prediction = self.model(prev_features.unsqueeze(0))
- predictions_full.append(prediction.squeeze(0))
-
- # sample from the distribution
- sample = self.sample_from_dist(
- self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy()
- )
- predictions_sampled.append(sample)
+ new_predictions_full = self.model(prev_features) # (batch_size, quantiles)
+ samples = (
+ torch.tensor(sample_from_dist(self.quantiles, new_predictions_full))
+ .unsqueeze(1)
+ .to(self.device)
+ ) # (batch_size, 1)
+ predictions_samples = samples
+ predictions_full = new_predictions_full.unsqueeze(1)
for i in range(sequence_length - 1):
new_features = torch.cat(
- (prev_features[1:96].cpu(), torch.tensor([predictions_sampled[-1]])),
- dim=0,
- )
+ (prev_features[:, 1:96], samples), dim=1
+ ) # (batch_size, 96)
+
new_features = new_features.float()
- # get the other needed features
- other_features, new_target = data_loader.dataset.random_day_autoregressive(
- idx + i + 1
- )
+ other_features, new_targets = dataset.get_batch_autoregressive(
+ np.array(idx_batch) + i + 1
+ ) # (batch_size, new_features)
if other_features is not None:
- prev_features = torch.cat((new_features, other_features), dim=0)
+ prev_features = torch.cat(
+ (new_features, other_features), dim=1
+ ) # (batch_size, 96 + new_features)
else:
prev_features = new_features
- # add target to target_full
- target_full.append(new_target)
+ target_full = torch.cat(
+ (target_full, new_targets.to(self.device)), dim=1
+ ) # (batch_size, sequence_length)
- # predict
with torch.no_grad():
- prediction = self.model(prev_features.unsqueeze(0).to(self.device))
- predictions_full.append(prediction.squeeze(0))
+ new_predictions_full = self.model(
+ prev_features
+ ) # (batch_size, quantiles)
+ predictions_full = torch.cat(
+ (predictions_full, new_predictions_full.unsqueeze(1)), dim=1
+ ) # (batch_size, sequence_length, quantiles)
- # sample from the distribution
- sample = self.sample_from_dist(
- self.quantiles.cpu(), prediction.squeeze(-1).cpu().numpy()
- )
- predictions_sampled.append(sample)
+ samples = (
+ torch.tensor(sample_from_dist(self.quantiles, new_predictions_full))
+ .unsqueeze(-1)
+ .to(self.device)
+ ) # (batch_size, 1)
+ predictions_samples = torch.cat((predictions_samples, samples), dim=1)
return (
- initial_sequence.cpu(),
- torch.stack(predictions_full).cpu(),
- torch.tensor(predictions_sampled).reshape(-1, 1),
- torch.stack(target_full).cpu(),
+ initial_sequence,
+ predictions_full,
+ predictions_samples,
+ target_full.unsqueeze(-1),
)
- @staticmethod
- def sample_from_dist(quantiles, output_values):
- # Interpolate the inverse CDF
- inverse_cdf = interp1d(
- quantiles,
- output_values,
- kind="linear",
- bounds_error=False,
- fill_value="extrapolate",
- )
-
- # generate one random uniform number
- uniform_random_numbers = np.random.uniform(0, 1, 1000)
-
- # Apply the inverse CDF to the uniform random numbers
- samples = inverse_cdf(uniform_random_numbers)
-
- # Return the mean of the samples
- return np.mean(samples)
-
def plot_quantile_percentages(
self, task, data_loader, train: bool = True, iteration: int = None
):
@@ -214,7 +259,7 @@ class AutoRegressiveQuantileTrainer(AutoRegressiveTrainer):
quantile_counter = {q: 0 for q in self.quantiles.cpu().numpy()}
with torch.no_grad():
- for inputs, targets in data_loader:
+ for inputs, targets, _ in data_loader:
inputs = inputs.to("cuda")
output = self.model(inputs)
@@ -302,23 +347,6 @@ class NonAutoRegressiveQuantileRegression(Trainer):
debug=debug,
)
- @staticmethod
- def sample_from_dist(quantiles, output_values):
- reshaped_values = output_values.reshape(-1, len(quantiles))
- samples = []
- for row in reshaped_values:
- inverse_cdf = interp1d(
- quantiles,
- row,
- kind="linear",
- bounds_error=False,
- fill_value="extrapolate",
- )
- uniform_random_numbers = np.random.uniform(0, 1, 1000)
- new_samples = inverse_cdf(uniform_random_numbers)
- samples.append(np.mean(new_samples))
- return np.array(samples)
-
def log_final_metrics(self, task, dataloader, train: bool = True):
metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
transformed_metrics = {
@@ -326,12 +354,12 @@ class NonAutoRegressiveQuantileRegression(Trainer):
}
with torch.no_grad():
- for inputs, targets in dataloader:
+ for inputs, targets, _ in dataloader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
outputted_samples = [
- self.sample_from_dist(self.quantiles.cpu(), output.cpu().numpy())
+ sample_from_dist(self.quantiles.cpu(), output.cpu().numpy())
for output in outputs
]
@@ -359,10 +387,10 @@ class NonAutoRegressiveQuantileRegression(Trainer):
)
else:
transformed_metrics[metric.__class__.__name__] += metric(
- outputs, targets
+ outputs, targets.unsqueeze(-1)
)
metrics[metric.__class__.__name__] += metric(
- inversed_outputs, inversed_targets
+ inversed_outputs, inversed_targets.unsqueeze(-1)
)
for metric in self.metrics_to_track:
diff --git a/src/trainers/trainer.py b/src/trainers/trainer.py
index 3e84814..b0dd0fb 100644
--- a/src/trainers/trainer.py
+++ b/src/trainers/trainer.py
@@ -7,8 +7,18 @@ import numpy as np
import plotly.subplots as sp
from plotly.subplots import make_subplots
+
class Trainer:
- def __init__(self, model: torch.nn.Module, optimizer: torch.optim.Optimizer, criterion: torch.nn.Module, data_processor: DataProcessor, device: torch.device, clearml_helper: ClearMLHelper = None, debug: bool = True):
+ def __init__(
+ self,
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ criterion: torch.nn.Module,
+ data_processor: DataProcessor,
+ device: torch.device,
+ clearml_helper: ClearMLHelper = None,
+ debug: bool = True,
+ ):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
@@ -49,7 +59,7 @@ class Trainer:
task = self.clearml_helper.get_task(task_name=task_name)
if self.debug:
- task.add_tags('Debug')
+ task.add_tags("Debug")
change_description = input("Enter a change description: ")
if change_description:
@@ -70,9 +80,11 @@ class Trainer:
task.connect(self.data_processor.data_config, name="data_features")
return task
-
+
def random_samples(self, train: bool = True, num_samples: int = 10):
- train_loader, test_loader = self.data_processor.get_dataloaders(predict_sequence_length=self.model.output_size)
+ train_loader, test_loader = self.data_processor.get_dataloaders(
+ predict_sequence_length=self.model.output_size
+ )
if train:
loader = train_loader
@@ -82,10 +94,11 @@ class Trainer:
indices = np.random.randint(0, len(loader.dataset) - 1, size=num_samples)
return indices
-
def train(self, epochs: int):
try:
- train_loader, test_loader = self.data_processor.get_dataloaders(predict_sequence_length=self.model.output_size)
+ train_loader, test_loader = self.data_processor.get_dataloaders(
+ predict_sequence_length=self.model.output_size
+ )
train_samples = self.random_samples(train=True)
test_samples = self.random_samples(train=False)
@@ -99,7 +112,7 @@ class Trainer:
self.model.train()
running_loss = 0.0
- for inputs, targets in train_loader:
+ for inputs, targets, _ in train_loader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
self.optimizer.zero_grad()
@@ -110,33 +123,48 @@ class Trainer:
self.optimizer.step()
running_loss += loss.item()
-
running_loss /= len(train_loader.dataset)
test_loss = self.test(test_loader)
if self.patience is not None:
- if self.best_score is None or test_loss < self.best_score + self.delta:
+ if (
+ self.best_score is None
+ or test_loss < self.best_score + self.delta
+ ):
self.save_checkpoint(test_loss, task, epoch)
counter = 0
else:
counter += 1
if counter >= self.patience:
- print('Early stopping triggered')
+ print("Early stopping triggered")
break
if task:
- task.get_logger().report_scalar(title=self.criterion.__class__.__name__, series="train", value=running_loss, iteration=epoch)
- task.get_logger().report_scalar(title=self.criterion.__class__.__name__, series="test", value=test_loss, iteration=epoch)
-
+ task.get_logger().report_scalar(
+ title=self.criterion.__class__.__name__,
+ series="train",
+ value=running_loss,
+ iteration=epoch,
+ )
+ task.get_logger().report_scalar(
+ title=self.criterion.__class__.__name__,
+ series="test",
+ value=test_loss,
+ iteration=epoch,
+ )
if epoch % self.plot_every_n_epochs == 0:
self.debug_plots(task, True, train_loader, train_samples, epoch)
self.debug_plots(task, False, test_loader, test_samples, epoch)
- if hasattr(self, 'plot_quantile_percentages'):
- self.plot_quantile_percentages(task, train_loader, True, epoch)
- self.plot_quantile_percentages(task, test_loader, False, epoch)
+ if hasattr(self, "plot_quantile_percentages"):
+ self.plot_quantile_percentages(
+ task, train_loader, True, epoch
+ )
+ self.plot_quantile_percentages(
+ task, test_loader, False, epoch
+ )
if task:
self.finish_training(task=task)
@@ -147,23 +175,32 @@ class Trainer:
task.set_archived(True)
raise
-
def log_final_metrics(self, task, dataloader, train: bool = True):
- metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
- transformed_metrics = { metric.__class__.__name__: 0.0 for metric in self.metrics_to_track }
+ metrics = {metric.__class__.__name__: 0.0 for metric in self.metrics_to_track}
+ transformed_metrics = {
+ metric.__class__.__name__: 0.0 for metric in self.metrics_to_track
+ }
with torch.no_grad():
- for inputs, targets in dataloader:
+ for inputs, targets, _ in dataloader:
inputs, targets = inputs.to(self.device), targets
outputs = self.model(inputs)
- inversed_outputs = torch.tensor(self.data_processor.inverse_transform(outputs))
- inversed_inputs = torch.tensor(self.data_processor.inverse_transform(targets))
+ inversed_outputs = torch.tensor(
+ self.data_processor.inverse_transform(outputs)
+ )
+ inversed_inputs = torch.tensor(
+ self.data_processor.inverse_transform(targets)
+ )
for metric in self.metrics_to_track:
- transformed_metrics[metric.__class__.__name__] += metric(outputs, targets.to(self.device))
- metrics[metric.__class__.__name__] += metric(inversed_outputs, inversed_inputs)
+ transformed_metrics[metric.__class__.__name__] += metric(
+ outputs, targets.to(self.device)
+ )
+ metrics[metric.__class__.__name__] += metric(
+ inversed_outputs, inversed_inputs
+ )
for metric in self.metrics_to_track:
metrics[metric.__class__.__name__] /= len(dataloader)
@@ -171,74 +208,109 @@ class Trainer:
for metric_name, metric_value in metrics.items():
if train:
- metric_name = f'train_{metric_name}'
+ metric_name = f"train_{metric_name}"
else:
- metric_name = f'test_{metric_name}'
-
- task.get_logger().report_single_value(name=metric_name, value=metric_value)
+ metric_name = f"test_{metric_name}"
+
+ task.get_logger().report_single_value(
+ name=metric_name, value=metric_value
+ )
for metric_name, metric_value in transformed_metrics.items():
if train:
- metric_name = f'train_transformed_{metric_name}'
+ metric_name = f"train_transformed_{metric_name}"
else:
- metric_name = f'test_transformed_{metric_name}'
+ metric_name = f"test_transformed_{metric_name}"
- task.get_logger().report_single_value(name=metric_name, value=metric_value)
+ task.get_logger().report_single_value(
+ name=metric_name, value=metric_value
+ )
def finish_training(self, task):
if self.best_score is not None:
- self.model.load_state_dict(torch.load('checkpoint.pt'))
+ self.model.load_state_dict(torch.load("checkpoint.pt"))
self.model.eval()
- train_loader, test_loader = self.data_processor.get_dataloaders(predict_sequence_length=self.model.output_size)
+ train_loader, test_loader = self.data_processor.get_dataloaders(
+ predict_sequence_length=self.model.output_size
+ )
- if not hasattr(self, 'plot_quantile_percentages'):
+ if not hasattr(self, "plot_quantile_percentages"):
self.log_final_metrics(task, train_loader, train=True)
self.log_final_metrics(task, test_loader, train=False)
-
def test(self, test_loader: torch.utils.data.DataLoader):
self.model.eval()
test_loss = 0
with torch.no_grad():
- for data, target in test_loader:
+ for data, target, _ in test_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
-
+
test_loss += self.criterion(output, target).item()
test_loss /= len(test_loader.dataset)
return test_loss
-
+
def save_checkpoint(self, val_loss, task, iteration: int):
- torch.save(self.model.state_dict(), 'checkpoint.pt')
- task.update_output_model(model_path='checkpoint.pt', iteration=iteration, auto_delete_file=False)
+ torch.save(self.model.state_dict(), "checkpoint.pt")
+ task.update_output_model(
+ model_path="checkpoint.pt", iteration=iteration, auto_delete_file=False
+ )
self.best_score = val_loss
-
- def get_plot(self, current_day, next_day, predictions, show_legend: bool = True):
+
+ def get_plot(
+ self,
+ current_day,
+ next_day,
+ predictions,
+ show_legend: bool = True,
+ retransform: bool = True,
+ ):
+ if retransform:
+ current_day = self.data_processor.inverse_transform(current_day)
+ next_day = self.data_processor.inverse_transform(next_day)
+ predictions = self.data_processor.inverse_transform(predictions)
+
fig = go.Figure()
- fig.add_trace(go.Scatter(x=np.arange(96), y=current_day.view(-1).cpu().numpy(), name="Current Day"))
- fig.add_trace(go.Scatter(x=96 + np.arange(96), y=next_day.view(-1).cpu().numpy(), name="Next Day"))
+ fig.add_trace(
+ go.Scatter(
+ x=np.arange(96),
+ y=current_day.view(-1).cpu().numpy(),
+ name="Current Day",
+ )
+ )
+ fig.add_trace(
+ go.Scatter(
+ x=96 + np.arange(96), y=next_day.view(-1).cpu().numpy(), name="Next Day"
+ )
+ )
- fig.add_trace(go.Scatter(x=96 + np.arange(96), y=predictions.reshape(-1), name="Predictions"))
+ fig.add_trace(
+ go.Scatter(
+ x=96 + np.arange(96), y=predictions.reshape(-1), name="Predictions"
+ )
+ )
fig.update_layout(title="Predictions of the Linear Model")
return fig
-
def debug_plots(self, task, train: bool, data_loader, sample_indices, epoch):
num_samples = len(sample_indices)
rows = num_samples # One row per sample since we only want one column
cols = 1
-
- fig = make_subplots(rows=rows, cols=cols, subplot_titles=[f'Sample {i+1}' for i in range(num_samples)])
+
+ fig = make_subplots(
+ rows=rows,
+ cols=cols,
+ subplot_titles=[f"Sample {i+1}" for i in range(num_samples)],
+ )
for i, idx in enumerate(sample_indices):
-
- features, target = data_loader.dataset[idx]
+ features, target, _ = data_loader.dataset[idx]
features = features.to(self.device)
target = target.to(self.device)
@@ -247,29 +319,29 @@ class Trainer:
with torch.no_grad():
predictions = self.model(features).cpu()
- sub_fig = self.get_plot(features[:96], target, predictions, show_legend=(i == 0))
-
+ sub_fig = self.get_plot(
+ features[:96], target, predictions, show_legend=(i == 0)
+ )
+
row = i + 1
col = 1
-
+
for trace in sub_fig.data:
fig.add_trace(trace, row=row, col=col)
-
# loss = self.criterion(predictions.to(self.device), target.squeeze(-1).to(self.device)).item()
# fig['layout']['annotations'][i].update(text=f"{loss.__class__.__name__}: {loss:.6f}")
# y axis same for all plots
- fig.update_yaxes(range=[-1, 1], col=1)
+ # fig.update_yaxes(range=[-1, 1], col=1)
-
- fig.update_layout(height=300 * rows)
+ fig.update_layout(height=1000 * rows)
task.get_logger().report_plotly(
title=f"{'Training' if train else 'Test'} Samples",
series="full_day",
iteration=epoch,
- figure=fig
+ figure=fig,
)
def debug_scatter_plot(self, task, train: bool, samples, epoch):
@@ -285,7 +357,11 @@ class Trainer:
rows = -(-num_samples // 2) # Ceiling division to handle odd number of samples
cols = 2
- fig = make_subplots(rows=rows, cols=cols, subplot_titles=[f'Sample {i+1}' for i in range(num_samples)])
+ fig = make_subplots(
+ rows=rows,
+ cols=cols,
+ subplot_titles=[f"Sample {i+1}" for i in range(num_samples)],
+ )
for i, (current_day, next_value, pred) in enumerate(zip(X, y, predictions)):
sub_fig = self.scatter_plot(current_day, pred, next_value)
@@ -299,14 +375,16 @@ class Trainer:
title=f"{'Training' if train else 'Test'} Samples",
series="scatter",
iteration=epoch,
- figure=fig
+ figure=fig,
)
def scatter_plot(self, x, y, real_y):
fig = go.Figure()
# 96 values of x
- fig.add_trace(go.Scatter(x=np.arange(96), y=x.view(-1).cpu().numpy(), name="Current Day"))
+ fig.add_trace(
+ go.Scatter(x=np.arange(96), y=x.view(-1).cpu().numpy(), name="Current Day")
+ )
# add one value of y
fig.add_trace(go.Scatter(x=[96], y=[y.item()], name="Next Day"))
@@ -315,4 +393,4 @@ class Trainer:
fig.add_trace(go.Scatter(x=[96], y=[real_y.item()], name="Real Next Day"))
fig.update_layout(title="Predictions of the Linear Model")
- return fig
\ No newline at end of file
+ return fig