Non-autoregressive linear baseline update + further thesis writing
@@ -1,7 +1,7 @@
 from src.utils.clearml import ClearMLHelper

 #### ClearML ####
-clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
+clearml_helper = ClearMLHelper(project_name="Thesis/NAQR: Linear")
 task = clearml_helper.get_task(task_name="NAQR: Non Linear")
 task.execute_remotely(queue_name="default", exit_process=True)

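One note on the ClearML pattern in this hunk: execute_remotely enqueues the script on an agent queue and exits the local process, and anything passed through task.connect (see the data_features hunk below) can be overridden from the ClearML UI before the remote run starts. A minimal sketch using the plain clearml API rather than the repository's ClearMLHelper wrapper; project and task names are placeholders:

from clearml import Task

task = Task.init(project_name="Thesis/Example", task_name="demo")  # placeholder names

config = {"batch_size": 512}   # local defaults
config = task.connect(config)  # remote runs pick up UI-edited values here

# enqueue on the "default" queue and exit; a clearml-agent re-executes the script
task.execute_remotely(queue_name="default", exit_process=True)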
@@ -27,23 +27,23 @@ from src.models.time_embedding_layer import TimeEmbedding
 data_config = DataConfig()

 data_config.NRV_HISTORY = True
-data_config.LOAD_HISTORY = True
-data_config.LOAD_FORECAST = True
+data_config.LOAD_HISTORY = False
+data_config.LOAD_FORECAST = False

-data_config.WIND_FORECAST = True
-data_config.WIND_HISTORY = True
+data_config.WIND_FORECAST = False
+data_config.WIND_HISTORY = False

 data_config.QUARTER = True
 data_config.DAY_OF_WEEK = True
 data_config.PV_FORECAST = False
 data_config.PV_HISTORY = False

-data_config.NOMINAL_NET_POSITION = True
+data_config.NOMINAL_NET_POSITION = False


 data_config = task.connect(data_config, name="data_features")

 data_processor = DataProcessor(data_config, path="", lstm=False)
 data_processor.set_batch_size(512)
-data_processor.set_full_day_skip(False)
+data_processor.set_full_day_skip(True)


 #### Hyperparameters ####
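This hunk leaves the baseline with only NRV history plus the calendar features (quarter-hour and day of week), and turns full-day skipping on. set_full_day_skip(True) presumably drops days that are not fully observed, so every sample covers all 96 quarter-hours; the real DataProcessor logic is not part of this diff, but a hypothetical version of such a filter looks like this:

import pandas as pd

def filter_full_days(df: pd.DataFrame) -> pd.DataFrame:
    # assumes a quarter-hourly DatetimeIndex; a complete day has 96 rows
    sizes = df.groupby(df.index.date).size()
    full = {d for d, n in sizes.items() if n == 96}
    return df[pd.Series(df.index.date, index=df.index).isin(full)]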
@@ -83,17 +83,17 @@ time_embedding = TimeEmbedding(
 #     dropout=model_parameters["dropout"],
 # )

-non_linear_model = NonLinearRegression(
-    time_embedding.output_dim(inputDim),
-    len(quantiles) * 96,
-    hiddenSize=model_parameters["hidden_size"],
-    numLayers=model_parameters["num_layers"],
-    dropout=model_parameters["dropout"],
-)
+# non_linear_model = NonLinearRegression(
+#     time_embedding.output_dim(inputDim),
+#     len(quantiles) * 96,
+#     hiddenSize=model_parameters["hidden_size"],
+#     numLayers=model_parameters["num_layers"],
+#     dropout=model_parameters["dropout"],
+# )

-# linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
+linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))

-model = nn.Sequential(time_embedding, non_linear_model)
+model = nn.Sequential(time_embedding, linear_model)
 model.output_size = 96
 optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])

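Neither the loss nor the quantile list appears in this diff, but a quantile regression head of this shape is typically trained with the pinball loss. A self-contained sketch, with tensor shapes assumed as (batch, num_quantiles, 96):

import torch

def pinball_loss(preds: torch.Tensor, target: torch.Tensor, quantiles) -> torch.Tensor:
    """preds: (batch, num_quantiles, 96); target: (batch, 96)."""
    q = torch.as_tensor(quantiles, device=preds.device).view(1, -1, 1)
    diff = target.unsqueeze(1) - preds          # broadcast target over quantiles
    # under-prediction (diff > 0) is weighted by q, over-prediction by (1 - q)
    return torch.maximum(q * diff, (q - 1.0) * diff).mean()

The median (q = 0.5) recovers plain MAE; the asymmetric quantiles tilt the loss toward over- or under-forecasting, which is what yields the forecast bands.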
@@ -110,7 +110,7 @@ trainer = NonAutoRegressiveQuantileRegression(
     data_processor,
     quantiles,
     "cuda",
-    policy_evaluator=policy_evaluator,
+    policy_evaluator=None,
     debug=False,
 )

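"Non-autoregressive" here means the model emits the whole 96-step day in a single forward pass instead of feeding each prediction back in step by step. A schematic of that decoding step, assuming the non-linear head's (batch, len(quantiles) * 96) output layout from the hunk above; the batch size and feature dimension are placeholders:

import torch

quantiles = [0.1, 0.5, 0.9]      # placeholder values; the real list is defined upstream
features = torch.randn(32, 64)   # (batch, embedded input dim), shapes assumed

with torch.no_grad():
    flat = model(features)                      # (batch, len(quantiles) * 96)
    preds = flat.view(-1, len(quantiles), 96)   # (batch, quantile, quarter-hour)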
@@ -122,32 +122,32 @@ trainer.plot_every(5)
 trainer.train(task=task, epochs=epochs, remotely=True)

 ### Policy Evaluation ###
-idx_samples = trainer.test_set_samples
-_, test_loader = trainer.data_processor.get_dataloaders(
-    predict_sequence_length=trainer.model.output_size, full_day_skip=False
-)
+# idx_samples = trainer.test_set_samples
+# _, test_loader = trainer.data_processor.get_dataloaders(
+#     predict_sequence_length=trainer.model.output_size, full_day_skip=False
+# )

 # policy_evaluator.evaluate_test_set(idx_samples, test_loader)
 # policy_evaluator.plot_profits_table()
 # policy_evaluator.plot_thresholds_per_day()

-optimal_penalty, profit, charge_cycles = (
-    policy_evaluator.optimize_penalty_for_target_charge_cycles(
-        idx_samples=idx_samples,
-        test_loader=test_loader,
-        initial_penalty=1000,
-        target_charge_cycles=283,
-        learning_rate=15,
-        max_iterations=150,
-        tolerance=1,
-    )
-)
+# optimal_penalty, profit, charge_cycles = (
+#     policy_evaluator.optimize_penalty_for_target_charge_cycles(
+#         idx_samples=idx_samples,
+#         test_loader=test_loader,
+#         initial_penalty=1000,
+#         target_charge_cycles=283,
+#         learning_rate=15,
+#         max_iterations=150,
+#         tolerance=1,
+#     )
+# )

-print(
-    f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
-)
-task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
-task.get_logger().report_single_value(name="Optimal Profit", value=profit)
-task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
+# print(
+#     f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
+# )
+# task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
+# task.get_logger().report_single_value(name="Optimal Profit", value=profit)
+# task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)

 task.close()

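The commented-out penalty search still documents the evaluation protocol: its arguments (initial_penalty, learning_rate, max_iterations, tolerance) suggest an iterative scheme that nudges a cycling penalty until the battery policy's realized charge cycles hit the 283-cycle target. A hypothetical sketch of such a proportional update; evaluate_policy is a stand-in callback, not the repository's API:

def optimize_penalty(evaluate_policy, initial_penalty=1000.0,
                     target_charge_cycles=283, learning_rate=15.0,
                     max_iterations=150, tolerance=1.0):
    """Proportional search: stiffen the penalty while the policy cycles too much."""
    penalty, profit, charge_cycles = initial_penalty, None, None
    for _ in range(max_iterations):
        profit, charge_cycles = evaluate_policy(penalty)  # hypothetical callback
        error = charge_cycles - target_charge_cycles
        if abs(error) <= tolerance:
            break
        penalty += learning_rate * error  # too many cycles -> larger penalty
    return penalty, profit, charge_cycles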