Updated training scripts

Rename the ClearML task to "AQR: Non Linear", enlarge the non-linear model (hidden_size 512, num_layers 5, time_feature_embedding 8), switch the model to single-step output (output_size = 1), tighten early stopping (patience 10), and replace the direct test-set evaluation with a penalty search targeting 283 charge cycles.
```diff
@@ -2,9 +2,7 @@ from src.utils.clearml import ClearMLHelper
 
 #### ClearML ####
 clearml_helper = ClearMLHelper(project_name="Thesis/NrvForecast")
-task = clearml_helper.get_task(
-    task_name="Non Autoregressive Quantile Regression: Non Linear"
-)
+task = clearml_helper.get_task(task_name="AQR: Non Linear")
 task.execute_remotely(queue_name="default", exit_process=True)
 
 from src.policies.PolicyEvaluator import PolicyEvaluator
```
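The renamed task and the `execute_remotely` call follow ClearML's standard remote-execution pattern. A rough sketch written against the public `clearml` API (`ClearMLHelper` is this repo's own wrapper, so its internals are an assumption here):

```python
# Sketch of the remote-execution pattern, using the public clearml.Task
# API directly; ClearMLHelper presumably wraps calls like these.
from clearml import Task

task = Task.init(project_name="Thesis/NrvForecast", task_name="AQR: Non Linear")

# Enqueue this run on the "default" agent queue and stop the local process;
# a clearml-agent then picks the task up and re-runs the script remotely.
task.execute_remotely(queue_name="default", exit_process=True)
```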
```diff
@@ -60,16 +58,16 @@ if quantiles is None:
     quantiles = [0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]
     task.set_parameter("general/quantiles", quantiles)
 else:
-    # if string, convert to list "[0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]""
+    # if string, convert to list "[0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]"
     if isinstance(quantiles, str):
         quantiles = eval(quantiles)
 
 model_parameters = {
     "learning_rate": 0.0001,
-    "hidden_size": 256,
-    "num_layers": 4,
+    "hidden_size": 512,
+    "num_layers": 5,
     "dropout": 0.2,
-    "time_feature_embedding": 16,
+    "time_feature_embedding": 8,
 }
 
 model_parameters = task.connect(model_parameters, name="model_parameters")
```
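One caveat this hunk leaves unchanged: `eval(quantiles)` executes arbitrary code from a task parameter. Assuming the parameter arrives as a Python-literal string, as the comment describes, `ast.literal_eval` is a minimal safer drop-in:

```python
# ast.literal_eval only accepts Python literals (lists, numbers, strings, ...),
# so a malformed or malicious "general/quantiles" string cannot execute code.
import ast

def parse_quantiles(quantiles):
    if isinstance(quantiles, str):
        quantiles = ast.literal_eval(quantiles)
    return [float(q) for q in quantiles]

print(parse_quantiles("[0.01, 0.5, 0.99]"))  # [0.01, 0.5, 0.99]
```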
```diff
@@ -77,7 +75,14 @@ model_parameters = task.connect(model_parameters, name="model_parameters")
 time_embedding = TimeEmbedding(
     data_processor.get_time_feature_size(), model_parameters["time_feature_embedding"]
 )
-# lstm_model = GRUModel(time_embedding.output_dim(inputDim), len(quantiles), hidden_size=model_parameters["hidden_size"], num_layers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
+# lstm_model = GRUModel(
+#     time_embedding.output_dim(inputDim),
+#     len(quantiles),
+#     hidden_size=model_parameters["hidden_size"],
+#     num_layers=model_parameters["num_layers"],
+#     dropout=model_parameters["dropout"],
+# )
+
 non_linear_model = NonLinearRegression(
     time_embedding.output_dim(inputDim),
     len(quantiles),
```
```diff
@@ -85,10 +90,11 @@ non_linear_model = NonLinearRegression(
     numLayers=model_parameters["num_layers"],
     dropout=model_parameters["dropout"],
 )
 
 # linear_model = LinearRegression(time_embedding.output_dim(inputDim), len(quantiles))
 
 model = nn.Sequential(time_embedding, non_linear_model)
-model.output_size = 96
+model.output_size = 1
 optimizer = torch.optim.Adam(model.parameters(), lr=model_parameters["learning_rate"])
+
 ### Policy Evaluator ###
```
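The shape contract behind `nn.Sequential(time_embedding, non_linear_model)` is that the head emits one value per quantile, and `output_size = 1` means one forecast step per forward pass. A minimal illustrative stand-in (the repo's `TimeEmbedding` and `NonLinearRegression` are not shown in this diff, so the layer sizes below are placeholders):

```python
import torch
import torch.nn as nn

quantiles = [0.01, 0.05, 0.1, 0.15, 0.3, 0.4, 0.5, 0.6, 0.7, 0.85, 0.9, 0.95, 0.99]

# Placeholder for embedding + non-linear head: the final layer has
# len(quantiles) units, i.e. one predicted value per quantile level.
model = nn.Sequential(
    nn.Linear(24, 512),   # 24 input features is an arbitrary stand-in
    nn.ReLU(),
    nn.Linear(512, len(quantiles)),
)
model.output_size = 1  # single-step prediction, as set in this commit

x = torch.randn(32, 24)  # (batch, features)
print(model(x).shape)    # torch.Size([32, 13]): one value per quantile
```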
```diff
@@ -122,18 +128,37 @@ trainer = AutoRegressiveQuantileTrainer(
 trainer.add_metrics_to_track(
     [PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
 )
-trainer.early_stopping(patience=30)
+trainer.early_stopping(patience=10)
 trainer.plot_every(5)
 trainer.train(task=task, epochs=epochs, remotely=True)
 
 ### Policy Evaluation ###
 idx_samples = trainer.test_set_samples
 _, test_loader = trainer.data_processor.get_dataloaders(
-    predict_sequence_length=trainer.model.output_size, full_day_skip=True
+    predict_sequence_length=trainer.model.output_size, full_day_skip=False
 )
 
-policy_evaluator.evaluate_test_set(idx_samples, test_loader)
-policy_evaluator.plot_profits_table()
-policy_evaluator.plot_thresholds_per_day()
+# policy_evaluator.evaluate_test_set(idx_samples, test_loader)
+# policy_evaluator.plot_profits_table()
+# policy_evaluator.plot_thresholds_per_day()
 
+optimal_penalty, profit, charge_cycles = (
+    policy_evaluator.optimize_penalty_for_target_charge_cycles(
+        idx_samples=idx_samples,
+        test_loader=test_loader,
+        initial_penalty=1000,
+        target_charge_cycles=283,
+        learning_rate=15,
+        max_iterations=150,
+        tolerance=1,
+    )
+)
 
+print(
+    f"Optimal Penalty: {optimal_penalty}, Profit: {profit}, Charge Cycles: {charge_cycles}"
+)
+task.get_logger().report_single_value(name="Optimal Penalty", value=optimal_penalty)
+task.get_logger().report_single_value(name="Optimal Profit", value=profit)
+task.get_logger().report_single_value(name="Optimal Charge Cycles", value=charge_cycles)
+
+task.close()
```
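Among the tracked metrics, the pinball loss is the one specific to quantile regression. A minimal sketch of the standard definition (the repo's `PinballLoss` may differ in shapes or reduction):

```python
import torch

def pinball_loss(pred, target, quantiles):
    # pred: (batch, n_quantiles) predicted quantiles; target: (batch,) observations
    losses = []
    for i, q in enumerate(quantiles):
        err = target - pred[:, i]
        # under-prediction is weighted by q, over-prediction by 1 - q
        losses.append(torch.maximum(q * err, (q - 1) * err))
    return torch.stack(losses).mean()
```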
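`optimize_penalty_for_target_charge_cycles` is this repo's own routine; judging only from its arguments, it plausibly performs a simple proportional search over the cycling penalty, roughly like the hypothetical sketch below (an assumption, not the actual implementation):

```python
def optimize_penalty(evaluate, initial_penalty=1000, target_charge_cycles=283,
                     learning_rate=15.0, max_iterations=150, tolerance=1.0):
    # `evaluate(penalty)` is assumed to run the policy on the test set and
    # return (profit, charge_cycles) achieved under that penalty.
    penalty = initial_penalty
    profit, cycles = evaluate(penalty)
    for _ in range(max_iterations):
        error = cycles - target_charge_cycles
        if abs(error) <= tolerance:
            break
        # too many realized cycles -> raise the penalty to discourage cycling
        penalty += learning_rate * error
        profit, cycles = evaluate(penalty)
    return penalty, profit, cycles
```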