Fixed policy evaluation for autoregressive
@@ -49,7 +49,7 @@ data_processor.set_full_day_skip(False)
 
 
 #### Hyperparameters ####
-data_processor.set_output_size(96)
+data_processor.set_output_size(1)
 inputDim = data_processor.get_input_size()
 epochs = 300
 
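The output-size change above is the core of the switch to autoregressive training: with set_output_size(1) the model learns to predict a single step, and the full 96-step horizon (presumably one day at 15-minute resolution) is produced at inference time by feeding each prediction back into the input window. A minimal sketch of such a rollout, assuming a feature window whose last column is the lagged target and a model that returns one quantile vector per call; the names here are illustrative, not the repository's API:

import torch

def rollout_day(model, window, quantiles, horizon=96):
    """window: (batch, input_dim) features whose last column is the lagged target.
    Returns (batch, horizon, len(quantiles)) quantile forecasts."""
    median_idx = quantiles.index(0.5)  # assumes 0.5 is among the trained quantiles
    preds = []
    for _ in range(horizon):
        q_step = model(window)  # (batch, len(quantiles)): one step per forward pass
        preds.append(q_step)
        # feed the median back in as the newest lag, dropping the oldest feature
        window = torch.cat([window[:, 1:], q_step[:, median_idx:median_idx + 1]], dim=1)
    return torch.stack(preds, dim=1)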
@@ -80,7 +80,7 @@ time_embedding = TimeEmbedding(
 # lstm_model = GRUModel(time_embedding.output_dim(inputDim), len(quantiles), hidden_size=model_parameters["hidden_size"], num_layers=model_parameters["num_layers"], dropout=model_parameters["dropout"])
 non_linear_model = NonLinearRegression(
     time_embedding.output_dim(inputDim),
-    len(quantiles) * 96,
+    len(quantiles),
     hiddenSize=model_parameters["hidden_size"],
     numLayers=model_parameters["num_layers"],
     dropout=model_parameters["dropout"],
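This is the matching model change: the non-autoregressive head had to emit len(quantiles) * 96 outputs, every quantile for every step of the day, while the autoregressive head emits only len(quantiles) per forward pass. A shape sketch with assumed sizes:

import torch

batch, n_quantiles, horizon = 32, 5, 96  # assumed sizes for illustration

# Non-autoregressive: one flat vector covers the whole day, reshaped downstream.
flat = torch.randn(batch, n_quantiles * horizon)  # head size len(quantiles) * 96
day = flat.view(batch, horizon, n_quantiles)      # (32, 96, 5)

# Autoregressive: one quantile vector per step.
step = torch.randn(batch, n_quantiles)            # head size len(quantiles)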
@@ -97,18 +97,7 @@ baseline_policy = BaselinePolicy(battery, data_path="")
 policy_evaluator = PolicyEvaluator(baseline_policy, task)
 
 #### Trainer ####
-# trainer = AutoRegressiveQuantileTrainer(
-#     model,
-#     inputDim,
-#     optimizer,
-#     data_processor,
-#     quantiles,
-#     "cuda",
-#     policy_evaluator=policy_evaluator,
-#     debug=False,
-# )
-
-trainer = NonAutoRegressiveQuantileRegression(
+trainer = AutoRegressiveQuantileTrainer(
     model,
     inputDim,
     optimizer,
@@ -119,6 +108,17 @@ trainer = NonAutoRegressiveQuantileRegression(
     debug=False,
 )
 
+# trainer = NonAutoRegressiveQuantileRegression(
+#     model,
+#     inputDim,
+#     optimizer,
+#     data_processor,
+#     quantiles,
+#     "cuda",
+#     policy_evaluator=policy_evaluator,
+#     debug=False,
+# )
+
 trainer.add_metrics_to_track(
     [PinballLoss(quantiles), MSELoss(), L1Loss(), CRPSLoss(quantiles)]
 )
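Among the tracked metrics, PinballLoss is the one that makes the quantile outputs meaningful. A minimal version of the pinball (quantile) loss, sketched here for reference; the repository's PinballLoss class may differ in signature and reduction:

import torch

def pinball_loss(pred, target, quantiles):
    """pred: (batch, len(quantiles)), target: (batch,). Averages over batch and quantiles."""
    losses = []
    for i, q in enumerate(quantiles):
        err = target - pred[:, i]
        # penalize under-prediction with weight q, over-prediction with weight 1 - q
        losses.append(torch.maximum(q * err, (q - 1) * err))
    return torch.stack(losses).mean()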