Fixed small summary of the model architectures implemented so far

This commit is contained in:
Victor Mylle
2023-11-30 21:53:35 +00:00
parent eba10c8f83
commit 120b6aa5bd
23 changed files with 402 additions and 185 deletions

View File

@@ -1,11 +1,17 @@
import torch
import numpy as np
class LinearRegression(torch.nn.Module):
def __init__(self, inputSize, output_size):
super(LinearRegression, self).__init__()
self.inputSize = inputSize
self.output_size = output_size
self.linear = torch.nn.Linear(inputSize, output_size)
# dimension multiplication without first one
dim = inputSize[1:]
dim = [int(x) for x in dim]
dim = np.prod(dim)
self.linear = torch.nn.Linear(dim, output_size)
def forward(self, x):
x = torch.squeeze(x, -1)

View File

@@ -37,9 +37,9 @@ class GRUModel(torch.nn.Module):
def forward(self, x):
# Forward pass through the GRU layers
_, hidden_state = self.gru(x)
x, _ = self.gru(x)
x = x[:, -1, :]
# Use the hidden state from the last time step for the output
output = self.linear(hidden_state[-1])
output = self.linear(x)
return output

View File

@@ -13,7 +13,7 @@ class NonLinearRegression(torch.nn.Module):
# add linear layers with relu
self.layers = torch.nn.ModuleList()
self.layers.append(torch.nn.Linear(inputSize, hiddenSize))
self.layers.append(torch.nn.Linear(inputSize[-1], hiddenSize))
self.layers.append(torch.nn.Dropout(dropout))
for _ in range(numLayers - 2):
self.layers.append(torch.nn.Linear(hiddenSize, hiddenSize))

View File

@@ -10,6 +10,8 @@ class TimeEmbedding(nn.Module):
def forward(self, x):
# Extract the last 'time_features' from the input
if self.time_features == 0:
return x
time_feature = x[..., -1] # Use ellipsis to access the last dimension
# convert to int
time_feature = time_feature.int()
@@ -20,6 +22,8 @@ class TimeEmbedding(nn.Module):
def output_dim(self, input_dim):
if self.time_features == 0:
return input_dim
# Create a list from the input dimension
input_dim_list = list(input_dim)
# Modify the last dimension