diff --git a/main.py b/main.py
index bbc70912a9bb490fd82b0b6437583c66019fab00..cca6083603d25bbd69334540955be5e1a9745276 100644
--- a/main.py
+++ b/main.py
@@ -7,6 +7,11 @@ import losses
 import optimizers
 import torch
 import logging
+import torch.optim
+
+def optimizer(cfg, model):
+    result = {"Adam" : torch.optim.Adam(model.parameters())}
+    return result[cfg["Optimizer"]]
 
 if __name__ == "__main__":
     logging.basicConfig(filename='logs/main_unit_test.log', level=logging.INFO)
@@ -43,7 +48,7 @@ if __name__ == "__main__":
 
     f_loss = losses.RMSLELoss()
 
-    optimizer = optimizers.optimizer(cfg, model)
+    optimizer = optimizer(cfg, model)
 
     for t in range(cfg["Training"]["Epochs"]):
         print("Epoch {}".format(t))
diff --git a/model.py b/model.py
index 84716f3bb0cbbe164eefa8c3ede29d935fa9d601..4e6830cb8da450b16918e62505c2cd25403919e0 100644
--- a/model.py
+++ b/model.py
@@ -15,8 +15,7 @@ class LinearRegression(nn.Module):
         self.activate = nn.ReLU()
     def forward(self, x):
         y = self.regressor(x).view((x.shape[0],-1))
-        y = self.activate(y)
-        return y
+        return self.activate(y)
 
 def build_model(cfg, input_size):
     return eval(f"{cfg['Model']['Name']}(cfg, input_size)")
diff --git a/optimizers.py b/optimizers.py
index a2d770421601a470f6579d1d1eb6f44a19dea301..f3ca7900465cc3f662f6af7c98c8d80f00f4dbeb 100644
--- a/optimizers.py
+++ b/optimizers.py
@@ -1,5 +1,5 @@
 import torch.optim
 
 def optimizer(cfg, model):
-    result = {"Adam" : torch.optim.Adam(model.parameters())}
+    result = {"Adam" : torch.optim.Adam(model.parameters(), lr = 1e-2)}
     return result[cfg["Optimizer"]]
diff --git a/train.py b/train.py
index 0217b1269ed626a6ee3c7d0a886ba13dfbff4972..11c9e2e9b6474be5cb3f8132a19207c85d6b5537 100644
--- a/train.py
+++ b/train.py
@@ -30,4 +30,7 @@ def train(model, loader, f_loss, optimizer, device):
         # Backward and optimize
         optimizer.zero_grad()
         loss.backward()
-        optimizer.step()
\ No newline at end of file
+        optimizer.step()
+
+        print(model.regressor.weight)
+        print(model.regressor.bias)
\ No newline at end of file