diff --git a/losses.py b/losses.py
index 35b896d2526788911c84d52dbd7eb7f307e042a0..435d124b376feeb8d80246378b41e27582340f54 100644
--- a/losses.py
+++ b/losses.py
@@ -7,4 +7,5 @@ class RMSLELoss(nn.Module):
         self.mse = nn.MSELoss()
 
     def forward(self, pred, actual):
-        return torch.sqrt(self.mse(torch.log(pred + 1), torch.log(actual + 1)))
\ No newline at end of file
+        resized_actual = actual.view([actual.shape[0], actual.shape[1], 1])
+        return torch.sqrt(self.mse(torch.log(torch.add(pred, 1)), torch.log(torch.add(resized_actual, 1))))
\ No newline at end of file
diff --git a/main.py b/main.py
index cca6083603d25bbd69334540955be5e1a9745276..9e9d6c28bbf1caae3c8c02221c54ccd12f6e501f 100644
--- a/main.py
+++ b/main.py
@@ -8,6 +8,7 @@ import optimizers
 import torch
 import logging
 import torch.optim
+import torch.nn as nn
 
 def optimizer(cfg, model):
     result = {"Adam" : torch.optim.Adam(model.parameters())}
@@ -35,7 +36,7 @@ if __name__ == "__main__":
         use_cuda,
         valid_ratio,
         overwrite_index=True,
-        max_num_samples=max_num_samples,
+        max_num_samples=max_num_samples
     )
 
     if use_cuda :
@@ -43,17 +44,32 @@
     else :
         device = torch.device('cpu')
 
-    model = model.build_model(cfg, 18)
+    #model = model.build_model(cfg, 18)
+
+    model = nn.Sequential(
+        nn.Linear(18, 1, bias=False),
+        nn.ReLU()
+    )
     model = model.to(device)
 
+    # Force all weights to 1.0 for debugging. A bare `param = 1` would only
+    # rebind the loop variable; fill_ mutates the tensor in place.
+    with torch.no_grad():
+        for param in model.parameters():
+            param.fill_(1.0)
+
     f_loss = losses.RMSLELoss()
 
     optimizer = optimizer(cfg, model)
+    #optimizer = torch.optim.Adam(model.parameters(), lr = 10000)
+
    for t in range(cfg["Training"]["Epochs"]):
         print("Epoch {}".format(t))
         train(model, train_loader, f_loss, optimizer, device)
+
+        print(list(model.parameters())[0].grad)
 
         val_loss = test.test(model, valid_loader, f_loss, device)
         print(" Validation : Loss : {:.4f}".format(val_loss))
diff --git a/train.py b/train.py
index 11c9e2e9b6474be5cb3f8132a19207c85d6b5537..489959fb1a2ed509ed334037a26cc3189635bbe2 100644
--- a/train.py
+++ b/train.py
@@ -27,10 +27,20 @@ def train(model, loader, f_loss, optimizer, device):
         outputs = model(inputs)
         loss = f_loss(outputs, targets)
 
+        print("Loss")
+        print(loss)
+
+        print("outputs")
+        print(outputs)
+
+        print("targets")
+        print(targets)
+
         # Backward and optimize
         optimizer.zero_grad()
         loss.backward()
-        optimizer.step()
-        print(model.regressor.weight)
-        print(model.regressor.bias)
\ No newline at end of file
+        print("Grads")
+        print(list(model.parameters())[0].grad)
+
+        optimizer.step()
\ No newline at end of file
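
The losses.py change is a shape fix: the model emits predictions of shape [batch, features, 1] while the targets arrive as [batch, features], and nn.MSELoss will broadcast mismatched shapes (with only a UserWarning), which corrupts the loss. Below is a minimal sketch of the same RMSLE with the target aligned via unsqueeze(-1) rather than view; the concrete tensor shapes are assumptions inferred from the .view call in the diff, not confirmed by the repository.

```python
import torch
import torch.nn as nn

class RMSLELoss(nn.Module):
    """Root Mean Squared Log Error: sqrt(MSE(log(pred + 1), log(target + 1)))."""
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, pred, actual):
        # Align the target with the prediction's trailing singleton dimension;
        # equivalent to the .view(...) call in the diff above.
        if actual.dim() == pred.dim() - 1:
            actual = actual.unsqueeze(-1)
        # log1p(x) == log(x + 1), slightly more numerically stable.
        return torch.sqrt(self.mse(torch.log1p(pred), torch.log1p(actual)))

# Hypothetical shapes, inferred from the .view call in the diff:
pred = torch.rand(4, 18, 1)       # model output: [batch, features, 1]
actual = torch.rand(4, 18)        # targets:      [batch, features]
print(RMSLELoss()(pred, actual))  # scalar loss, no broadcasting surprise
```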
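The main.py and train.py changes together form a debugging pass: swap in a minimal linear model, force its weights to a known constant, and print the first layer's gradient after backward() to confirm backprop reaches it. Here is that pattern as a self-contained sketch; the 18-to-1 layer sizes come from the diff, while the batch size, dummy data, and MSE loss are illustrative assumptions.

```python
import torch
import torch.nn as nn

# Minimal stand-in model, mirroring the nn.Sequential in the diff.
model = nn.Sequential(nn.Linear(18, 1, bias=False), nn.ReLU())

# Set every weight to 1.0 so the forward pass is easy to reason about.
with torch.no_grad():
    for param in model.parameters():
        param.fill_(1.0)

# One dummy forward/backward step (hypothetical data and loss).
inputs = torch.rand(4, 18)
targets = torch.rand(4, 1)
loss = nn.MSELoss()(model(inputs), targets)
loss.backward()

# A non-None, non-zero gradient here means backprop reaches the first layer;
# all zeros would point at the ReLU clamping every output to zero.
print(list(model.parameters())[0].grad)
```

Checking the gradient before optimizer.step(), as the train.py hunk does, catches a None or all-zero gradient before the update would mask it.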