Commit c7e1483e authored by Yandi

Viewing vanishing gradients

parent b9a8d9b7
1 merge request: !1 Master into main
@@ -7,4 +7,5 @@ class RMSLELoss(nn.Module):
         self.mse = nn.MSELoss()
     def forward(self, pred, actual):
-        return torch.sqrt(self.mse(torch.log(pred + 1), torch.log(actual + 1)))
\ No newline at end of file
+        resized_actual = actual.view([actual.shape[0], actual.shape[1], 1])
+        return torch.sqrt(self.mse(torch.log(torch.add(pred, 1)), torch.log(torch.add(resized_actual, 1))))
\ No newline at end of file
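
Note on this hunk: when the model's predictions have shape (batch, seq, 1) but the targets arrive as (batch, seq), nn.MSELoss broadcasts the two tensors and averages over the wrong pairs, so the loss is silently wrong; the added view() aligns the target with the prediction first. A minimal runnable sketch of the updated loss, with demonstration shapes that are assumptions rather than values from the repo:

import torch
import torch.nn as nn

class RMSLELoss(nn.Module):
    """Root Mean Squared Logarithmic Error, as amended in the hunk above."""
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, pred, actual):
        # Reshape targets from (batch, seq) to (batch, seq, 1) so MSELoss
        # compares element-wise instead of broadcasting.
        resized_actual = actual.view([actual.shape[0], actual.shape[1], 1])
        return torch.sqrt(self.mse(torch.log(torch.add(pred, 1)),
                                   torch.log(torch.add(resized_actual, 1))))

pred = torch.rand(4, 10, 1)    # assumed model output shape
actual = torch.rand(4, 10)     # assumed target shape
print(RMSLELoss()(pred, actual))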
@@ -8,6 +8,7 @@ import optimizers
 import torch
 import logging
 import torch.optim
+import torch.nn as nn
 
 def optimizer(cfg, model):
     result = {"Adam" : torch.optim.Adam(model.parameters())}
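
The hunk above is truncated after the dict literal, so the helper's return statement is not visible; a sketch of its presumed shape, where the cfg["Training"]["Optimizer"] lookup key is an assumption:

import torch

def optimizer(cfg, model):
    # Only "Adam" appears in the visible diff; the lookup key below is assumed.
    result = {"Adam": torch.optim.Adam(model.parameters())}
    return result[cfg["Training"]["Optimizer"]]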
@@ -35,7 +36,7 @@ if __name__ == "__main__":
         use_cuda,
         valid_ratio,
         overwrite_index=True,
-        max_num_samples=max_num_samples,
+        max_num_samples=max_num_samples
     )
 
     if use_cuda :
@@ -43,17 +44,29 @@ if __name__ == "__main__":
     else :
         device = torch.device('cpu')
 
-    model = model.build_model(cfg, 18)
+    #model = model.build_model(cfg, 18)
+    model = nn.Sequential(
+        nn.Linear(18, 1, False),
+        nn.ReLU()
+    )
     model = model.to(device)
 
+    for param in list(model.parameters()):
+        param = 1
+
     f_loss = losses.RMSLELoss()
     optimizer = optimizer(cfg, model)
+    #optimizer = torch.optim.Adam((model.parameters()), lr = 10000)
 
     for t in range(cfg["Training"]["Epochs"]):
         print("Epoch {}".format(t))
         train(model, train_loader, f_loss, optimizer, device)
+        print(list(model.parameters())[0].grad)
         val_loss = test.test(model, valid_loader, f_loss, device)
         print(" Validation : Loss : {:.4f}".format(val_loss))
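
Two remarks on this hunk. First, the commented-out build_model call is replaced by a deliberately tiny probe (one bias-free Linear layer plus ReLU) so the single weight tensor's gradient is easy to read. Second, `param = 1` only rebinds the Python loop variable; it leaves the weights untouched. A sketch of one way to actually force constant weights and inspect the gradient (the fill value and input shape are assumptions):

import torch
import torch.nn as nn

# Same probe model as the hunk: 18 inputs, 1 output, no bias.
model = nn.Sequential(nn.Linear(18, 1, False), nn.ReLU())

# In-place fill actually changes the weights; plain `param = 1` does not.
with torch.no_grad():
    for param in model.parameters():
        param.fill_(1.0)

x = torch.rand(4, 18)          # assumed batch of 18-feature inputs
model(x).sum().backward()      # one backward pass to populate .grad
print(list(model.parameters())[0].grad)

If the ReLU output is zero for every sample, the printed gradient is all zeros, which is the dying-ReLU form of the vanishing gradients this commit is probing.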
@@ -27,10 +27,20 @@ def train(model, loader, f_loss, optimizer, device):
         outputs = model(inputs)
         loss = f_loss(outputs, targets)
 
+        print("Loss")
+        print(loss)
+        print("outputs")
+        print(outputs)
+        print("targets")
+        print(targets)
+
         # Backward and optimize
         optimizer.zero_grad()
         loss.backward()
-        optimizer.step()
-        print(model.regressor.weight)
-        print(model.regressor.bias)
\ No newline at end of file
+        print("GRads")
+        print(list(model.parameters())[0].grad)
+        optimizer.step()
\ No newline at end of file
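
The reordered prints now dump the first parameter's raw gradient between loss.backward() and optimizer.step(), which is the right window for catching vanishing gradients. For larger models, a per-parameter norm summary is easier to scan; a hypothetical helper along the same lines (name and threshold are my own):

import torch

def report_gradients(model, eps=1e-8):
    # Call between loss.backward() and optimizer.step(); a norm near
    # zero flags a parameter whose gradient has vanished.
    for name, param in model.named_parameters():
        if param.grad is None:
            print(f"{name}: no gradient")
        elif param.grad.norm() < eps:
            print(f"{name}: vanishing (norm = {param.grad.norm():.2e})")
        else:
            print(f"{name}: grad norm = {param.grad.norm():.4f}")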