Commit c1356bff authored by Yandi

[Normalizing the input]

parent 0615e610
Merge request !1: Master into main
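Note: the normalization named in the commit message is not itself visible in the hunks below, which show the accompanying cleanup (a debug print removed, the first layer re-sized, the custom weight init disabled, gradient logging switched on). As a rough sketch only of what input standardization usually looks like in a PyTorch pipeline of this kind, every name below is illustrative and not code from this repository:

import torch

class Standardize:
    # Hypothetical transform: center and scale each feature with
    # statistics computed on the training fold only.
    def __init__(self, mean, std, eps=1e-8):
        self.mean = torch.as_tensor(mean)
        self.std = torch.as_tensor(std)
        self.eps = eps

    def __call__(self, x):
        return (x - self.mean) / (self.std + self.eps)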
@@ -71,7 +71,6 @@ def create_submission(model, transform, device):
     with torch.no_grad():
         for X in tqdm.tqdm(test_loader):
             X = X.to(device)
-            print(X.shape)
             #############################################
             # This is where you inject your knowledge
             # About your model
...
@@ -121,6 +121,7 @@ def get_dataloaders(
         target_transform=valid_target_transform,
         num_days=num_days,
     )
+    # The sum of the two folds are not expected to be exactly of
+    # max_num_samples
     logging.info(f" - The train fold has {len(train_dataset)} samples")
...
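The comment added above warns that the two folds need not sum exactly to max_num_samples. A tiny illustration of one way this happens when each fold size is truncated independently (the ratio and sizes are made up; the project's actual split logic is not shown in this hunk):

max_num_samples = 100
valid_ratio = 1 / 3
num_valid = int(valid_ratio * max_num_samples)        # 33
num_train = int((1 - valid_ratio) * max_num_samples)  # 66, int() truncates
print(num_train + num_valid)                          # 99, one short of 100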
@@ -55,9 +55,9 @@ if __name__ == "__main__":
     #network = network.build_network(cfg, 18)
     network = nn.Sequential(
-        nn.Linear(14,8,False),
+        nn.Linear(14,35,True),
         nn.ReLU(),
-        nn.Linear(8, 35, True),
+        nn.Linear(35, 35, True),
         nn.ReLU(),
         nn.Linear(35,35,True),
         nn.ReLU(),
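For readers puzzled by the positional arguments: nn.Linear takes (in_features, out_features, bias), so the old first layer nn.Linear(14,8,False) had no bias term, while the new nn.Linear(14,35,True) both widens the layer and adds one. A quick check:

import torch.nn as nn

layer = nn.Linear(14, 35, True)  # in_features, out_features, bias
print(layer.weight.shape)        # torch.Size([35, 14])
print(layer.bias.shape)          # torch.Size([35])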
@@ -67,18 +67,23 @@ if __name__ == "__main__":
         nn.ReLU(),
         nn.Linear(35,35,True),
         nn.ReLU(),
+        nn.Linear(35,35, True),
+        nn.ReLU(),
         nn.Linear(35,1, True),
         nn.ReLU()
     )
+    """
     def init_xavier(module):
         if type(module)==nn.Linear:
-            nn.init.xavier_uniform_(module.weight)
+            nn.init.constant_(module.weight,1)
+    """
     network = network.to(device)
     """
-    for param in list(network.parameters()):
-        param = 1
+    for layer in network:
+        layer.apply(init_xavier)
     """
     f_loss = losses.RMSLELoss()
...
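Both initialization attempts above end up inside triple-quoted strings, i.e. disabled. Note that the older loop (for param in ...: param = 1) was a no-op anyway: assigning to the loop variable only rebinds a Python name and never writes into the tensor. A working version of the Xavier idea would look like this sketch (standard PyTorch usage, not code from this commit):

import torch.nn as nn

network = nn.Sequential(nn.Linear(14, 35), nn.ReLU(), nn.Linear(35, 1))

def init_xavier(module):
    # nn.init functions ending in "_" modify the tensor in place
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight)
        nn.init.zeros_(module.bias)

network.apply(init_xavier)  # .apply() visits every submodule recursively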
 from tqdm import tqdm
 import matplotlib.pyplot as plt
 import numpy as np
+import torch

 def train(model, loader, f_loss, optimizer, device):
     """
...
@@ -35,17 +36,19 @@ def train(model, loader, f_loss, optimizer, device):
         optimizer.zero_grad()
         loss.backward()
-        #Y = list(model.parameters())[0].grad.cpu().tolist()
-        #gradients.append(np.mean(Y))
-        #tar.append(np.mean(outputs.cpu().tolist()))
-        #out.append(np.mean(targets.cpu().tolist()))
+        #torch.nn.utils.clip_grad_norm(model.parameters(), 50)
+        Y = list(model.parameters())[0].grad.cpu().tolist()
+        gradients.append(np.mean(Y))
+        tar.append(np.mean(outputs.cpu().tolist()))
+        out.append(np.mean(targets.cpu().tolist()))
         optimizer.step()
-        #visualize_gradients(gradients)
-        #visualize_gradients(tar)
-        #visualize_gradients(out)
+        visualize_gradients(gradients)
+        visualize_gradients(tar)
+        visualize_gradients(out)

 def visualize_gradients(gradients):
     print(gradients)
...
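The re-enabled logging averages the gradient of only the first parameter tensor and prints on every call, which gets noisy. A common alternative is to track the global gradient norm across all parameters; note also that torch.nn.utils.clip_grad_norm, left commented out above, is the deprecated spelling of clip_grad_norm_. A sketch under those assumptions, not this repository's code:

import torch

def global_grad_norm(model):
    # L2 norm over all parameter gradients; call after loss.backward()
    total = 0.0
    for p in model.parameters():
        if p.grad is not None:
            total += p.grad.detach().pow(2).sum().item()
    return total ** 0.5

# In the training loop, after loss.backward() and before optimizer.step():
#     torch.nn.utils.clip_grad_norm_(model.parameters(), 50)  # in-place variant
#     gradients.append(global_grad_norm(model))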