From c1356bffa3e2d1f87026024045d1fb9827622241 Mon Sep 17 00:00:00 2001
From: Yandi <yandirzm@gmail.com>
Date: Sun, 22 Jan 2023 18:41:45 +0100
Subject: [PATCH] Normalize the input

---
 create_submission.py |  1 -
 dataloader.py        |  1 +
 main.py              | 15 ++++++++++-----
 train.py             | 17 ++++++++++-------
 4 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/create_submission.py b/create_submission.py
index 126caf8..935934f 100644
--- a/create_submission.py
+++ b/create_submission.py
@@ -71,7 +71,6 @@ def create_submission(model, transform, device):
         with torch.no_grad():
             for X in tqdm.tqdm(test_loader):
                 X = X.to(device)
-                print(X.shape)
                 #############################################
                 # This is where you inject your knowledge
                 # About your model
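Note: the removed print was a per-batch debug statement inside the
inference loop. The body below the marker presumably runs the model and
collects one prediction per row; a minimal sketch, assuming the model
maps each batch to a (batch, 1) tensor (the actual submission code is
not part of this patch):

    predictions = []
    with torch.no_grad():
        for X in tqdm.tqdm(test_loader):
            X = X.to(device)
            # Forward pass; squeeze the trailing dim so each row
            # yields one scalar prediction.
            Y = model(X).squeeze(-1)
            predictions.extend(Y.cpu().tolist())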
diff --git a/dataloader.py b/dataloader.py
index 0f131ea..9c9df4d 100644
--- a/dataloader.py
+++ b/dataloader.py
@@ -121,6 +121,7 @@ def get_dataloaders(
         target_transform=valid_target_transform,
         num_days=num_days,
     )
+
     # The sum of the two folds are not expected to be exactly of
     # max_num_samples
     logging.info(f"  - The train fold has {len(train_dataset)} samples")
diff --git a/main.py b/main.py
index a759e7e..9b2efb8 100644
--- a/main.py
+++ b/main.py
@@ -55,9 +55,9 @@ if __name__ == "__main__":
     #network = network.build_network(cfg, 18)
 
     network = nn.Sequential(
-        nn.Linear(14,8,False),
+        nn.Linear(14, 35, True),
         nn.ReLU(),
-        nn.Linear(8, 35, True),
+        nn.Linear(35, 35, True),
         nn.ReLU(),
         nn.Linear(35,35,True),
         nn.ReLU(),
@@ -67,18 +67,23 @@ if __name__ == "__main__":
         nn.ReLU(),
         nn.Linear(35,35,True),
         nn.ReLU(),
+        nn.Linear(35, 35, True),
+        nn.ReLU(),
         nn.Linear(35,1, True),
         nn.ReLU()
     )
 
+    """
     def init_xavier(module):
         if type(module)==nn.Linear:
-            nn.init.xavier_uniform_(module.weight)
+            nn.init.constant_(module.weight, 1)
+    """
+
     network = network.to(device)
 
     """
-    for param in list(network.parameters()):
-        param = 1
+    for layer in network:
+        layer.apply(init_xavier)
     """
 
     f_loss = losses.RMSLELoss()
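Two notes on this hunk. First, the disabled initializer now fills every
weight with the constant 1, which defeats the symmetry breaking that
random initialization provides (only the biases stay random) and
inflates activations layer by layer. If the block is re-enabled, Xavier
initialization avoids this; a sketch (nn.Module.apply already recurses
over submodules, so the per-layer loop is unnecessary):

    def init_xavier(module):
        # Initialize only the affine layers; skip the activations.
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            nn.init.zeros_(module.bias)

    network.apply(init_xavier)

Second, losses.RMSLELoss is not shown in this patch. Assuming it is the
usual root mean squared logarithmic error, a self-contained sketch:

    import torch
    import torch.nn as nn

    class RMSLELoss(nn.Module):
        """sqrt(MSE(log(1 + pred), log(1 + target)))."""

        def __init__(self):
            super().__init__()
            self.mse = nn.MSELoss()

        def forward(self, pred, target):
            return torch.sqrt(self.mse(torch.log1p(pred),
                                       torch.log1p(target)))

RMSLE needs non-negative predictions, which may be why the network ends
in a ReLU.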
diff --git a/train.py b/train.py
index ef930e9..cb594f8 100644
--- a/train.py
+++ b/train.py
@@ -1,6 +1,7 @@
 from tqdm import tqdm
 import matplotlib.pyplot as plt
 import numpy as np
+import torch
 
 def train(model, loader, f_loss, optimizer, device):
     """
@@ -35,17 +36,19 @@ def train(model, loader, f_loss, optimizer, device):
         optimizer.zero_grad()
         loss.backward()
 
+
+        #torch.nn.utils.clip_grad_norm_(model.parameters(), 50)
         
-        #Y = list(model.parameters())[0].grad.cpu().tolist()
+        Y = list(model.parameters())[0].grad.cpu().tolist()
         
-        #gradients.append(np.mean(Y))
-        #tar.append(np.mean(outputs.cpu().tolist()))
-        #out.append(np.mean(targets.cpu().tolist()))
+        gradients.append(np.mean(Y))
+        tar.append(np.mean(targets.cpu().tolist()))
+        out.append(np.mean(outputs.cpu().tolist()))
         
         optimizer.step()
-    #visualize_gradients(gradients)
-    #visualize_gradients(tar)
-    #visualize_gradients(out)
+    visualize_gradients(gradients)
+    visualize_gradients(tar)
+    visualize_gradients(out)
 
 def visualize_gradients(gradients):
     print(gradients)
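visualize_gradients only prints its argument even though
matplotlib.pyplot is already imported at the top of train.py. A minimal
plotting version, one scalar per training batch (a sketch; the intended
visualization is not specified in this patch):

    def visualize_gradients(values, title="mean per batch"):
        plt.figure()
        plt.plot(values)
        plt.xlabel("batch")
        plt.ylabel("mean value")
        plt.title(title)
        plt.show()

The same helper serves all three series collected in train: first-layer
gradient means, target means, and output means.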
-- 
GitLab