diff --git a/config.yml b/config.yml
index b946503635467b2d8e887c096509715485c081a1..b448853a01b2640dd32850e38bfedcac7db14d04 100644
--- a/config.yml
+++ b/config.yml
@@ -1,6 +1,6 @@
 # Dataset Configuration
 Dataset:
-  num_days: 1  # Test with sequence of 1 day
+  num_days: 365 # Use the full 365-day input sequence
   batch_size: 128
   num_workers: 7
   valid_ratio: 0.2
@@ -15,6 +15,10 @@ Dataset:
 #Optimizer selection
 Optimizer: Adam # in {Adam}
 
+#Training parameters
+Training:
+  Epochs: 30
+
 #Model selection
 Model:
   Name: LinearRegression
diff --git a/losses.py b/losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..35b896d2526788911c84d52dbd7eb7f307e042a0
--- /dev/null
+++ b/losses.py
@@ -0,0 +1,10 @@
+import torch.nn as nn
+import torch
+
+class RMSLELoss(nn.Module):
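+    """Root Mean Squared Logarithmic Error.
+
+    Defined as sqrt(MSE(log(1 + pred), log(1 + actual))); predictions and
+    targets are assumed non-negative so the logarithm is well defined.
+    """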
+    def __init__(self):
+        super().__init__()
+        self.mse = nn.MSELoss()
+        
+    def forward(self, pred, actual):
+        return torch.sqrt(self.mse(torch.log1p(pred), torch.log1p(actual)))
\ No newline at end of file
diff --git a/main.py b/main.py
index bfd0fe0c3315179ad6856fd9c0ac32ee6dba1b26..bbc70912a9bb490fd82b0b6437583c66019fab00 100644
--- a/main.py
+++ b/main.py
@@ -4,19 +4,23 @@ import test
 from train import train
 import yaml
 import losses
-import models
+import optimizers
+import torch
+import logging
 
 if __name__ == "__main__":
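+    # NOTE: assumes a logs/ directory already exists for the log file below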
+    logging.basicConfig(filename='logs/main_unit_test.log', level=logging.INFO)
+
     config_file = open("config.yml")
     cfg = yaml.safe_load(config_file)
 
     use_cuda = torch.cuda.is_available()
     trainpath           = cfg["Dataset"]["_DEFAULT_TRAIN_FILEPATH"]
-    num_days            = cfg["Dataset"]["num_days"]
-    batch_size          = cfg["Dataset"]["batch_size"]
-    num_workers         = cfg["Dataset"]["num_workers"]
-    valid_ratio         = cfg["Dataset"]["valid_ratio"]
-    max_num_samples     = cfg["Dataset"]["max_num_samples"]
+    num_days            = int(cfg["Dataset"]["num_days"])
+    batch_size          = int(cfg["Dataset"]["batch_size"])
+    num_workers         = int(cfg["Dataset"]["num_workers"])
+    valid_ratio         = float(cfg["Dataset"]["valid_ratio"])
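+    # max_num_samples is read as a string (e.g. "None"); eval turns it into a
+    # Python value, which is acceptable for a trusted local config file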
+    max_num_samples     = eval(cfg["Dataset"]["max_num_samples"])
 
     train_loader, valid_loader = dataloader.get_dataloaders(
         trainpath,
@@ -34,15 +38,19 @@ if __name__ == "__main__":
     else:
         device = torch.device('cpu')
 
-    model = model.build_model(cfg, input_size)
-
-    f_loss = losses.RMSLE.RMSLE()
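+    # Input size hardcoded to 18, the expected number of input features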
+    model = model.build_model(cfg, 18)
+    model = model.to(device)
 
-    optimizer = models.choose_optimizer.optimizer(cfg)
+    f_loss = losses.RMSLELoss()
 
-    train(model = model, loader = train_loader, f_loss = f_loss, optimizer = optimizer, device = device)
+    optimizer = optimizers.optimizer(cfg, model)
+
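+    # One training pass and one validation pass per configured epoch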
+    for t in range(cfg["Training"]["Epochs"]):
+        print("Epoch {}".format(t))
+        train(model, train_loader, f_loss, optimizer, device)
 
-    
+        val_loss = test.test(model, valid_loader, f_loss, device)
+        print(" Validation : Loss : {:.4f}".format(val_loss))
 
     """
     logdir = generate_unique_logpath(top_logdir, "linear")
diff --git a/model.py b/model.py
index 7a393b4e8c3503c75ee50f3f998ef4ed1be3b4c6..84716f3bb0cbbe164eefa8c3ede29d935fa9d601 100644
--- a/model.py
+++ b/model.py
@@ -3,11 +3,24 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.autograd import Function
-import models
 
+
+class LinearRegression(nn.Module):
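+    """A single linear layer (with optional bias) followed by ReLU,
+    producing one non-negative prediction per sample."""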
+    def __init__(self, cfg, input_size):
+        super(LinearRegression, self).__init__()
+        self.input_size = input_size
+        self.bias = cfg["LinearRegression"]["Bias"]
+        self.regressor = nn.Linear(input_size, 1, bias=self.bias)
+        # ReLU keeps the output non-negative, as required by RMSLE's logarithm
+        self.activate = nn.ReLU()
+
+    def forward(self, x):
+        y = self.regressor(x).view((x.shape[0], -1))
+        y = self.activate(y)
+        return y
 
 def build_model(cfg, input_size):    
-    return eval(f"models.{cfg['Model']['Name']}(cfg, input_size)")
+    # Resolve the class from an explicit registry rather than eval'ing a string
+    model_classes = {"LinearRegression": LinearRegression}
+    return model_classes[cfg["Model"]["Name"]](cfg, input_size)
+
 
 if __name__== "__main__":
     import yaml
diff --git a/optimizers.py b/optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2d770421601a470f6579d1d1eb6f44a19dea301
--- /dev/null
+++ b/optimizers.py
@@ -0,0 +1,5 @@
+import torch.optim
+
+def optimizer(cfg, model):
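+    # Map config names to constructors so only the requested optimizer is built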
+    result = {"Adam" : torch.optim.Adam(model.parameters())}
+    return result[cfg["Optimizer"]]
diff --git a/test.py b/test.py
index 80d9e454853e34adead85e949683cb3c92cc056c..2007732fc856ce8cee1f3868ecb19e759b182387 100644
--- a/test.py
+++ b/test.py
@@ -1,4 +1,5 @@
 import torch
+from tqdm import tqdm
 
 def test(model, loader, f_loss, device):
     """
@@ -21,8 +22,8 @@ def test(model, loader, f_loss, device):
     with torch.no_grad():
         model.eval()
         N = 0
-        tot_loss, correct = 0.0, 0.0
-        for _, (inputs, targets) in enumerate(loader):
+        tot_loss = 0.0
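+        # tot_loss and N accumulate over batches so the mean loss per sample
+        # can be returned at the end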
+        for _, (inputs, targets) in tqdm(enumerate(loader), total=len(loader)):
 
             # We got a minibatch from the loader within inputs and targets
             # With a mini batch size of 128, we have the following shapes
@@ -47,6 +48,4 @@ def test(model, loader, f_loss, device):
-            # Be carefull, the model is outputing scores and not the probabilities
-            # But given the softmax is not altering the rank of its input scores
-            # we can compute the label by argmaxing directly the scores
-            predicted_targets = outputs.argmax(dim=1)
-            correct += (predicted_targets == targets).sum().item()
-        return tot_loss/N, correct/N
\ No newline at end of file
+        return tot_loss/N
\ No newline at end of file
diff --git a/train.py b/train.py
index ce6f54c9315a80af78a1d46ba765235fb81fc4f6..0217b1269ed626a6ee3c7d0a886ba13dfbff4972 100644
--- a/train.py
+++ b/train.py
@@ -1,3 +1,5 @@
+from tqdm import tqdm
+
 def train(model, loader, f_loss, optimizer, device):
     """
     Train a model for one epoch, iterating over the loader
@@ -18,7 +20,7 @@ def train(model, loader, f_loss, optimizer, device):
 
     model.train()
 
-    for _, (inputs, targets) in enumerate(loader):
+    for _, (inputs, targets) in tqdm(enumerate(loader), total=len(loader)):
         inputs, targets = inputs.to(device), targets.to(device)
 
         # Compute the forward pass through the network up to the loss