Skip to content
Snippets Groups Projects
Commit f69df593 authored by Yandi's avatar Yandi
Browse files

[Test] First try

parent 6efd6667
No related branches found
No related tags found
1 merge request!1Master into main
# Dataset Configuration
Dataset:
num_days: 365 # Sequence length in days (full year)
batch_size: 128
num_workers: 7
valid_ratio: 0.2
......@@ -15,6 +15,10 @@ Dataset:
#Optimizer selection
Optimizer: Adam # in {Adam}
#Training parameters
Training:
Epochs: 30
#Model selection
Model:
Name: LinearRegression
......
import torch.nn as nn
import torch
class RMSLELoss(nn.Module):
    """Root Mean Squared Logarithmic Error criterion.

    Computes sqrt(MSE(log(pred + 1), log(actual + 1))), which penalizes
    relative prediction errors rather than absolute ones.
    """

    def __init__(self):
        super().__init__()
        # Delegate the squared-error part to the built-in criterion.
        self.mse = nn.MSELoss()

    def forward(self, pred, actual):
        log_pred = torch.log(pred + 1)
        log_actual = torch.log(actual + 1)
        return self.mse(log_pred, log_actual).sqrt()
\ No newline at end of file
......@@ -4,19 +4,23 @@ import test
from train import train
import yaml
import losses
import models
import optimizers
import torch
import logging
if __name__ == "__main__":
logging.basicConfig(filename='logs/main_unit_test.log', level=logging.INFO)
config_file = open("config.yml")
cfg = yaml.load(config_file)
use_cuda = torch.cuda.is_available()
trainpath = cfg["Dataset"]["_DEFAULT_TRAIN_FILEPATH"]
num_days = cfg["Dataset"]["num_days"]
batch_size = cfg["Dataset"]["batch_size"]
num_workers = cfg["Dataset"]["num_workers"]
valid_ratio = cfg["Dataset"]["valid_ratio"]
max_num_samples = cfg["Dataset"]["max_num_samples"]
num_days = int(cfg["Dataset"]["num_days"])
batch_size = int(cfg["Dataset"]["batch_size"])
num_workers = int(cfg["Dataset"]["num_workers"])
valid_ratio = float(cfg["Dataset"]["valid_ratio"])
max_num_samples = eval(cfg["Dataset"]["max_num_samples"])
train_loader, valid_loader = dataloader.get_dataloaders(
trainpath,
......@@ -34,15 +38,19 @@ if __name__ == "__main__":
else :
device = toch.device('cpu')
model = model.build_model(cfg, input_size)
f_loss = losses.RMSLE.RMSLE()
model = model.build_model(cfg, 18)
model = model.to(device)
optimizer = models.choose_optimizer.optimizer(cfg)
f_loss = losses.RMSLELoss()
train(model = model, loader = train_loader, f_loss = f_loss, optimizer = optimizer, device = device)
optimizer = optimizers.optimizer(cfg, model)
for t in range(cfg["Training"]["Epochs"]):
print("Epoch {}".format(t))
train(model, train_loader, f_loss, optimizer, device)
val_loss = test.test(model, valid_loader, f_loss, device)
print(" Validation : Loss : {:.4f}".format(val_loss))
"""
logdir = generate_unique_logpath(top_logdir, "linear")
......
......@@ -3,11 +3,24 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import models
import torch.nn as nn
class LinearRegression(nn.Module):
    """A single linear layer followed by ReLU.

    The ReLU clamps predictions to be non-negative. The bias flag is read
    from cfg["LinearRegression"]["Bias"].
    """

    def __init__(self, cfg, input_size):
        super(LinearRegression, self).__init__()
        self.input_size = input_size
        # Whether the linear layer carries a learnable bias term.
        self.bias = cfg["LinearRegression"]["Bias"]
        self.regressor = nn.Linear(input_size, 1, self.bias)
        self.activate = nn.ReLU()

    def forward(self, x):
        batch = x.shape[0]
        flat = self.regressor(x).view((batch, -1))
        return self.activate(flat)
def build_model(cfg, input_size):
    """Instantiate the model class named by cfg["Model"]["Name"].

    The name must match a class defined in this module
    (e.g. "LinearRegression"). Raises KeyError for unknown names.
    """
    # Merge residue left two consecutive return statements here (the second
    # was unreachable); keep a single construction path.
    # Resolve the class through this module's namespace instead of eval():
    # same result for valid names, but no arbitrary-code execution from
    # config input.
    model_cls = globals()[cfg["Model"]["Name"]]
    return model_cls(cfg, input_size)
if __name__== "__main__":
import yaml
......
import torch.optim
def optimizer(cfg, model):
    """Build the optimizer named by cfg["Optimizer"] for the model's parameters.

    Currently supports "Adam" (with default hyper-parameters). An unknown
    name raises KeyError, as before.
    """
    # Map names to classes rather than instances so only the selected
    # optimizer is ever constructed (the original built every entry in the
    # table before picking one).
    factories = {"Adam": torch.optim.Adam}
    return factories[cfg["Optimizer"]](model.parameters())
import torch
from tqdm import tqdm
def test(model, loader, f_loss, device):
"""
......@@ -21,8 +22,8 @@ def test(model, loader, f_loss, device):
with torch.no_grad():
model.eval()
N = 0
tot_loss, correct = 0.0, 0.0
for _, (inputs, targets) in enumerate(loader):
tot_loss = 0.0
for _, (inputs, targets) in tqdm(enumerate(loader), total = len(loader)):
# We got a minibatch from the loader within inputs and targets
# With a mini batch size of 128, we have the following shapes
......@@ -47,6 +48,4 @@ def test(model, loader, f_loss, device):
# Be carefull, the model is outputing scores and not the probabilities
# But given the softmax is not altering the rank of its input scores
# we can compute the label by argmaxing directly the scores
predicted_targets = outputs.argmax(dim=1)
correct += (predicted_targets == targets).sum().item()
return tot_loss/N, correct/N
\ No newline at end of file
return tot_loss/N
\ No newline at end of file
from tqdm import tqdm
def train(model, loader, f_loss, optimizer, device):
"""
Train a model for one epoch, iterating over the loader
......@@ -18,7 +20,7 @@ def train(model, loader, f_loss, optimizer, device):
model.train()
for _, (inputs, targets) in enumerate(loader):
for _, (inputs, targets) in tqdm(enumerate(loader), total = len(loader)):
inputs, targets = inputs.to(device), targets.to(device)
# Compute the forward pass through the network up to the loss
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment