Commit f69df593 authored by Yandi

[Test] First try

parent 6efd6667
1 merge request: !1 Master into main
config.yml
 # Dataset Configuration
 Dataset:
-  num_days: 1 # Test with sequence of 1 day
+  num_days: 365 # Test with sequence of 1 day
   batch_size: 128
   num_workers: 7
   valid_ratio: 0.2
@@ -15,6 +15,10 @@ Dataset:
 #Optimizer selection
 Optimizer: Adam # in {Adam}
 
+#Training parameters
+Training:
+  Epochs: 30
+
 #Model selection
 Model:
   Name: LinearRegression
 ...
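For reference, a minimal sketch of how the new Training block might be read back (assuming PyYAML; yaml.safe_load restricts parsing to safe tags, unlike the bare yaml.load call used in main.py below):

import yaml

with open("config.yml") as config_file:
    cfg = yaml.safe_load(config_file)

print(cfg["Dataset"]["num_days"])  # 365
print(cfg["Training"]["Epochs"])   # 30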
losses.py (new file)
+import torch.nn as nn
+import torch
+
+class RMSLELoss(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.mse = nn.MSELoss()
+
+    def forward(self, pred, actual):
+        return torch.sqrt(self.mse(torch.log(pred + 1), torch.log(actual + 1)))
\ No newline at end of file
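RMSLE is the root mean squared error computed on log(1 + x)-transformed values, so it penalizes relative rather than absolute errors; torch.log(pred + 1) assumes non-negative predictions and targets (torch.log1p would be the numerically safer equivalent). A quick usage sketch, assuming the new file is saved as losses.py:

import torch
from losses import RMSLELoss

criterion = RMSLELoss()
pred = torch.tensor([2.0, 10.0])
actual = torch.tensor([3.0, 8.0])
print(criterion(pred, actual))  # tensor(0.2480)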
main.py
@@ -4,19 +4,23 @@ import test
 from train import train
 import yaml
 import losses
-import models
+import optimizers
+import torch
+import logging
 
 if __name__ == "__main__":
+    logging.basicConfig(filename='logs/main_unit_test.log', level=logging.INFO)
     config_file = open("config.yml")
     cfg = yaml.load(config_file)
     use_cuda = torch.cuda.is_available()
     trainpath = cfg["Dataset"]["_DEFAULT_TRAIN_FILEPATH"]
-    num_days = cfg["Dataset"]["num_days"]
+    num_days = int(cfg["Dataset"]["num_days"])
-    batch_size = cfg["Dataset"]["batch_size"]
+    batch_size = int(cfg["Dataset"]["batch_size"])
-    num_workers = cfg["Dataset"]["num_workers"]
+    num_workers = int(cfg["Dataset"]["num_workers"])
-    valid_ratio = cfg["Dataset"]["valid_ratio"]
+    valid_ratio = float(cfg["Dataset"]["valid_ratio"])
-    max_num_samples = cfg["Dataset"]["max_num_samples"]
+    max_num_samples = eval(cfg["Dataset"]["max_num_samples"])
     train_loader, valid_loader = dataloader.get_dataloaders(
         trainpath,
@@ -34,15 +38,19 @@ if __name__ == "__main__":
     else:
         device = torch.device('cpu')
 
-    model = model.build_model(cfg, input_size)
+    model = model.build_model(cfg, 18)
+    model = model.to(device)
 
-    f_loss = losses.RMSLE.RMSLE()
+    f_loss = losses.RMSLELoss()
 
-    optimizer = models.choose_optimizer.optimizer(cfg)
+    optimizer = optimizers.optimizer(cfg, model)
 
-    train(model = model, loader = train_loader, f_loss = f_loss, optimizer = optimizer, device = device)
+    for t in range(cfg["Training"]["Epochs"]):
+        print("Epoch {}".format(t))
+        train(model, train_loader, f_loss, optimizer, device)
+        val_loss = test.test(model, valid_loader, f_loss, device)
+        print(" Validation : Loss : {:.4f}".format(val_loss))
 
     """
     logdir = generate_unique_logpath(top_logdir, "linear")
     ...
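Two fragile spots in this script are worth flagging: yaml.load without a Loader argument is deprecated (and a hard error in PyYAML >= 6), and eval on max_num_samples executes whatever string the config contains. A stricter sketch of the same parsing, using only the standard library and PyYAML:

import ast
import yaml

with open("config.yml") as config_file:
    cfg = yaml.safe_load(config_file)

# literal_eval accepts only Python literals such as "None" or "10000",
# so a malicious or mistyped config line cannot execute code.
max_num_samples = ast.literal_eval(str(cfg["Dataset"]["max_num_samples"]))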
models.py
@@ -3,11 +3,24 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.autograd import Function
 
+class LinearRegression(nn.Module):
+    def __init__(self, cfg, input_size):
+        super(LinearRegression, self).__init__()
+        self.input_size = input_size
+        self.bias = cfg["LinearRegression"]["Bias"]
+        self.regressor = nn.Linear(input_size, 1, self.bias)
+        self.activate = nn.ReLU()
+
+    def forward(self, x):
+        y = self.regressor(x).view((x.shape[0], -1))
+        y = self.activate(y)
+        return y
 
 def build_model(cfg, input_size):
-    return eval(f"models.{cfg['Model']['Name']}(cfg, input_size)")
+    return eval(f"{cfg['Model']['Name']}(cfg, input_size)")
 
 if __name__ == "__main__":
     import yaml
     ...
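The eval-based factory in build_model works, but it executes a string taken from the config. A sketch of an equivalent lookup via getattr on the current module, which fails with a plain AttributeError for unknown names instead of executing arbitrary code:

import sys

def build_model(cfg, input_size):
    # Resolve the class by name in this module's namespace.
    model_cls = getattr(sys.modules[__name__], cfg["Model"]["Name"])
    return model_cls(cfg, input_size)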
optimizers.py (new file)
+import torch.optim
+
+def optimizer(cfg, model):
+    result = {"Adam": torch.optim.Adam(model.parameters())}
+    return result[cfg["Optimizer"]]
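Note that the result dict instantiates every optimizer it lists before indexing, so each additional entry would allocate optimizer state up front. A sketch that defers construction by mapping names to classes instead:

import torch.optim

def optimizer(cfg, model):
    # Instantiate only the optimizer that the config selects.
    factories = {"Adam": torch.optim.Adam}
    return factories[cfg["Optimizer"]](model.parameters())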
test.py
 import torch
+from tqdm import tqdm
 
 def test(model, loader, f_loss, device):
     """
@@ -21,8 +22,8 @@ def test(model, loader, f_loss, device):
     with torch.no_grad():
         model.eval()
         N = 0
-        tot_loss, correct = 0.0, 0.0
+        tot_loss = 0.0
-        for _, (inputs, targets) in enumerate(loader):
+        for _, (inputs, targets) in tqdm(enumerate(loader), total=len(loader)):
             # We got a minibatch from the loader within inputs and targets
             # With a mini batch size of 128, we have the following shapes
@@ -47,6 +48,4 @@ def test(model, loader, f_loss, device):
             # Be careful: the model outputs scores, not probabilities.
             # But since the softmax does not alter the ranking of its input scores,
             # we can compute the label by argmaxing the scores directly.
-            predicted_targets = outputs.argmax(dim=1)
-            correct += (predicted_targets == targets).sum().item()
-        return tot_loss/N, correct/N
\ No newline at end of file
+        return tot_loss/N
\ No newline at end of file
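The accumulation of N and tot_loss happens in lines the diff does not show; for tot_loss / N to be the mean loss over the set, each batch loss is presumably weighted by its batch size. A hypothetical reconstruction of that loop body (names follow the visible code; the exact lines are not in this commit):

inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)

# Weight the (mean) batch loss by the batch size before averaging.
N += inputs.shape[0]
tot_loss += inputs.shape[0] * f_loss(outputs, targets).item()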
train.py
+from tqdm import tqdm
 
 def train(model, loader, f_loss, optimizer, device):
     """
     Train a model for one epoch, iterating over the loader
@@ -18,7 +20,7 @@ def train(model, loader, f_loss, optimizer, device):
     model.train()
 
-    for _, (inputs, targets) in enumerate(loader):
+    for _, (inputs, targets) in tqdm(enumerate(loader), total=len(loader)):
         inputs, targets = inputs.to(device), targets.to(device)
 
         # Compute the forward pass through the network up to the loss
         ...
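The rest of the training step is likewise outside the hunk; a sketch of the standard PyTorch sequence it presumably performs after the forward pass (not the repository's exact code):

outputs = model(inputs)
loss = f_loss(outputs, targets)

optimizer.zero_grad()  # clear gradients from the previous iteration
loss.backward()        # backpropagate the loss
optimizer.step()       # update the model parameters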