# main.py
import dataloader
import model
import test
from train import train
import yaml
import losses
import optimizers
import torch
import logging

if __name__ == "__main__":
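    # logging.basicConfig opens the log file immediately, so the logs/ directory
    # must already exist; otherwise this call raises FileNotFoundError.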
    logging.basicConfig(filename='logs/main_unit_test.log', level=logging.INFO)

    with open("config.yml", "r") as config_file:
        cfg = yaml.safe_load(config_file)

    use_cuda = torch.cuda.is_available()
    trainpath           = cfg["Dataset"]["_DEFAULT_TRAIN_FILEPATH"]
    num_days            = int(cfg["Dataset"]["num_days"])
    batch_size          = int(cfg["Dataset"]["batch_size"])
    num_workers         = int(cfg["Dataset"]["num_workers"])
    valid_ratio         = float(cfg["Dataset"]["valid_ratio"])
    max_num_samples     = eval(cfg["Dataset"]["max_num_samples"])
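    # The keys read in this script suggest a config.yml of roughly the shape
    # below (values are illustrative assumptions, not the project's defaults):
    #
    #   Dataset:
    #     _DEFAULT_TRAIN_FILEPATH: /path/to/train/data
    #     num_days: 365
    #     batch_size: 128
    #     num_workers: 4
    #     valid_ratio: 0.2
    #     max_num_samples: "None"   # parsed with eval(), so "None" or an int literal
    #   Training:
    #     Epochs: 10
    #
    # model.build_model and optimizers.optimizer receive the full cfg, so they
    # likely read additional sections (e.g. Model, Optimizer) not visible here.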

    train_loader, valid_loader = dataloader.get_dataloaders(
        trainpath,
        num_days,
        batch_size,
        num_workers,
        use_cuda,
        valid_ratio,
        overwrite_index=True,
        max_num_samples=max_num_samples,
    )

    if use_cuda:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    model = model.build_model(cfg, 18)
    model = model.to(device)

    f_loss = losses.RMSLELoss()
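    # RMSLE = root mean squared logarithmic error, conventionally
    # sqrt(mean((log(1 + pred) - log(1 + target))^2)); the exact definition is
    # in losses.RMSLELoss, which is not shown in this file.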

    optimizer = optimizers.optimizer(cfg, model)

    for t in range(int(cfg["Training"]["Epochs"])):
        print("Epoch {}".format(t))
        train(model, train_loader, f_loss, optimizer, device)

        val_loss = test.test(model, valid_loader, f_loss, device)
        print(" Validation : Loss : {:.4f}".format(val_loss))

    """
    logdir = generate_unique_logpath(top_logdir, "linear")
    print("Logging to {}".format(logdir))
    # -> Prints out     Logging to   ./logs/linear_1
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    """