import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
# Linear regression head implemented as a feed-forward network (MLP)
class LinearRegression(nn.Module):
    def __init__(self, cfg, input_size):
        super(LinearRegression, self).__init__()
        self.input_size = input_size
        self.bias = cfg["LinearRegression"]["Bias"]
        self.hidden_size = int(cfg["LinearRegression"]["HiddenSize"])
        # Seven hidden Linear+ReLU blocks followed by a single-unit output layer.
        # Note that the trailing ReLU constrains predictions to be non-negative.
        self.regressor = nn.Sequential(
            nn.Linear(input_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size, self.bias),
            nn.ReLU(),
            nn.Linear(self.hidden_size, 1, self.bias),
            nn.ReLU()
        )

    def forward(self, x):
        # The original class was missing a forward method, so calling the
        # module would raise; simply run the input through the regressor.
        return self.regressor(x)
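# Usage sketch (illustrative only; this cfg layout is an assumption matching
# the keys the class reads, not the project's actual configuration file):
#   cfg = {"LinearRegression": {"Bias": True, "HiddenSize": 64}}
#   model = LinearRegression(cfg, input_size=10)
#   y = model(torch.randn(8, 10))  # -> (8, 1), non-negative due to the final ReLU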
# UNet convolutional block; input x has shape (B, T, N)
class UNetConvBlock(nn.Module):
    def __init__(self, cin, cout):
        super(UNetConvBlock, self).__init__()  # left as a stub in the original; layers still to be defined
# Initialization
def init_he(module):
    # He (Kaiming) initialization, suited to ReLU activations.
    if isinstance(module, nn.Linear):
        nn.init.kaiming_uniform_(module.weight, mode='fan_in', nonlinearity='relu')
def initialize_model(cfg, network):
    # Resolve the initializer named in the config (e.g. "init_he") and apply
    # it once; nn.Module.apply already recurses over every submodule, so the
    # original loop over named_modules() re-applied the initializer many times.
    init_fn = eval(f"{cfg[cfg['Model']['Name']]['Initialization']}")
    network.apply(init_fn)
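# A safer design alternative to eval() is a plain namespace lookup. This is a
# sketch, not the original code path; the name initialize_model_safe is
# hypothetical, and it assumes the initializer named in the config (e.g.
# "init_he") is defined in this module.
def initialize_model_safe(cfg, network):
    init_fn = globals()[cfg[cfg["Model"]["Name"]]["Initialization"]]
    network.apply(init_fn)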
class ModelCheckpoint:
    # Save the model's weights whenever the monitored loss improves on the
    # best value seen so far.
    def __init__(self, filepath, model):
        self.min_loss = None
        self.filepath = filepath
        self.model = model

    def update(self, loss):
        if (self.min_loss is None) or (loss < self.min_loss):
            print("Saving a better model")
            torch.save(self.model.state_dict(), self.filepath)
            self.min_loss = loss
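# Minimal end-to-end sketch tying the pieces together; the cfg dict, the
# random batch, and the checkpoint path "best_model.pt" are all assumptions
# for illustration only.
if __name__ == "__main__":
    cfg = {"Model": {"Name": "LinearRegression"},
           "LinearRegression": {"Bias": True, "HiddenSize": 64,
                                "Initialization": "init_he"}}
    model = LinearRegression(cfg, input_size=10)
    initialize_model(cfg, model)                      # applies init_he to every nn.Linear
    checkpoint = ModelCheckpoint("best_model.pt", model)
    x, y = torch.randn(32, 10), torch.randn(32, 1)
    loss = F.mse_loss(model(x), y)
    checkpoint.update(loss.item())                    # saves, since no previous best exists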