Commit deda3a60 authored by Yandi

Preparing for the RNN batch job with num_days = 73

parent 7151c71a
1 merge request: !1 Master into main
@@ -37,7 +37,7 @@ def create_submission(model, transform, device):
    # is not using the temporal context; this value is arbitrarily chosen here.
    # However, note that it must be a divisor of the total number of days
    # in the year 2017, i.e. 1, 5, 73 or 365.
-    num_days = 365
+    num_days = 73
    num_workers = 7
    use_cuda = torch.cuda.is_available()
......
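Since the submission loop splits the 365 days of 2017 into chunks of num_days, the divisor constraint mentioned in the comment can be made explicit. A minimal sketch of such a guard (the assertion below is not part of the commit):

    num_days = 73
    # Hypothetical guard, not in the original file: fail early if num_days
    # does not split the 365 days of 2017 into equal chunks.
    assert 365 % num_days == 0, "num_days must be one of 1, 5, 73 or 365"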
@@ -75,7 +75,9 @@ parser.add_argument("--model_name",
# Ensure the log directory exists
os.system("mkdir -p logslurms")
args = parser.parse_args()
time_wall = {"no_limit": "48:00:00", "hour": "1:00:00", "half": "0:30:00", "quarter": "0:15:00"}
# Launch the batch jobs
-submit_job(makejob(commit_id, "cnn", 1, args.user, time_wall[args.time_wall]))
\ No newline at end of file
+submit_job(makejob(commit_id, args.model_name, 1, args.user, time_wall[args.time_wall]))
\ No newline at end of file
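The wall-time strings are handed to makejob, whose body is not shown in this diff. As a hedged illustration of where such a value usually ends up, a generated Slurm batch script would carry it in an SBATCH directive (hypothetical helper, not the repository's makejob):

    def sbatch_header(job_name, wall_time):
        # Hypothetical sketch: the real makejob also receives a commit id,
        # a node count and a user name, but only the wall time matters here.
        return (
            "#!/bin/bash\n"
            f"#SBATCH --job-name={job_name}\n"
            f"#SBATCH --time={wall_time}\n"
        )

    print(sbatch_header("rnn", time_wall["hour"]))  # emits --time=1:00:00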
File added
@@ -33,18 +33,19 @@ if __name__ == "__main__":
    parser.add_argument(
        "--rootDir",
        default=None,
        help="Directory in which the log files will be stored"
    )
    args = parser.parse_args()
    config_file = open("config.yml")
    cfg = yaml.load(config_file)
-    rootDir = cfg["LogDir"] if eval(args.rootDir) != None else args.rootDir
+    rootDir = args.rootDir if args.rootDir != None else cfg["LogDir"]
    logging.basicConfig(filename=rootDir + 'main_unit_test.log', level=logging.INFO)
    config_file = open("config.yml")
    cfg = yaml.load(config_file)
    use_cuda = torch.cuda.is_available()
    trainpath = cfg["Dataset"]["_DEFAULT_TRAIN_FILEPATH"]
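Note that `rootDir + 'main_unit_test.log'` only yields the intended file when rootDir ends with a path separator. A minimal alternative sketch, not part of the commit, using os.path.join:

    import os
    # Hypothetical variant: build the log path without relying on a trailing
    # slash in rootDir.
    log_path = os.path.join(rootDir, "main_unit_test.log")
    logging.basicConfig(filename=log_path, level=logging.INFO)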
@@ -71,7 +72,7 @@ if __name__ == "__main__":
        MAX = eval(cfg["ApproximativeMaxi"])
        MIN = eval(cfg["ApproximativeMini"])
    else:
-        MEAN, STD, MAX, MIN = get_stats_train_dataset(trainpath,
+        MEAN, STD, MAX, MIN = dataloader.get_stats_train_dataset(trainpath,
            num_days,
            batch_size,
            num_workers,
......
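How the returned statistics are consumed is outside this hunk; as a hedged sketch of the usual pattern, the per-feature mean and standard deviation feed a normalization applied to every sample, while MAX and MIN would serve an analogous min-max scaling (the function below is illustrative, not code from the repository):

    # Hypothetical normalization step using the statistics computed above.
    def normalize(x, mean=MEAN, std=STD):
        return (x - mean) / std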
@@ -3,6 +3,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
+from torch.autograd import Variable
import torch.nn as nn
@@ -35,6 +36,32 @@ class LinearRegression(nn.Module):
    def forward(self, x):
        return self.regressor(x)
# Recurrent Neural Networks
class RNN(nn.Module):
    def __init__(self, cfg, input_size):
        super(RNN, self).__init__()
        self.hidden_size = cfg["RNN"]["HiddenSize"]
        self.num_layers = cfg["RNN"]["NumLayers"]
        # RNN
        self.rnn = nn.RNN(input_size, self.hidden_size, self.num_layers,
                          batch_first=True, nonlinearity='relu')
        self.fc = nn.Linear(self.hidden_size, 1)

    def forward(self, x):
        use_cuda = torch.cuda.is_available()
        if use_cuda:
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        # Initialize hidden state with zeros
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        # Run the RNN over the whole sequence, then map each hidden state to a scalar
        out, hn = self.rnn(x, h0)
        out = self.fc(out)
        print(out.shape)  # debug: expected (batch, seq_len, 1)
        return out
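A short usage sketch of the new class, assuming 14 input features per day purely for illustration (the config keys mirror those read in __init__; the concrete sizes are not taken from the repository):

    cfg = {"RNN": {"HiddenSize": 64, "NumLayers": 2}}
    model = RNN(cfg, input_size=14)
    x = torch.randn(8, 73, 14)   # (batch, seq_len=num_days, features)
    out = model(x)               # -> shape (8, 73, 1), one prediction per day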
# Bidirectional LSTM
class BidirectionalLSTM(nn.Module):
    def __init__(self, cfg, input_size):
......
@@ -31,6 +31,7 @@ def train(args, model, loader, f_loss, optimizer, device, log_interval = 100):
        # Compute the forward pass through the network up to the loss
        outputs = model(inputs)
+        print(targets.shape)
        loss = f_loss(outputs, targets)
        # Backward and optimize
......
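The added print of targets.shape reads as a shape check before the loss. Assuming the model returns (batch, seq_len, 1) while the targets are (batch, seq_len), a minimal alignment sketch (hypothetical, not part of the commit) would be:

    # Hypothetical: drop the trailing singleton dimension so the loss compares
    # tensors of identical shape instead of silently broadcasting.
    outputs = outputs.squeeze(-1)
    loss = f_loss(outputs, targets)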
No preview for this file type
No preview for this file type
-run-20230130_164435-f5w8sswr/logs/debug-internal.log
\ No newline at end of file
+run-20230131_200825-ytr4zy6r/logs/debug-internal.log
\ No newline at end of file
-run-20230130_164435-f5w8sswr/logs/debug.log
\ No newline at end of file
+run-20230131_200825-ytr4zy6r/logs/debug.log
\ No newline at end of file
-run-20230130_164435-f5w8sswr
\ No newline at end of file
+run-20230131_200825-ytr4zy6r
\ No newline at end of file