Commit 4b57bc87 authored by Yandi

RNN small dropout

parent 8df0ceb1
1 merge request: !1 Master into main
No preview for this file type
@@ -120,16 +120,17 @@ if __name__ == "__main__":
         wandb.watch(network, log_freq = log_freq)

     for t in range(cfg["Training"]["Epochs"]):
-        print("Epoch {}".format(t))
+        logging.info("Epoch {}".format(t))
         train(args, network, train_loader, f_loss, optimizer, device, log_interval)
         val_loss = test.test(network, valid_loader, f_loss, device)
         network_checkpoint.update(val_loss)
-        print(" Validation : Loss : {:.4f}".format(val_loss))
+        logging.info(" Validation : Loss : {:.4f}".format(val_loss))

         if not args.no_wandb:
             wandb.log({"val_loss": val_loss})

+    utils.write_summary(logdir, network, optimizer, val_loss)
     create_submission.create_submission(network, dataloader.composite_transform(dataloader.transform_remove_space_time(), dataloader.transform_min_max_scaling(MIN, MAX)), device, rootDir, logdir)
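This hunk swaps print for logging.info, which only produces visible output if the logging module is configured at startup (the root logger defaults to WARNING level). A minimal sketch of the setup this change assumes; the exact format string is hypothetical and not shown in this diff:

import logging

# Assumed setup near the script's entry point: route INFO-level
# records to stderr with a timestamp prefix.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
)

logging.info("Epoch {}".format(0))
# e.g. 2023-02-01 10:12:03,512 INFO Epoch 0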
@@ -42,18 +42,12 @@ def train(args, model, loader, f_loss, optimizer, device, log_interval = 100):
         Y = list(model.parameters())[0].grad.cpu().tolist()
-        #gradients.append(np.mean(Y))
-        #tar.append(np.mean(outputs.cpu().tolist()))
-        #out.append(np.mean(targets.cpu().tolist()))

         if not args.no_wandb:
             if batch_idx % log_interval == 0:
                 wandb.log({"train_loss" : loss})
         optimizer.step()
-    #visualize_gradients(gradients)
-    #visualize_gradients(tar)
-    #visualize_gradients(out)

 def visualize_gradients(gradients):
     print(gradients)
 import numpy as np
 import os
+import sys

 def generate_unique_logpath(logdir, raw_run_name):
     i = 0
@@ -14,4 +15,35 @@ def create_unique_logpath(top_logdir, raw_run_name):
         os.mkdir(top_logdir)
     logdir = generate_unique_logpath(top_logdir, raw_run_name)
     os.mkdir(logdir)
-    return logdir, raw_run_name
\ No newline at end of file
+    return logdir, raw_run_name
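The body of generate_unique_logpath is collapsed in this diff; only its signature and "i = 0" are visible. The usual pattern behind such a helper, sketched here purely as an assumption, appends an increasing integer until the path is free:

import os

def generate_unique_logpath(logdir, raw_run_name):
    # Hypothetical reconstruction: try run_name_0, run_name_1, ...
    # until a directory of that name does not exist yet.
    i = 0
    while True:
        run_name = raw_run_name + "_" + str(i)
        log_path = os.path.join(logdir, run_name)
        if not os.path.isdir(log_path):
            return log_path
        i = i + 1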
+
+def write_summary(logdir, model, optimizer, val_loss):
+    summary_file = open(logdir + "/summary.txt", 'w')
+    summary_text = """
+Validation loss
+===============
+{}
+
+Executed command
+================
+{}
+
+Dataset
+=======
+FashionMNIST
+
+Model summary
+=============
+{}
+
+{} trainable parameters
+
+Optimizer
+=========
+{}
+""".format(val_loss, " ".join(sys.argv), model, sum(p.numel() for p in model.parameters() if p.requires_grad), optimizer)
+    summary_file.write(summary_text)
+    summary_file.close()
\ No newline at end of file
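write_summary embeds the standard PyTorch idiom for counting trainable parameters. A quick standalone check of that expression on a toy model (the toy model is illustrative only, not part of this repo):

import torch.nn as nn

model = nn.Linear(10, 2)  # weight 2x10 = 20 params, bias = 2
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_trainable)  # 22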