---
# Newer
# Older
batch_size: 128
num_workers: 7
valid_ratio: 0.2
max_num_samples: null  # e.g. 1000 to cap samples; NOTE(review): was 'None', which YAML parses as the string "None" — confirm the consumer expects null
_DEFAULT_TRAIN_FILEPATH: "/mounts/Datasets3/2022-ChallengePlankton/sub_2CMEMS-MEDSEA-2010-2016-training.nc.bin"
_DEFAULT_TEST_FILEPATH: "/mounts/Datasets3/2022-ChallengePlankton/sub_2CMEMS-MEDSEA-2017-testing.nc.bin"
_ENCODING_LINEAR: "I"
_ENCODING_INDEX: "I" # h(short) with 2 bytes should be sufficient
_ENCODING_OFFSET_FORMAT: ""
_ENCODING_ENDIAN: "<"
# Data Transformation
ApproximativeStats: true
ApproximativeMean: "torch.tensor([ 4.2457e+01, 7.4651e+00, 1.6738e+02, 1.3576e+09, 2.3628e+00,
4.6839e+01, 2.3855e-01, 3.6535e+00, 1.9776e+00, 2.2628e+02,
8.1003e+00, 1.8691e-01, 3.8384e+01, 2.6626e+00, 1.4315e+01,
-4.1419e-03, 6.0274e-03, -5.1017e-01])"
ApproximativeSTD: "torch.tensor([5.8939e-01, 8.1625e-01, 1.4535e+02, 5.4952e+07, 1.7543e-02, 1.3846e+02,
2.1302e-01, 1.9558e+00, 4.1455e+00, 1.2408e+01, 2.2938e-02, 9.9070e-02,
1.9490e-01, 9.2847e-03, 2.2575e+00, 8.5310e-02, 7.8280e-02, 8.6237e-02])"
# Optimizer selection
Optimizer: Adam  # in {Adam}
# Model selection
Model:
  Name: LinearRegression
# Model parameters selection
LinearRegression:
  # Bias in {true, false}
  Bias: true
# Visualization
Wandb:
  log_freq: 100  # log gradients and parameters every log_freq batches
  log_interval: 10  # log the train_loss every log_interval batches