num_days: 73 # Number of days per sequence; test with a sequence of 1 day - should be the same as in Test
num_workers: 7
valid_ratio: 0.2
max_num_samples: None # None = no limit on the number of samples; set e.g. 1000 to subsample
_DEFAULT_TRAIN_FILEPATH: "/mounts/Datasets3/2022-ChallengePlankton/sub_2CMEMS-MEDSEA-2010-2016-training.nc.bin"
_DEFAULT_TEST_FILEPATH: "/mounts/Datasets3/2022-ChallengePlankton/sub_2CMEMS-MEDSEA-2017-testing.nc.bin"
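# A minimal sketch of how the settings above might be consumed (assumptions:
# "cfg" is this file loaded as a dict, e.g. with yaml.safe_load, and
# PlanktonDataset is a hypothetical placeholder for the project's dataset class):
#   from torch.utils.data import DataLoader, random_split
#   dataset = PlanktonDataset(cfg["_DEFAULT_TRAIN_FILEPATH"], num_days=cfg["num_days"])
#   n_valid = int(cfg["valid_ratio"] * len(dataset))
#   train_set, valid_set = random_split(dataset, [len(dataset) - n_valid, n_valid])
#   train_loader = DataLoader(train_set, num_workers=cfg["num_workers"])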
_ENCODING_LINEAR: "I" # struct format character: unsigned int (4 bytes)
_ENCODING_INDEX: "I" # unsigned int (4 bytes); "h" (short, 2 bytes) would be sufficient
_ENCODING_OFFSET_FORMAT: ""
_ENCODING_ENDIAN: "<" # little-endian byte order
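# The _ENCODING_* values are Python struct format characters. A minimal sketch of how
# they might be combined to read one index entry from the binary file (hypothetical,
# not necessarily the project's actual loader):
#   import struct
#   fmt = cfg["_ENCODING_ENDIAN"] + cfg["_ENCODING_INDEX"]   # "<I"
#   with open(cfg["_DEFAULT_TRAIN_FILEPATH"], "rb") as fp:
#       (first_index,) = struct.unpack(fmt, fp.read(struct.calcsize(fmt)))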
# Approximate per-feature statistics (18 features), stored as torch.tensor expressions
ApproximativeMean: "torch.tensor([4.2457e+01, 7.4651e+00, 1.6738e+02, 1.3576e+09, 2.3628e+00, 4.6839e+01, 2.3855e-01, 3.6535e+00, 1.9776e+00, 2.2628e+02, 8.1003e+00, 1.8691e-01, 3.8384e+01, 2.6626e+00, 1.4315e+01, -4.1419e-03, 6.0274e-03, -5.1017e-01])"
ApproximativeSTD: "torch.tensor([5.8939e-01, 8.1625e-01, 1.4535e+02, 5.4952e+07, 1.7543e-02, 1.3846e+02, 2.1302e-01, 1.9558e+00, 4.1455e+00, 1.2408e+01, 2.2938e-02, 9.9070e-02, 1.9490e-01, 9.2847e-03, 2.2575e+00, 8.5310e-02, 7.8280e-02, 8.6237e-02])"
ApproximativeMaxi: "torch.tensor([4.3479e+01, 9.0000e+00, 4.9267e+02, 1.4528e+09, 2.4088e+00, 2.7824e+03, 1.5576e+00, 6.2457e+00, 2.5120e+02, 2.7188e+02, 8.1683e+00, 3.2447e-01, 3.9041e+01, 2.7162e+00, 2.9419e+01, 8.6284e-01, 7.6471e-01, -7.7745e-02])"
ApproximativeMini: "torch.tensor([4.1479e+01, 6.0000e+00, 1.0182e+00, 1.2623e+09, 2.2433e+00, 1.0910e+01, 1.0000e-11, 1.0000e-11, -1.1467e+01, 1.9718e+02, 7.9218e+00, 1.0000e-11, 3.7171e+01, 2.5584e+00, 1.2075e+01, -1.2436e+00, -9.9256e-01, -8.8131e-01])"
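# A minimal sketch of how these statistics might be applied (assumption: the strings
# above are evaluated into tensors and features are standardized per channel):
#   import torch
#   mean = eval(cfg["ApproximativeMean"])   # shape (18,)
#   std = eval(cfg["ApproximativeSTD"])     # shape (18,)
#   x_standardized = (x - mean) / std       # x: (..., 18) feature tensor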
#Optimizer selection
Optimizer: Adam # in {Adam}
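# A minimal sketch of how the optimizer name could be resolved (assumption: "cfg" is
# this file as a dict and "model" is the instantiated model):
#   import torch.optim
#   optimizer = getattr(torch.optim, cfg["Optimizer"])(model.parameters())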
#Model parameters selection
LinearRegression:
# Bias in {True, False}
Bias: True
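# A minimal sketch of how the Bias flag might be used (the 18-feature input size and
# single regression output are placeholders):
#   import torch.nn as nn
#   regressor = nn.Linear(in_features=18, out_features=1,
#                         bias=cfg["LinearRegression"]["Bias"])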
#Visualization
Wandb:
log_freq: 100 #log gradients and parameters every log_freq batches
log_interval: 10 # log the train_loss every log_interval batches
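# A minimal sketch of how these values might be wired to wandb (hypothetical training
# loop variables):
#   import wandb
#   wandb.watch(model, log="all", log_freq=cfg["Wandb"]["log_freq"])
#   if batch_idx % cfg["Wandb"]["log_interval"] == 0:
#       wandb.log({"train_loss": loss.item()})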