diff --git a/config.yml b/config.yml
index 09e83255cb3ff718960ccb281dbedd49d780e5ac..e550698059eec0ae5962ea2a11b6362b6f00a679 100644
--- a/config.yml
+++ b/config.yml
@@ -57,6 +57,7 @@ BidirectionalLSTM:
 RNN:
   HiddenSize: 35
   NumLayers: 4
+  NumFFN: 8
   Initialization: None
 
 #Name of directory containing logs
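
For context, a minimal sketch of how the new `NumFFN` key would be read, assuming the project loads `config.yml` with PyYAML (the loading code is not part of this diff):

```python
import yaml

# Load the training config; the "config.yml" filename matches this diff.
with open("config.yml") as f:
    cfg = yaml.safe_load(f)

# The new key sets how many Linear/ReLU/Dropout blocks the RNN head stacks.
print(cfg["RNN"]["NumFFN"])  # -> 8
```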
diff --git a/logs/RNN_13/best_model.pt b/logs/RNN_13/best_model.pt
index 09b47dc34b69108e83367faed6c66bff6c4be83f..6880a8a10f215b6db736b88ab182db54219ae469 100644
Binary files a/logs/RNN_13/best_model.pt and b/logs/RNN_13/best_model.pt differ
diff --git a/logs/RNN_14/best_model.pt b/logs/RNN_14/best_model.pt
index 2f1ca76dc5f8b8130756a47564cb58fe5f48b237..795a0bfd0ec72f57fb83627e0fe2f3e7808691b5 100644
Binary files a/logs/RNN_14/best_model.pt and b/logs/RNN_14/best_model.pt differ
diff --git a/logs/RNN_15/best_model.pt b/logs/RNN_15/best_model.pt
index 8c780c8949334bc211e4cb22336d09b34a2e4bc1..4a84e84260ad9429e6a7e8e59e6fbf0961b84202 100644
Binary files a/logs/RNN_15/best_model.pt and b/logs/RNN_15/best_model.pt differ
diff --git a/logs/RNN_16/best_model.pt b/logs/RNN_16/best_model.pt
index b829de358f3632b2f04079d9b0419a27f1da6d7b..8b77c8cb583be1259b6cf4e7e46b40ecba4e6ab8 100644
Binary files a/logs/RNN_16/best_model.pt and b/logs/RNN_16/best_model.pt differ
diff --git a/model.py b/model.py
index 3229622602e97d9485ab145ecd1b17430a895127..a6a74dff54e617489b222b50383af2c049a2f1ce 100644
--- a/model.py
+++ b/model.py
@@ -42,16 +42,34 @@ class RNN(nn.Module):
         super(RNN, self).__init__()
         self.hidden_size = cfg["RNN"]["HiddenSize"]
         self.num_layers = cfg["RNN"]["NumLayers"]
+        self.num_ffn = cfg["RNN"]["NumFFN"]
         # RNN
         self.rnn = nn.Sequential(
             nn.RNN(input_size, self.hidden_size, self.num_layers, batch_first=True, nonlinearity='relu'),
             nn.Dropout(p=0.2)
         )
+
+        self.fc = nn.Sequential()
         
-        self.fc = nn.Sequential(
-            nn.Linear(self.hidden_size, 1),
-            nn.Dropout(p=0.2)
+        # Feed-forward head: NumFFN blocks of Linear -> ReLU -> Dropout
+        for layer in range(self.num_ffn):
+            self.fc.add_module(
+                f"linear_{layer}", nn.Linear(self.hidden_size, self.hidden_size)
+            )
+            self.fc.add_module(
+                f"relu_{layer}",
+                nn.ReLU()
+            )
+            self.fc.add_module(
+                f"dropout_{layer}",
+                nn.Dropout(p=0.2)
+            )
+
+        self.fc.add_module(
+            "last_linear",
+            nn.Linear(self.hidden_size, 1)
         )
+
     
     def forward(self, x):
         use_cuda = torch.cuda.is_available()
@@ -61,8 +79,7 @@ class RNN(nn.Module):
             device = torch.device('cpu')
         # Initialize hidden state with zeros
         h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-            
-        # One time step
+
         out, hn = self.rnn(x, h0)
         out = self.fc(out)
         return out
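
For reference, a standalone sketch of the feed-forward head this diff builds. The `hidden_size` and `num_ffn` values are taken from `config.yml`; the input tensor shape is illustrative only:

```python
import torch
import torch.nn as nn

hidden_size, num_ffn = 35, 8  # HiddenSize and NumFFN from config.yml

# Mirror the loop added in model.py: NumFFN blocks of Linear -> ReLU -> Dropout,
# followed by a final Linear projecting to a single output.
fc = nn.Sequential()
for layer in range(num_ffn):
    fc.add_module(f"linear_{layer}", nn.Linear(hidden_size, hidden_size))
    fc.add_module(f"relu_{layer}", nn.ReLU())
    fc.add_module(f"dropout_{layer}", nn.Dropout(p=0.2))
fc.add_module("last_linear", nn.Linear(hidden_size, 1))

# nn.Linear applies over the last dimension, so the head maps each timestep's
# hidden state to a scalar prediction.
out = fc(torch.randn(2, 10, hidden_size))  # (batch, seq_len, hidden_size)
print(out.shape)  # torch.Size([2, 10, 1])
```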