diff --git a/code.ipynb b/code.ipynb
index d72dcdf0eaa1760ea6dae73b6ac204b2f87d078f..71797ae083d89865d53e162031176f929c5629a7 100644
--- a/code.ipynb
+++ b/code.ipynb
@@ -887,7 +887,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 291,
+   "execution_count": 366,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -933,7 +933,7 @@
     "        predictions = model(X_test)\n",
     "        predictions = predictions.round()  # Threshold predictions to obtain binary output\n",
     "    accuracy = accuracy_score(y_test.numpy(), predictions.numpy())\n",
-    "    return accuracy"
+    "    return accuracy, predictions.numpy()"
    ]
   },
   {
@@ -1469,23 +1469,27 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 367,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Fold 1 - Accuracy on test set: 0.5043\n",
-      "Extracted features: (457, 11)\n",
-      "Fold 2 - Accuracy on test set: 0.4522\n",
-      "Extracted features: (457, 11)\n",
-      "Fold 3 - Accuracy on test set: 0.5000\n",
-      "Extracted features: (458, 11)\n",
-      "Fold 4 - Accuracy on test set: 0.5175\n",
-      "Extracted features: (458, 11)\n",
-      "Fold 5 - Accuracy on test set: 0.5263\n",
-      "Extracted features: (458, 11)\n"
+      "Fold 1\n",
+      "Accuracy on test set: 0.4435\n",
+      "Fold 2\n",
+      "Accuracy on test set: 0.5043\n",
+      "Fold 3\n",
+      "Accuracy on test set: 0.4386\n",
+      "Fold 4\n",
+      "Accuracy on test set: 0.3596\n",
+      "Fold 5\n",
+      "Accuracy on test set: 0.5000\n",
+      "Average test accuracy: 0.44921434019832185\n",
+      "Standard deviation of accuracies: 0.052509911439454035\n",
+      "Average test precision: 0.45282175087591564\n",
+      "Average test recall: 0.4877875949145444\n"
      ]
     }
    ],
@@ -1501,6 +1505,13 @@
     "X = torch.FloatTensor(X_tab_resampled)\n",
     "y = torch.FloatTensor(y_resampled)\n",
     "\n",
+    "accuracies = []\n",
+    "precision_scores = []\n",
+    "recall_scores = []\n",
+    "\n",
     "for k, (train_index, test_index) in enumerate(kf.split(X)):\n",
     "    X_train, X_test = X[train_index], X[test_index]\n",
     "    y_train, y_test = y[train_index], y[test_index]\n",
@@ -1509,10 +1520,24 @@
     "    train_model(X_train, y_train, mlp_model, criterion, optimizer)\n",
     "\n",
     "    # Evaluate the model\n",
-    "    accuracy = test_model(X_test, y_test, mlp_model)\n",
-    "    print(f'Fold {k+1} - Accuracy on test set: {accuracy:.4f}')\n",
-    "    train_features = mlp_model.feature_extractor(X_train).detach().numpy()\n",
-    "    print(f'Extracted features: {train_features.shape}')"
+    "    accuracy, y_pred = test_model(X_test, y_test, mlp_model)\n",
+    "    precision = precision_score(y_test, y_pred)\n",
+    "    recall = recall_score(y_test, y_pred)\n",
+    "    \n",
+    "    # Save all performance metrics\n",
+    "    accuracies.append(accuracy)\n",
+    "    precision_scores.append(precision)\n",
+    "    recall_scores.append(recall)\n",
+    "\n",
+    "    # Print accuracy of current fold\n",
+    "    print(f'Fold {k + 1}')\n",
+    "    print(f'Accuracy on test set: {accuracy:.4f}')\n",
+    "\n",
+    "# Print average and std of metrics\n",
+    "print(f'Average test accuracy: {np.mean(accuracies)}')\n",
+    "print(f'Standard deviation of accuracies: {np.std(accuracies)}')\n",
+    "print(f'Average test precision: {np.mean(precision_scores)}')\n",
+    "print(f'Average test recall: {np.mean(recall_scores)}')"
    ]
   }
  ],
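
Taken together, the patch changes test_model to return the binarized predictions alongside accuracy, then aggregates accuracy, precision, and recall across the five folds. Returning the predictions (rather than recomputing them per metric) keeps the thresholding in one place, so precision and recall are computed on exactly the outputs that accuracy uses.

The standalone sketch below reproduces that evaluation pattern for reference. It is illustrative only: the random stub predictor, the synthetic X/y arrays (572 samples, matching the fold sizes implied by the old output), and the KFold settings are assumptions standing in for the notebook's mlp_model, X_tab_resampled/y_resampled, and existing kf.

    import numpy as np
    from sklearn.model_selection import KFold
    from sklearn.metrics import accuracy_score, precision_score, recall_score

    rng = np.random.default_rng(42)
    X = rng.normal(size=(572, 11))    # synthetic stand-in for X_tab_resampled
    y = rng.integers(0, 2, size=572)  # synthetic stand-in for y_resampled

    kf = KFold(n_splits=5, shuffle=True, random_state=42)  # assumed KFold config
    accuracies, precision_scores, recall_scores = [], [], []

    for k, (train_index, test_index) in enumerate(kf.split(X)):
        # Random 0/1 predictor stands in for training and thresholding mlp_model
        y_pred = rng.integers(0, 2, size=len(test_index))
        accuracies.append(accuracy_score(y[test_index], y_pred))
        precision_scores.append(precision_score(y[test_index], y_pred, zero_division=0))
        recall_scores.append(recall_score(y[test_index], y_pred, zero_division=0))
        print(f'Fold {k + 1}')
        print(f'Accuracy on test set: {accuracies[-1]:.4f}')

    print(f'Average test accuracy: {np.mean(accuracies):.4f}')
    print(f'Standard deviation of accuracies: {np.std(accuracies):.4f}')
    print(f'Average test precision: {np.mean(precision_scores):.4f}')
    print(f'Average test recall: {np.mean(recall_scores):.4f}')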