From 70f652a273599daa930279a3d3c0d5408d2e9087 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20Ramnel=C3=B6v?= <felra653@student.liu.se>
Date: Wed, 11 Dec 2024 09:37:23 +0100
Subject: [PATCH] Lab 3: Code and figure fixes

---
 lab3/assignment2.R |   2 +-
 lab3/assignment3.R |  14 +--
 lab3/assignment4.R | 252 ++++++++++++++++++++++++++++++++-------------
 3 files changed, 187 insertions(+), 81 deletions(-)

diff --git a/lab3/assignment2.R b/lab3/assignment2.R
index 259d892..0bf6a27 100644
--- a/lab3/assignment2.R
+++ b/lab3/assignment2.R
@@ -13,7 +13,7 @@ h_time <- 4
 
 a <- 58.4274
 b <- 14.826
-date <- "1966-12-10"
+date <- "1983-05-10"
 times <- c(
   "04:00:00",
   "06:00:00",
diff --git a/lab3/assignment3.R b/lab3/assignment3.R
index 47b8abc..5ebddf8 100644
--- a/lab3/assignment3.R
+++ b/lab3/assignment3.R
@@ -89,8 +89,8 @@ err3
 
 # 3. Implementation of SVM predictions.
 
-gaussian_kernel <- function(x_i, x_star, sigma) {
-  exp(-(dist(rbind(x_i, x_star)) ^ 2) / (2 * sigma ^ 2))
+gaussian_kernel <- function(x, sigma = 0.05) {
+  exp(-(x ^ 2) / (2 * sigma ^ 2)) # x: Euclidean distance between the two points
 }
 
 sv <- alphaindex(filter3)[[1]]
@@ -99,21 +99,15 @@ inte <- -b(filter3)
 k <- NULL
 for (i in 1:10) {
   # We produce predictions for just the first 10 points in the dataset.
-  
-  k2 <- 0
+  k2 <- inte # start the sum at the intercept term
   data_point <- spam[i, -58]
-  
   for (j in 1:length(sv)) {
     support_vector <- spam[sv[j], -58]
-    kernel_value <- gaussian_kernel(support_vector, data_point, sigma = 0.05)
+    kernel_value <- gaussian_kernel(sqrt(sum((support_vector - data_point) ^ 2)), sigma = 0.05)
     k2 <- k2 + co[j] * kernel_value
-    
   }
-  
-  k2 <- k2 + inte
   print(k2)
   k <- c(k, sign(k2))
-  
 }
 
 # Only first correct, close to decision boundary (0.006292512).
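+
+# Sanity check (a sketch, assuming `filter3` is the kernlab ksvm model
+# fitted on `spam` earlier in the script): the manually computed signs
+# should agree with kernlab's own class predictions.
+data.frame(manual_sign = k, ksvm = predict(filter3, spam[1:10, -58]))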
diff --git a/lab3/assignment4.R b/lab3/assignment4.R
index 13c4613..5050136 100644
--- a/lab3/assignment4.R
+++ b/lab3/assignment4.R
@@ -1,104 +1,216 @@
 library(neuralnet)
+
 set.seed(1234567890)
 
+# ----1.----
 
 Var <- runif(500, 0, 10)
 
-
-mydata <- data.frame(Var, Sin=sin(Var))
+mydata <- data.frame(Var, Sin = sin(Var))
 
 
-tr <- mydata[1:25,] # Training
-te <- mydata[26:500,] # Test
+tr <- mydata[1:25, ] # Training
+te <- mydata[26:500, ] # Test
 
 
 # Random initialization of the weights in the interval [-1, 1]
-winit <- runif(10,-1,1)
+winit <- runif(10, -1, 1)
 formula <- Sin ~ Var
-  nn <- neuralnet( formula , data = tr, hidden = c(10), startweights = winit )
-    # Plot of the training data (black), test data (blue), and predictions (red)
-    plot(tr, cex=2)
-    points(te, col = "blue", cex=1)
-    points(te[,1],predict(nn,te), col="red", cex=1)
-
-### PART 2 ###
-    
-    
+nn <- neuralnet(
+  formula,
+  data = tr,
+  hidden = c(10),
+  startweights = winit
+)
+
+# Plot of the training data (black), test data (blue), and predictions (red)
+plot(
+  tr,
+  cex = 2,
+  ylab = "sin(x)",
+  xlab = "x",
+  main = "NN with logistic activation function"
+)
+points(te, col = "blue", cex = 1)
+points(te[, 1], predict(nn, te), col = "red", cex = 1)
+grid()
+legend(
+  "bottomleft",
+  legend = c("Training data", "Test data", "Predicted test data"),
+  col = c("black", "blue", "red"),
+  pch = 1,
+  pt.cex = c(2, 1, 1),
+  cex = 1
+)
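+
+# Quick numeric check (a sketch using `nn` and `te` from above): test
+# MSE of the logistic network on the 475 held-out points.
+mean((predict(nn, te) - te$Sin) ^ 2)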
+
+
+# ----2.----
+
 h1 <- function(x) {
   x
 }
-    
-    
+
+
 h2 <- function(x) {
-  ifelse(x>0,x,0)
+  ifelse(x > 0, x, 0)
 }
 
 h3 <- function(x)  {
   log(1 + exp(x))
 }
 
-
-
-nn_h1 <- neuralnet( formula , data = tr, hidden = c(10), startweights = t(winit), act.fct = h1 )
-# Plot of the training data (black), test data (blue), and predictions (red)
-plot(tr, cex=2, main = "h1")
-points(te, col = "blue", cex=1)
-points(te[,1],predict(nn_h1,te), col="red", cex=1)
-
-
-
-nn_h2 <- neuralnet( formula , data = tr, hidden = c(10), startweights = t(winit), act.fct = h2 )
-# Plot of the training data (black), test data (blue), and predictions (red)
-plot(tr, cex=2, main="h2")
-points(te, col = "blue", cex=1)
-points(te[,1],predict(nn_h2,te), col="red", cex=1)
-
-
-
-
-nn_h3 <- neuralnet( formula , data = tr, hidden = c(10), startweights = t(winit), act.fct = h3 )
-# Plot of the training data (black), test data (blue), and predictions (red)
-plot(tr, cex=2, main = "h3")
-points(te, col = "blue", cex=1)
-points(te[,1],predict(nn_h3,te), col="red", cex=1)
-
-
-# part 3
-
-
+nn_h1 <- neuralnet(
+  formula,
+  data = tr,
+  hidden = c(10),
+  startweights = t(winit),
+  act.fct = h1
+)
+
+plot(tr,
+     cex = 2,
+     xlab = "x",
+     "sin(x)",
+     main = "NN with linear activation function (h1)")
+points(te, col = "blue", cex = 1)
+points(te[, 1], predict(nn_h1, te), col = "red", cex = 1)
+grid()
+legend(
+  "bottomleft",
+  legend = c("Training data", "Test data", "Predicted test data"),
+  col = c("black", "blue", "red"),
+  pch = 1,
+  pt.cex = c(2, 1, 1),
+  cex = 1
+)
+
+nn_h2 <- neuralnet(
+  formula ,
+  data = tr,
+  hidden = c(10),
+  startweights = t(winit),
+  act.fct = h2
+)
+
+plot(
+  tr,
+  cex = 2,
+  xlab = "x",
+  ylab = "sin(x)",
+  main = "NN with ReLU activation function (h2)"
+)
+points(te, col = "blue", cex = 1)
+points(te[, 1], predict(nn_h2, te), col = "red", cex = 1)
+grid()
+legend(
+  "bottomleft",
+  legend = c("Training data", "Test data", "Predicted test data"),
+  col = c("black", "blue", "red"),
+  pch = 1,
+  pt.cex = c(2, 1, 1),
+  cex = 1
+)
+
+nn_h3 <- neuralnet(
+  formula ,
+  data = tr,
+  hidden = c(10),
+  startweights = t(winit),
+  act.fct = h3
+)
+
+plot(
+  tr,
+  cex = 2,
+  xlab = "x",
+  ylab = "sin(x)",
+  main = "NN with softplus activation function (h3)"
+)
+points(te, col = "blue", cex = 1)
+points(te[, 1], predict(nn_h3, te), col = "red", cex = 1)
+grid()
+legend(
+  "bottomleft",
+  legend = c("Training data", "Test data", "Predicted test data"),
+  col = c("black", "blue", "red"),
+  pch = 1,
+  pt.cex = c(2, 1, 1),
+  cex = 1
+)
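+
+# Note (sketch): with h1 every layer is affine, so the whole network is
+# affine in Var and the h1 predictions must lie on a straight line. A
+# simple check is the R^2 of a line fitted to those predictions:
+summary(lm(as.vector(predict(nn_h1, te)) ~ te$Var))$r.squared # expected ~ 1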
+
+
+# ----3.----
 Var1 <- runif(500, 0, 50)
 
-
-mydata1 <- data.frame(Var = Var1, Sin=sin(Var1))
-
-plot(mydata1, cex=2, main = "500 random points",ylim = c(-10,10))
-points(mydata1, col = "blue", cex=1)
-pred <- predict(nn,te)
-prediciton <- predict(nn,mydata1)
-points(mydata1[,1],prediciton, col="red", cex = 1)
-
-
-# Part 4
+mydata1 <- data.frame(Var = Var1, Sin = sin(Var1))
+
+pred1 <- predict(nn, mydata1)
+
+plot(
+  mydata1,
+  cex = 2,
+  main = "NN with logistic activation",
+  ylim = c(-10, 2),
+  xlab = "x",
+  ylab = "sin(x)"
+)
+points(mydata1, col = "blue", cex = 1)
+points(mydata1[, 1], pred1, col = "red", cex = 1)
+grid()
+legend(
+  "bottomleft",
+  legend = c("Training data", "Test data", "Predicted test data"),
+  col = c("black", "blue", "red"),
+  pch = 1,
+  pt.cex = c(2, 1, 1),
+  cex = 1
+)
+
+# ----4.----
+
+min(pred1) # smallest prediction; predictions level off near this value for large x
 
 nn$weights
 
+# Large weights saturate the logistic activation, so the output converges to a constant for large x
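+
+# Sketch of the saturation argument, assuming neuralnet's weight layout
+# (first row of each weight matrix is the bias):
+w_in <- nn$weights[[1]][[1]]  # (bias, Var) x 10 hidden units
+w_out <- nn$weights[[1]][[2]] # (bias, 10 hidden units) x 1 output
+# As x -> Inf each logistic unit tends to 1 if its Var weight is positive
+# and to 0 otherwise, so the network output tends to:
+w_out[1] + sum(w_out[-1][w_in[2, ] > 0])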
 
-# Part 5 
-
-
+# ----5.----
 
 Var2 <- runif(500, 0, 10)
 
-mydata2 <- data.frame(Sin2=sin(Var2), Var2)
+mydata2 <- data.frame(Sin2 = sin(Var2), Var2)
 
 formula1 <- Sin2 ~ Var2
 
-nn2 <- neuralnet( formula1 , data = mydata2, hidden = c(10), startweights = winit)
-
-
-
-
-
-plot(mydata2, cex=2, main = "500 random points reverse", ylim = c(-2,10))
-points(mydata2, col = "blue", cex=1)
-points(mydata2[,1],predict(nn2,mydata2), col="red", cex = 1)
+nn2 <- neuralnet(
+  formula1,
+  data = mydata2,
+  hidden = c(10),
+  startweights = winit
+)
+
+
+plot(
+  mydata2,
+  cex = 2,
+  main = "NN with logistic activation function",
+  xlab = "sin(x)",
+  ylab = "x",
+  ylim = c(-2, 10)
+)
+points(mydata2, col = "blue", cex = 1)
+points(mydata2[, 1],
+       predict(nn2, mydata2),
+       col = "red",
+       cex = 1)
+grid()
+legend(
+  "topleft",
+  legend = c("Training data", "Test data", "Predicted test data"),
+  col = c("black", "blue", "red"),
+  pch = 1,
+  pt.cex = c(2, 1, 1),
+  cex = 1
+)
+
+# The mapping sin(x) -> x is one-to-many, e.g. sin(0) and sin(2 * pi) both equal 0, so x cannot be recovered from sin(x)
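+
+# Tiny illustration (sketch): distinct x values share the same sin value,
+# so a regression from sin(x) can at best average the matching x's.
+xs <- c(0, pi, 2 * pi, 3 * pi)
+sin(xs) # all (numerically) zero, yet the x values differ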
-- 
GitLab