# nn.py: Basic Neural Network implementation.
#
# A fully connected neural network with a single hidden layer, trained
# with full-batch gradient descent on a squared-error loss.
#
# Author: Derek Riley 2021
# Inspired by https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6
#

import numpy as np


def sigmoid(x):
    """This is the sigmoid activation function."""
    return 1.0 / (1.0 + np.exp(-1.0 * x))


def sigmoid_derivative(x):
    """This is the derivative of the sigmoid function.

    Note: x is the pre-activation value (the input to sigmoid),
    not the already-activated output.
    """
    return sigmoid(x) * (1.0 - sigmoid(x))


class NeuralNetwork:
    """Represents a basic fully connected single-layer neural network.

    Attributes:
    input (2D numpy array): input features, one row for each sample,
        and one column for each feature
    weights2 (numpy array): connection weights between the input
        and hidden layer
    weights3 (numpy array): connection weights between the hidden
        layer and output neuron
    y (numpy array): expected outputs of the network, one row for each
        sample, and one column for each output variable
    output (numpy array): stores the current output of the network
        after a feed forward pass
    learning_rate (float): scales the derivative influence in updating
        weights
    """

    def __init__(self, x, y, num_hidden_neurons=4, lr=1):
        """Setup a Neural Network with a single hidden layer.

        This method requires two vectors of x and y values as the input
        and output data.

        Args:
            x (2D numpy array): input features, one row per sample.
            y (numpy array): expected outputs, one row per sample.
            num_hidden_neurons (int): currently unused — the weights
                below are hard-coded for a 2-input, 4-hidden-neuron,
                1-output network.
            lr (float): learning rate used by update_weights.
        """
        self._a_1 = x
        # Pre-trained weights and biases for a 2-input, 4-hidden-neuron,
        # 1-output network (these happen to implement logical OR).
        self._weights_2 = np.array(
            [[ 3.07153357,  2.01940447, -2.14695621,  2.62044111],
             [ 2.83203743,  2.15003442, -2.16855273,  2.77165525]])
        self._weights_3 = np.array(
            [[ 3.8124126 ],
             [ 1.92454886],
             [-5.20663292],
             [ 3.21598943]])
        self._biases_2 = np.array(
            [-1.26285168, -0.72768134, 0.89760201, -1.10572122])
        self._biases_3 = np.array([-2.1110666])
        self._y = y
        self._output = np.zeros(self._y.shape)
        self._learning_rate = lr

    def load_4_layer_ttt_network(self):
        """Load pre-trained weights for a 9-input tic-tac-toe network."""
        self._weights_2 = np.array(
            [[-3.12064667, -0.62044264, -3.18868069, -1.06183619],
             [-2.75995675, -0.3063746 , -3.24168826, -0.7056788 ],
             [ 0.35471861, -1.40337629,  0.3368032 ,  1.96311844],
             [ 0.31900681, -0.98534514,  0.36569296,  1.7516015 ],
             [ 1.18823403, -0.88661356,  1.42729163,  2.3146592 ],
             [ 2.24817726, -0.73170809,  2.42017968,  3.13494424],
             [ 2.43338048, -1.12167492,  2.78634464,  3.30680788],
             [ 1.57132788, -1.4313579 ,  1.66389342,  2.45366816],
             [ 1.4126572 , -1.38204671,  1.45066697,  2.78777504]])
        self._weights_3 = np.array(
            [[ 6.10550764],
             [ 2.6696074 ],
             [ 6.58122877],
             [-5.46573692]])
        self._biases_2 = np.array(
            [-0.00142707, -0.08451622, -0.00777166, 0.07153606])
        self._biases_3 = np.array([0.03276832])

    def inference(self):
        """Use the network to make predictions for a given vector of
        inputs. This is the math to support a feed forward pass.

        The intermediate pre-activations and hidden activations are
        cached on the instance so update_weights can reuse them for
        back-propagation.

        Returns:
            numpy array: network output, one row per input sample.
        """
        # Hidden layer: z2 = a1 @ W2 + b2, a2 = sigmoid(z2).
        self._z_2 = np.dot(self._a_1, self._weights_2) + self._biases_2
        self._a_2 = sigmoid(self._z_2)
        # Output layer: z3 = a2 @ W3 + b3.
        self._z_3 = np.dot(self._a_2, self._weights_3) + self._biases_3
        return sigmoid(self._z_3)

    def feed_forward(self):
        """This is used in the training process to calculate and save the
        outputs for updating weights.
        """
        self._output = self.inference()

    def update_weights(self):
        """Update model weights based on the error between the most recent
        predictions (feed_forward) and the training values.

        Performs one full-batch gradient-descent step via
        back-propagation of a sum-of-squares loss. Requires that
        feed_forward has been called so the cached activations
        (_z_2, _a_2, _z_3) match self._output.
        """
        # Output-layer delta: d(loss)/d(z3). The (y - output) sign means
        # the deltas point downhill, so they are *added* below.
        d_output = (2.0 * (self._y - self._output)
                    * sigmoid_derivative(self._z_3))
        d_weights_3 = np.dot(self._a_2.T, d_output)
        d_biases_3 = np.sum(d_output, axis=0)
        # Back-propagate the error through the hidden layer.
        d_hidden = (np.dot(d_output, self._weights_3.T)
                    * sigmoid_derivative(self._z_2))
        d_weights_2 = np.dot(self._a_1.T, d_hidden)
        d_biases_2 = np.sum(d_hidden, axis=0)
        # Apply the scaled updates.
        self._weights_2 += self._learning_rate * d_weights_2
        self._weights_3 += self._learning_rate * d_weights_3
        self._biases_2 += self._learning_rate * d_biases_2
        self._biases_3 += self._learning_rate * d_biases_3

    def train(self, epochs=100, verbose=0):
        """This method trains the network for the given number of epochs.

        It doesn't return anything, instead it just updates the state of
        the network variables.
        """
        for i in range(epochs):
            self.feed_forward()
            self.update_weights()
            if verbose > 1:
                print(self.loss())

    def loss(self):
        """Calculate the MSE error for the set of training data."""
        return np.mean(np.square(self._output - self._y))

    def accuracy_precision(self):
        """Calculate and return the accuracy and precision.

        This assumes that the network has already been trained (i.e.
        feed_forward has populated self._output). Outputs and targets
        are thresholded at 0.5 to obtain binary classes.

        Returns:
            tuple(float, float): (accuracy, precision); precision is
            0.0 when the network makes no positive predictions.
        """
        predicted = self._output >= 0.5
        actual = self._y >= 0.5
        true_pos = np.count_nonzero(predicted & actual)
        false_pos = np.count_nonzero(predicted & ~actual)
        accuracy = np.count_nonzero(predicted == actual) / actual.size
        positives = true_pos + false_pos
        precision = true_pos / positives if positives else 0.0
        return accuracy, precision