Building a neural net
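A minimal feed-forward neural network in plain NumPy. Layers use sigmoid activations, the bias is folded into each weight matrix as an extra column, `activities` runs the forward pass, and `partials` backpropagates the output error (prediction minus target) to return the averaged partial derivatives with respect to each weight matrix, ordered from the last layer back to the first.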
import numpy as np

class NeuralNetwork():
    def __init__(self, input_layer_size):
        self.layer_sizes = [input_layer_size]
        self.w = []

    def add_layer(self, number_of_neurons):
        neurons_in_previous_layer = self.layer_sizes[-1]
        # Initialize weights randomly. +1 is for the bias
        new_w = np.random.rand(number_of_neurons, neurons_in_previous_layer + 1) / 1000
        self.layer_sizes.append(number_of_neurons)
        self.w.append(new_w)

    def __sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def __add_bias(self, matrix):
        # Append a row of ones so the bias acts as an extra column of the weight matrix
        number_of_columns = matrix.shape[1]
        return np.vstack([matrix, [1] * number_of_columns])

    def activities(self, input_matrix):
        if len(self.w) == 0:
            print("You need to add at least one layer")
            return None
        # Forward pass: each layer's output becomes the next layer's input
        layer_activities = []
        for i in range(0, len(self.w)):
            with_bias = self.__add_bias(input_matrix)
            input_matrix = self.__sigmoid(np.matmul(self.w[i], with_bias))
            layer_activities.append(input_matrix)
        return layer_activities

    def __zero_weights(self):
        # Zero matrices shaped like the weights (useful as gradient accumulators; unused below)
        zeros = []
        for i in range(0, len(self.w)):
            zeros.append(np.zeros(shape = [self.layer_sizes[i + 1], self.layer_sizes[i] + 1]))
        return zeros
    def partials(self, input_matrix, output_matrix):
        if len(self.w) == 0:
            print("You need to add at least one layer")
            return None
        # Determine number of samples in the input matrix
        p = input_matrix.shape[1]
        predictions = self.activities(input_matrix)
        derivatives = []
        # Error term at the output layer (prediction minus target)
        error = predictions[-1] - output_matrix
        # Walk backwards through the layers; the loop must reach i = 0 so the
        # first layer's gradient is computed as well
        for i in range(len(self.w) - 1, -1, -1):
            if i == 0:
                previous_activity = input_matrix
            else:
                previous_activity = predictions[i - 1]
            # Gradient with respect to w[i]; the bias row is appended so the
            # shape matches the weight matrix
            derivative = np.matmul(error, self.__add_bias(previous_activity).transpose())
            derivatives.append(derivative / p)
            if i != 0:
                # Propagate the error backwards through w[i], multiply by the
                # sigmoid derivative a * (1 - a), and drop the bias row
                a_bias = self.__add_bias(previous_activity)
                error = (np.matmul(self.w[i].transpose(), error) * a_bias * (1 - a_bias))[:-1, :]
        # Derivatives are ordered from the last layer back to the first
        return derivatives
net = NeuralNetwork(2)
net.add_layer(3)
net.add_layer(3)
net.add_layer(3)
print(net.partials(np.array([[1, 3], [2, 4]]), np.array([[1, 0], [0, 0], [0, 1]])))
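As a usage sketch (not part of the original gist), the derivatives returned by `partials` can drive plain gradient descent. The step size and iteration count below are illustrative assumptions; the gradient list is reversed so it lines up with `net.w`.

# Usage sketch: gradient descent on the demo data above, continuing from `net`.
# `learning_rate` and `n_steps` are illustrative values, not part of the gist.
X = np.array([[1, 3], [2, 4]])          # inputs, one sample per column
Y = np.array([[1, 0], [0, 0], [0, 1]])  # targets, one column per sample

learning_rate = 0.5
n_steps = 1000

for step in range(n_steps):
    # partials returns gradients from the last layer to the first,
    # so reverse the list to pair it with net.w
    grads = net.partials(X, Y)[::-1]
    for i in range(len(net.w)):
        net.w[i] -= learning_rate * grads[i]

print(net.activities(X)[-1])            # network outputs after training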