Error in backpropagation method from scratch

I've been trying to build an AI that predicts crypto prices for a while now, and I keep running into a persistent error in my backpropagation method: the shapes of the arrays in the np.dot call that computes the new weights don't line up. I think it might be caused by one of my functions (I point it out in the code below), but I don't know how to correct it.
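To show the kind of failure I mean, here is a toy reproduction of the shape mismatch; the sizes below are invented for illustration and are not from my actual network:

import numpy as np

grad = np.ones((4, 1))    # upstream gradient from the next layer
inputs = np.ones((3, 1))  # the layer's stored inputs

print(np.dot(grad, inputs.T).shape)  # works: (4, 1) @ (1, 3) -> (4, 3)
try:
    np.dot(grad, inputs)             # misaligned: (4, 1) @ (3, 1)
except ValueError as e:
    print(e)                         # shapes (4,1) and (3,1) not aligned ...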

Here's my code (with [...] marking parts I've left out):

import numpy as np

class Layer_Dense:
    def __init__(self, n_neurons, weigths, biases):
        self.n_neurons = np.array(n_neurons)
        self.weigths = np.array(weigths)
        self.biases = np.array(biases)

    def forward(self, inputs):
        self.inputs = np.array(inputs)
        self.output = np.dot(self.weigths, inputs) + self.biases

    def backpropagation(self, gradient, learning_rate=0.01):
        gradient = np.array(gradient)
        # Calculate the gradient with respect to weights and biases
        weights_gradient = np.dot(gradient, self.inputs.T) / len(self.inputs)
        biases_gradient = np.sum(gradient) / len(self.inputs)
        # Update weights and biases using some optimization algorithm (e.g., gradient descent)
        self.weigths -= learning_rate * weights_gradient
        self.biases -= learning_rate * biases_gradient
        self.weigths = np.array(self.weigths)
        # Return the gradient with respect to the inputs for the next layer
        return np.dot(self.weigths.T, gradient)

[...]

neural_network_layers = [
    Layer_Dense(neural_network_layers_and_its_neurons[1], weigths_list_of_matrises_for_continuation[0], biases_matrix_continuation[0]),
    Layer_Dense(neural_network_layers_and_its_neurons[2], weigths_list_of_matrises_for_continuation[1], biases_matrix_continuation[1]),
    Layer_Dense(neural_network_layers_and_its_neurons[3], weigths_list_of_matrises_for_continuation[2], biases_matrix_continuation[2]),
    Layer_Dense(neural_network_layers_and_its_neurons[4], weigths_list_of_matrises_for_continuation[3], biases_matrix_continuation[3]),
    Layer_Dense(neural_network_layers_and_its_neurons[5], weigths_list_of_matrises_for_continuation[4], biases_matrix_continuation[4])
]

[...]

for i in range(0, Batch_size):
    real_outputs = []
    for i in X:
        current_input = i
        # Forward pass through each layer
        for a, layer in enumerate(neural_network_layers):
            layer.forward(current_input)
            if a != (len(neural_network_layers) - 1):
                current_input = forward(layer.output)
            if a == (len(neural_network_layers) - 1):
                current_input = layer.output
                current_input = current_input[0]
        real_outputs.append(current_input)
    loss_a, accuracy_a = F.accuracy_and_or_loss_in_one_output_NN(expected_outputs, real_outputs, 0)
    accuracy.append(accuracy_a)
    loss.append(loss_a)
    # Here's the function I was talking about earlier
    loss_gradient = F.gradient_of_loss(expected_outputs, real_outputs)
    if a12 == 1:
        loss_b = loss_a + 1e10
    current_gradient = loss_gradient
    for layer in reversed(neural_network_layers):
        current_gradient = layer.backpropagation(current_gradient, learning_rate)

def gradient_of_loss(true_values_, predicted_values_):
    true_values = np.array(true_values_)
    predicted_values = np.array(predicted_values_)
    # Calculate the difference between true and predicted values
    gradient = predicted_values - true_values
    return gradient

I've tried reshaping the arrays, I've tried transposing them, I've changed the gradient_of_loss function about 10 times, and I've watched three math YouTube videos on partial derivatives, but nothing has worked.
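For reference, this is the shape convention I'm assuming a single dense layer should follow. It is only a minimal sketch, and every size and name in it (n_in, n_out, W, b, x) is made up for illustration:

import numpy as np

rng = np.random.default_rng(0)
n_in, n_out = 3, 2
W = rng.standard_normal((n_out, n_in))  # weights: (n_out, n_in)
b = np.zeros((n_out, 1))                # biases:  (n_out, 1)
x = rng.standard_normal((n_in, 1))      # one sample as a column vector

out = W @ x + b                         # forward pass -> (n_out, 1)
grad = rng.standard_normal(out.shape)   # stand-in for the upstream loss gradient

dW = grad @ x.T                         # (n_out, 1) @ (1, n_in) -> same shape as W
db = grad                               # for a single sample, matches b directly
dx = W.T @ grad                         # (n_in, n_out) @ (n_out, 1) -> same shape as x

assert dW.shape == W.shape and db.shape == b.shape and dx.shape == x.shape

If every layer respected these shapes, the np.dot calls in backpropagation should line up. My suspicion is that the gradient returned by gradient_of_loss has the shape of the whole real_outputs list rather than one column per sample, but I haven't been able to confirm or fix that.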
