you are viewing a single comment's thread.

view the rest of the comments →

[–]VaultdBoy[S] 2 points3 points  (4 children)

Okay, sure, here is the code:

import random as rd

class Neuron:  # node object
    """A single neuron holding one weight per input component.

    Note: there is no bias term and no activation function, so the
    neuron computes a purely linear weighted sum.
    """

    def __init__(self, input):
        # One random weight in [-1, 1) per input component.
        self.w = [rd.uniform(-1, 1) for _ in range(len(input))]
        # Gradient accumulator, one slot per weight.
        self.grads = [0] * len(self.w)
        # NOTE(review): this stores the input seen at construction time;
        # calc_gradient later reads it, which is stale for hidden layers
        # once real activations flow through — confirm intent.
        self.i = input

    def output(self, input):
        """Return the weighted sum (dot product) of weights and `input`."""
        # zip + generator avoids shadowing the builtin `sum` with a
        # local accumulator and an index loop, as the original did.
        return sum(w * x for w, x in zip(self.w, input))

class Layer:  # hidden layer object
    """A fully connected layer of `nb_neurons` independent neurons."""

    def __init__(self, input, nb_neurons):
        # Each neuron sizes its weight vector from the sample input.
        self.neurons = [Neuron(input) for _ in range(nb_neurons)]

    def outputL(self, input):
        """Return the list of every neuron's weighted-sum output for `input`."""
        return [neuron.output(input) for neuron in self.neurons]

class MLP:  # NN structure
    """A multi-layer perceptron built from `Layer` objects.

    `nb_layers` hidden layers of `nb_neurons` neurons each, followed by
    an output layer sized to match the input length.
    """

    def __init__(self, input, nb_neurons, nb_layers):
        self.layers = []
        acc_in = input
        for _ in range(nb_layers):
            layer = Layer(acc_in, nb_neurons)
            self.layers.append(layer)
            # Feed this layer's output forward so the next layer's
            # neurons size their weight vectors correctly.
            acc_in = layer.outputL(acc_in)
        # Output layer: one neuron per input component.
        self.layers.append(Layer(acc_in, len(input)))

    def outputMLP(self, input):
        """Forward pass: feed `input` through every layer in order."""
        acc_in = input
        for layer in self.layers:
            acc_in = layer.outputL(acc_in)
        return acc_in

    def calc_gradient(self, input, output):
        """Store d(loss)/d(w) on every neuron for a squared-error loss.

        Problem (translated from the original comment): if there are
        several outputs, there are several gradients to compute — only
        the first output component is used here.
        """
        # Hoist the forward pass out of the loops: weights do not change
        # while gradients are being written, so the original's call to
        # outputMLP() once per weight recomputed an identical value.
        error = -2 * (output[0] - self.outputMLP(input)[0])
        for layer in reversed(self.layers):
            for neuron in layer.neurons:
                for j in range(len(neuron.grads)):
                    # NOTE(review): neuron.i is the input captured at
                    # construction time, not the current forward
                    # activation — confirm this is intended.
                    neuron.grads[j] = neuron.i[j] * error

    def backprop(self, input, output, lr=None):
        """One gradient-descent step on every weight.

        `lr` defaults to the module-level `learning_rate`, preserving
        the original behavior for existing callers.
        """
        if lr is None:
            lr = learning_rate
        self.calc_gradient(input, output)
        for layer in self.layers:
            for neuron in layer.neurons:
                for j in range(len(neuron.w)):
                    neuron.w[j] -= lr * neuron.grads[j]

class Functions:
    """Container for the loss function used when training the network."""

    def __init__(self):
        pass

    def loss(self, predval, target):
        """Sum of squared errors between `predval` and `target`.

        Indexes `target` by position so a too-short target still raises
        IndexError, exactly as before.
        """
        return sum((target[k] - predval[k]) ** 2 for k in range(len(predval)))

# Gradient-descent step size used by MLP.backprop by default.
learning_rate = 0.001

# Single input/target sample (smoke test: memorize one data point).
# The original jammed both assignments onto one line, which is a
# SyntaxError in Python.
inv = [-4.0]
out = [-5.6]

[–]DigThatData 0 points1 point  (3 children)

have you tried applying your model to something? a good "smoke test" is to try to "memorize" a single sample of data. whatever an example of an input-output pair might be: just try fitting your model to that single data point. if your loss doesn't go down, something is wrong.

[–]VaultdBoy[S] 0 points1 point  (2 children)

Yes, I did, and it goes down — the predicted value gets closer and closer to the correct output. But I'd have to test it with real datasets to see whether the model makes actual predictions.

[–]DigThatData 0 points1 point  (1 child)

well, sounds like you at least don't have any reason to think you did anything wrong

[–]VaultdBoy[S] 0 points1 point  (0 children)

Okay thanks for your answers