# Q&A excerpt (question translated from Chinese): "I have been programming
# this neural network made of 4 layers: the first has 2 neurons, the second
# has 2, the third has 2, and the output layer has one neuron."  Original
# target: Python 3.7 + NumPy.
#
# NOTE(review): the scraped source was truncated mid-backprop (it ends right
# after d_bias3).  The gradients for weights2/weights1 and the parameter
# update step are NOT visible in the excerpt and are therefore not invented
# here; backprop returns the gradients it can compute.

import numpy as np
# matplotlib.pyplot was imported in the original excerpt but never used in
# the visible code, so the import is omitted.


def sigmoid(z):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_derivative(y):
    """Sigmoid derivative expressed in terms of its output y = sigmoid(z)."""
    return y * (1.0 - y)


class NeuralNetwork:
    """Fully-connected 2-2-2-1 network with sigmoid activations throughout.

    Weights and biases are initialised uniformly in [0, 1) via
    ``np.random.rand``; ``x`` is the input batch (rows = samples) and ``y``
    the matching targets.
    """

    def __init__(self, x, y):
        self.input = x
        # Layer sizes: n_features -> 2 -> 2 -> 2 -> 1
        self.weights1 = np.random.rand(self.input.shape[1], 2)
        self.weights2 = np.random.rand(2, 2)
        self.weights3 = np.random.rand(2, 2)
        self.weights4 = np.random.rand(2, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)
        self.bias1 = np.random.rand(1, 2)
        self.bias2 = np.random.rand(1, 2)
        self.bias3 = np.random.rand(1, 2)
        self.bias4 = np.random.rand(1, 1)
        self.learning_rate = 0.005

    def feedforward(self):
        """Propagate ``self.input`` through all four layers into ``self.output``."""
        self.layer1 = sigmoid(np.dot(self.input, self.weights1) + self.bias1)
        self.layer2 = sigmoid(np.dot(self.layer1, self.weights2) + self.bias2)
        # BUG FIX: the original computed layer3 from layer1 and the output
        # from layer2, so layer3/weights3/bias3 never influenced the output
        # at all — while backprop still computed "gradients" for them.  The
        # chain must be layer2 -> layer3 -> output, which is also what
        # backprop's d_weights4 = layer3.T @ delta_4 already assumes.
        self.layer3 = sigmoid(np.dot(self.layer2, self.weights3) + self.bias3)
        self.output = sigmoid(np.dot(self.layer3, self.weights4) + self.bias4)

    def backprop(self):
        """Chain-rule gradients for the last two layers.

        Only the steps visible in the original excerpt are reproduced (the
        source is cut off after d_bias3 — see module NOTE).  The error term
        2*(y - output) keeps the original's sign convention, which implies
        updates of the form ``w += learning_rate * d_w``.

        Returns:
            tuple: (d_weights4, d_bias4, d_weights3, d_bias3)
        """
        # Output layer: dL/dz4
        delta_4 = 2 * (self.y - self.output) * sigmoid_derivative(self.output)
        d_weights4 = np.dot(self.layer3.T, delta_4)
        # Bias gradient averaged over the batch (rows).
        d_bias4 = delta_4.mean(axis=0)

        # Hidden layer 3: back-propagate delta_4 through weights4.
        delta_3 = np.dot(delta_4, self.weights4.T) * sigmoid_derivative(self.layer3)
        d_weights3 = np.dot(self.layer2.T, delta_3)
        d_bias3 = delta_3.mean(axis=0)

        # TODO(review): delta_2/delta_1, d_weights2/d_weights1 and the
        # weight/bias update step were truncated out of the source excerpt.
        return d_weights4, d_bias4, d_weights3, d_bias3
添加回答
举报
0/150
提交
取消