nn_no_bias.py
import numpy as np


def sgm(x, deriv=False):
    # Sigmoid activation. With deriv=True, x is assumed to already be a
    # sigmoid output, so the derivative is simply x * (1 - x).
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))


# A single training example: two inputs and two target outputs.
X = np.array([[0.05, 0.1]])
y = np.array([[0.99, 0.01]])

# Weights for a 2-2-2 feedforward network with no bias terms:
# syn0 maps input -> hidden, syn1 maps hidden -> output.
syn0 = np.array([[0.14, 0.2],
                 [0.25, 0.32]])
syn1 = np.array([[0.42, 0.42],
                 [0.38, 0.5]])

learningRate = 0.08
r = 90000

for j in range(r):
    # Forward pass.
    l0 = X
    l1 = sgm(np.dot(l0, syn0))   # hidden activations, shape (1, 2)
    h1 = l1[0][0]
    h2 = l1[0][1]
    l2 = sgm(np.dot(l1, syn1))   # output activations, shape (1, 2)

    # Squared-error loss.
    l2_error = y - l2
    error = 0.5 * np.sum(l2_error ** 2)
    if j % 100 == 0:
        print("Error: {0}, Iteration: {1}".format(error, j))

    # Backward pass: each delta is dE/dnet at that layer.
    l2_delta = -l2_error * sgm(l2, deriv=True)
    l1_error = l2_delta.dot(syn1.T)
    l1_delta = l1_error * sgm(l1, deriv=True)

    # Gradient-descent update, written out per weight.
    # For syn0[i][j] (input unit i -> hidden unit j) the gradient
    # is l1_delta[0][j] * X[0][i].
    syn0[0][0] -= l1_delta[0][0] * X[0][0] * learningRate
    syn0[0][1] -= l1_delta[0][1] * X[0][0] * learningRate
    syn0[1][0] -= l1_delta[0][0] * X[0][1] * learningRate
    syn0[1][1] -= l1_delta[0][1] * X[0][1] * learningRate

    # For syn1[i][j] (hidden unit i -> output unit j) the gradient
    # is l2_delta[0][j] * l1[0][i].
    syn1[0][0] -= l2_delta[0][0] * h1 * learningRate
    syn1[0][1] -= l2_delta[0][1] * h1 * learningRate
    syn1[1][0] -= l2_delta[0][0] * h2 * learningRate
    syn1[1][1] -= l2_delta[0][1] * h2 * learningRate
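
The per-weight updates above spell out two outer-product gradients by hand. As a minimal sketch (not part of the original file, reusing the script's own variable names), the whole update step can be written as two matrix operations, which also removes the need for the h1/h2 scalars:

# Vectorized equivalent of the loop body's update step (illustrative sketch).
# l0.T.dot(l1_delta) has shape (2, 2): entry (i, j) equals l1_delta[0][j] * X[0][i],
# i.e. exactly the per-weight gradients written out above.
l2_delta = -(y - l2) * sgm(l2, deriv=True)
l1_delta = l2_delta.dot(syn1.T) * sgm(l1, deriv=True)
syn1 -= learningRate * l1.T.dot(l2_delta)  # replaces the four syn1[i][j] lines
syn0 -= learningRate * l0.T.dot(l1_delta)  # replaces the four syn0[i][j] lines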