
Commit 2bd6ade

Machine Learning Baby
1 parent 9c7d4f5 commit 2bd6ade

File tree

7 files changed: +617 -0 lines changed


3layer_nn.py

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
import numpy as np

# sigmoid function (returns the derivative when deriv=True,
# assuming x is already a sigmoid output)
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

# input dataset
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

# output dataset
y = np.array([[0], [1], [1], [0]])

np.random.seed(1)

# initialise weights randomly
syn0 = 2 * np.random.random((3, 4)) - 1
syn1 = 2 * np.random.random((4, 1)) - 1

for j in xrange(60000):

    # forward propagation
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))
    l2 = nonlin(np.dot(l1, syn1))

    # error
    l2_error = y - l2

    if (j % 10000) == 0:
        print "Error:" + str(np.mean(np.abs(l2_error)))

    # multiply how much we missed by the
    # slope of the sigmoid at the values in l2
    l2_delta = l2_error * nonlin(l2, deriv=True)

    # how much did each l1 value contribute to the l2 error?
    l1_error = l2_delta.dot(syn1.T)

    l1_delta = l1_error * nonlin(l1, deriv=True)

    # update weights
    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)

print "Output After Training:"
print l2
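
Not part of the committed file, but a quick way to sanity-check the result: y encodes XOR of the first two input columns, so thresholding l2 at 0.5 should reproduce it once training converges. A hypothetical check, assuming the script above has just run:

# hypothetical post-training check, not in 3layer_nn.py
predictions = (l2 > 0.5).astype(int)
print(predictions)                      # expected to match y: [[0], [1], [1], [0]]
print(np.array_equal(predictions, y))   # True once the network has learned XOR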

basic_nn.py

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
import numpy as np

# sigmoid function
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

# input dataset
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

# output dataset
y = np.array([[0, 0, 1, 1]]).T

def derivative(x, y):

    m, n = np.shape(x)
    # numIterations = 1000
    # alpha = 0.005
    syn0 = np.ones([n, 1])

    for iter in xrange(10000):

        # forward propagation
        l0 = X
        l1 = nonlin(np.dot(l0, syn0))

        # error
        l1_error = y - l1

        # multiply how much we missed by the
        # slope of the sigmoid at the values in l1
        l1_delta = l1_error * nonlin(l1, True)

        # update weights
        syn0 += np.dot(l0.T, l1_delta)

    print "Output After Training:"
    print l1
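
As committed, derivative is defined but never invoked, so running basic_nn.py only builds the dataset. A minimal invocation (an assumption about intended usage, not part of the file) would be:

# hypothetical call, not in basic_nn.py; X and y are the module-level arrays above
derivative(X, y)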

deriv_nn.py

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
import numpy as np

# sigmoid function
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

def derivative(x, y):

    m, n = np.shape(x)
    # numIterations = 1000
    # alpha = 0.005
    np.random.seed(1)

    # initialise weights randomly
    syn0 = 2 * np.random.random((n, 1)) - 1

    for iter in xrange(100):

        # forward propagation
        l0 = x
        l1 = nonlin(np.dot(l0, syn0))
        print l1
        print x
        print y
        # error
        l1_error = y - l1

        # multiply how much we missed by the
        # slope of the sigmoid at the values in l1
        l1_delta = l1_error * nonlin(l1, True)

        # update weights
        syn0 += np.dot(l0.T, l1_delta)

    print "Output After Training:"
    print l1

    return syn0[0]
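
deriv_nn.py defines no dataset of its own, so derivative has to be handed one. A usage sketch borrowing the toy inputs from basic_nn.py (purely an assumption, not part of the file):

# hypothetical usage, not in deriv_nn.py
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])
y = np.array([[0, 0, 1, 1]]).T
print(derivative(X, y))   # first entry of the trained weight vector, per the return value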

final_rnn.py

Lines changed: 127 additions & 0 deletions
@@ -0,0 +1,127 @@
import copy, numpy as np
np.random.seed(0)

# compute sigmoid nonlinearity
def sigmoid(x):
    output = 1 / (1 + np.exp(-x))
    return output

# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    return output * (1 - output)


# training dataset generation
int2binary = {}
binary_dim = 8

largest_number = pow(2, binary_dim)
binary = np.unpackbits(
    np.array([range(largest_number)], dtype=np.uint8).T, axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]

# input variables
alpha = 0.1
input_dim = 1
hidden_dim = 32
output_dim = 1

# initialise neural network weights
synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1
synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1
synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1

synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)

# training logic
for j in range(60000):

    # generate a simple addition problem (a + b = c)
    # a_int = np.random.randint(largest_number / 2) # int version
    # a = int2binary[a_int] # binary encoding

    # b_int = np.random.randint(largest_number / 2) # int version
    # b = int2binary[b_int] # binary encoding

    # true answer
    # c_int = 2 * a_int
    # c = int2binary[c_int]

    # fixed toy sequence used instead of the binary-addition problem above
    a = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
    c = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.35])

    # where we'll store our best guess
    d = np.zeros_like(c)

    overallError = 0

    layer_2_deltas = list()
    layer_1_values = list()
    layer_1_values.append(np.zeros(hidden_dim))

    # moving along the positions in the sequence
    for position in range(binary_dim):

        # generate IO
        X = np.array([[a[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T

        # hidden layer (input + prev_hidden)
        layer_1 = sigmoid(np.dot(X, synapse_0) + np.dot(layer_1_values[-1], synapse_h))

        # output layer (new prediction)
        layer_2 = sigmoid(np.dot(layer_1, synapse_1))

        # error
        layer_2_error = y - layer_2
        layer_2_deltas.append((layer_2_error) * sigmoid_output_to_derivative(layer_2))
        overallError += np.abs(layer_2_error[0])

        # decode estimate so we can print it out
        d[binary_dim - position - 1] = layer_2[0][0]

        # store hidden layer so we can use it in the next timestep
        layer_1_values.append(copy.deepcopy(layer_1))

    future_layer_1_delta = np.zeros(hidden_dim)

    # backpropagation through time
    for position in range(binary_dim):

        X = np.array([[a[position]]])
        layer_1 = layer_1_values[-position - 1]
        prev_layer_1 = layer_1_values[-position - 2]

        # error at output layer
        layer_2_delta = layer_2_deltas[-position - 1]
        # error at hidden layer
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) +
                         layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)

        # let's update all our weights so we can try again
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)

        future_layer_1_delta = layer_1_delta

    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha

    synapse_0_update *= 0
    synapse_1_update *= 0
    synapse_h_update *= 0

    # print out progress
    if (j % 1000) == 0:
        print "Error:" + str(overallError)
        print "Pred:" + str(d)
        print "True:" + str(c)
        out = 0
        # decode d as if it were binary (out is computed but not printed)
        for index, x in enumerate(reversed(d)):
            out += x * pow(2, index)
        print str(a) + " -> " + str(d[0])
        print "------------"

lin_reg.py

Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
import numpy as np
import random

# sigmoid function
def nonlin(x):
    return 1 / (1 + np.exp(-(x / 3)))

# m denotes the number of examples here, not the number of features
def gradientDescent(x, y, theta, alpha, m, numIterations):
    xTrans = x.transpose()
    for i in range(0, numIterations):
        hypothesis = np.dot(x, theta)
        # print "Theta:" + str(theta)
        # print "X:" + str(np.shape(x))
        # print "Hypo:" + str(np.shape(hypothesis))
        # print "Y:" + str(np.shape(y))
        loss = hypothesis - y
        # print "Loss:" + str(loss)
        # avg cost per example (the 2 in 2*m doesn't really matter here.
        # But to be consistent with the gradient, I include it)
        cost = np.sum(loss ** 2) / (2 * m)
        # print("Iteration %d | Cost: %f" % (i, cost))
        # avg gradient per example
        gradient = np.dot(xTrans, loss) / m
        # print gradient
        # update
        theta = theta - alpha * gradient
        # print theta
    return theta


def derivative(x, y):

    m, n = np.shape(x)
    numIterations = 1000
    alpha = 0.005
    theta = np.ones([n, 1])

    # print theta
    theta = gradientDescent(x, y, theta, alpha, m, numIterations)
    # print "Theta:" + str(theta)
    # print "X:" + str(x)
    # print "Y:" + str(y)
    theta = nonlin(theta)
    # print "Theta:" + str(theta)
    return theta * np.ones_like(y)
    # print m
    # print n


# gen data
x = np.array([[0],
              [1],
              [2],
              [3]])
y = np.array([[0, 2, 4, 8]]).T
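
Here too derivative is defined and the toy data is generated, but nothing calls it. A minimal invocation (an assumption, not part of the file):

# hypothetical call, not in lin_reg.py
print(derivative(x, y))   # sigmoid-squashed slope estimate, broadcast to y's shape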

0 commit comments
