-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathsequential_nn.py
More file actions
executable file
·142 lines (116 loc) · 3.97 KB
/
sequential_nn.py
File metadata and controls
executable file
·142 lines (116 loc) · 3.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
# Neural Network
# -- nn.py
#
# A toy fully-connected feed-forward network (Python 2).
# @package NeuralNetwork
import random
import math
import threading  # NOTE(review): unused in this file -- presumably for the planned parallel activate(); confirm before removing
import operator

# Number of forward passes executed by main().
NUM_TESTS = 300
# Layer sizes: input, hidden, output.
NUM_INPUTS = 1000
NUM_HIDDEN = 1200
NUM_OUTPUTS = 1
# Topology handed to NeuralNetwork(): one entry per layer.
LAYERS = [NUM_INPUTS, NUM_HIDDEN, NUM_OUTPUTS]
# Backpropagation is a stub (returns None); keep disabled.
USE_BACKPROPAGATE = False
# When True, main() runs under the stdlib 'profile' module.
USE_PROFILE = True
class Layer(object):
    """Base layer: identity pass-through plus shared activation helpers."""

    def activate(self, inputs):
        """Default activation: hand the inputs back untouched."""
        return inputs

    @staticmethod
    def sigmoid(num):
        """Squashing function: hyperbolic tangent, range (-1, 1)."""
        return math.tanh(num)

    @staticmethod
    def derivSig(num):
        """Derivative of the tanh squashing function: 1 - tanh(x)^2."""
        return 1 - math.tanh(num) ** 2
class OutputLayer(Layer):
    """Final layer: maps tanh activations in [-1, 1] to binary 0/1 labels."""

    def activate(self, inputs):
        """Rescale each activation from [-1, 1] into [0, 1], then round to 0 or 1."""
        rescaled = (value / 2 + .5 for value in inputs)
        return [int(round(shifted)) for shifted in rescaled]
class InnerLayer(Layer):
    """Fully-connected layer holding a (num_edges x num_nodes) weight matrix.

    Rows correspond to neurons in the *next* layer (one row of weights per
    outgoing edge set); columns correspond to neurons in this layer.
    """

    def __init__(self, num_nodes, num_edges):
        """Randomly initialise every weight uniformly in [-1.0, 1.0].

        num_nodes -- neuron count in this layer (matrix columns)
        num_edges -- neuron count in the next layer (matrix rows)
        """
        random.seed()
        self.weight_matrix = [
            [random.uniform(-1.0, 1.0) for _ in range(num_nodes)]
            for _ in range(num_edges)
        ]

    # TODO Parallelize this!
    def activate(self, inputs):
        """Activate each neuron: sigmoid of the weights-inputs dot product."""
        results = []
        for weights in self.weight_matrix:
            # Dot product of the input vector with this neuron's weight row.
            weighted_sum = sum(w * x for w, x in zip(weights, inputs))
            results.append(self.sigmoid(weighted_sum))
        return results
class NeuralNetwork(object):
    """Feed-forward network: a chain of InnerLayers capped by an OutputLayer."""

    def __init__(self, nodes_per_layer):
        """Build one InnerLayer per adjacent pair of layer sizes.

        nodes_per_layer -- list of node counts per layer,
                           e.g. [NUM_INPUTS, NUM_HIDDEN, NUM_OUTPUTS]
        """
        self.layers = []
        # BUG FIX: the original iterated over the two-element *tuple*
        # (nodes_per_layer[:-1], nodes_per_layer[1:]) and unpacked each list,
        # which only happens to work for exactly three layers and raises
        # ValueError otherwise.  zip() pairs (current size, next size)
        # correctly for any depth.
        for num_nodes, num_edges in zip(nodes_per_layer[:-1], nodes_per_layer[1:]):
            self.layers.append(InnerLayer(num_nodes, num_edges))
        # The final layer thresholds activations into binary outputs.
        self.layers.append(OutputLayer())

    def feedForward(self, inputs):
        """Push 'inputs' through every layer; return the output layer's result."""
        # Start from the raw inputs so an empty layer list is still safe
        # (the original raised NameError in that case).
        outputs = inputs
        for layer in self.layers:
            # Each layer's outputs become the next layer's inputs.
            outputs = layer.activate(outputs)
        return outputs

    def backPropagate(self, desired_outputs):
        """Training stub -- not yet implemented; always returns None.

        BUG FIX: the original signature omitted 'self', so calling
        nn.backPropagate(...) raised a TypeError.  A sketch of the intended
        algorithm lives in the commented-out block at the end of the file.
        """
        pass
def main():
    """Exercise the network: random inputs/targets, then NUM_TESTS forward passes."""
    # Test Data: random floats in [0, 1) as inputs, random 0/1 targets.
    inputs = [ random.random() for i in range(NUM_INPUTS) ]
    desired_outputs = [ random.choice([0, 1]) for i in range(NUM_OUTPUTS) ]
    # print inputs
    # print desired_outputs
    # initialize the neural network
    nn = NeuralNetwork(LAYERS)
    for i in range(NUM_TESTS):
        # TODO: Call this in parallel?
        # The idea is it's like a pipeline from the game
        output = nn.feedForward(inputs)
        if USE_BACKPROPAGATE:
            # NOTE(review): backPropagate is a stub, so 'error' is always
            # None here; nothing downstream reads it.
            error = nn.backPropagate(desired_outputs)
        print "Test number {} -> ".format(i), output
if __name__ == "__main__":
    if USE_PROFILE:
        # Run under the stdlib pure-Python profiler to find hot spots.
        import profile
        profile.run('main()')
    else:
        main()
# def backPropagate(targets, inputs, hidden):
# out_deltas = []
# for i in range(NUM_OUTPUTS):
# error = targets[i] - OUTPUTS[i]
# out_deltas.append(error * derivSig(OUTPUTS[i]))
#
# for i in range(NUM_HIDDEN):
# for j in range(NUM_OUTPUTS):
# delta = out_deltas[j] * hidden[i].final
# hidden[i].weights[j] += .5 * delta
#
# hidden_deltas = []
# for i in range(NUM_HIDDEN):
# error = 0
# for j in range(NUM_OUTPUTS):
# error += out_deltas[j] * hidden[i].weights[j]
# hidden_deltas.append(error * derivSig(hidden[i].final))
#
# for i in range(NUM_INPUTS):
# for j in range(NUM_HIDDEN):
# delta = hidden_deltas[j] * hidden[j].last_input
# inputs[i].weights[j] += .5 * delta
#
# error = 0
# for i in range(len(targets)):
# error += .5 * (targets[i] - OUTPUTS[i])**2
#
# return error
# vim:ts=4:sw=4:sta:et: