@@ -15,6 +15,7 @@ import RicardoSantos/MLActivationFunctions/3 as activation
// https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
// https://www.anotsorandomwalk.com/backpropagation-example-with-numbers-step-by-step/
// https://machinelearningmastery.com/implement-backpropagation-algorithm-scratch-python/
+ // https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/

// @function helper method to generate random weights of (A * B) size.
// @param previous_size int, number of nodes on the left.
@@ -45,14 +46,59 @@ propagate_forward (float[] inputs, float[] weights, int[] layer_sizes, int weigh
        // find the base level of current layer weights:
        //     base = (sum of nodes up to previous layer) * (max weights in a node)
        //     (weights in layer) = base + (previous layer nodes) * (current layer nodes)
+         int _previous_layer_size = array.get(layer_sizes, _layer - 1)
+         int _this_layer_size = array.get(layer_sizes, _layer)
        int _w_base_idx = array.sum(array.slice(layer_sizes, 0, _layer - 1)) * weights_max_width
-         float[] _layer_weights = array.slice(weights, _w_base_idx, _w_base_idx + array.get(layer_sizes, _layer - 1) * array.get(layer_sizes, _layer))
-         _layer_output := nl.layer(_layer_output, _layer_weights, array.get(layer_sizes, _layer))
+         float[] _layer_weights = array.slice(weights, _w_base_idx, _w_base_idx + _previous_layer_size * _this_layer_size)
+         _layer_output := nl.layer(_layer_output, _layer_weights, _this_layer_size)
        array.concat(_layer_outputs, _layer_output)
    _layer_outputs
//}
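
//{ editor's note: a worked example of the weight indexing above, using the usage
// section's layer_sizes = array.from(5, 3, 2) and assuming weights_max_width = 5
// (the widest layer); the numbers are illustrative only:
//     _layer = 1: _w_base_idx = 0 * 5 = 0,  weights in layer = 5 * 3 = 15 -> slice [0, 15)
//     _layer = 2: _w_base_idx = 5 * 5 = 25, weights in layer = 3 * 2 = 6  -> slice [25, 31)
//}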

+ // @function helper method to propagate the error signal backward, computing the error of each output layer node.
+ // @param errors float array, container that the computed errors are pushed into.
+ // @param output_nodes float array, activation values of the output layer nodes.
+ // @param targets float array, expected value for each output node.
+ // @returns float array.
+ propback_calculate_errors_for_output (float[] errors, float[] output_nodes, float[] targets) => //{
+     int _size_o = array.size(output_nodes)
+     int _size_t = array.size(targets)
+
+     switch
+         (_size_o != _size_t) => runtime.error(str.format('FunctionNNetwork -> propback_calculate_errors(): parameters "output_nodes" and "targets" size does not match, found: {0}, {1} .', _size_o, _size_t))
+
+     for _n = 0 to _size_o - 1
+         float _target = array.get(targets, _n)
+         float _output = array.get(output_nodes, _n)
+         array.push(errors, _target - _output)
+ //{ test:
+ // if barstate.isfirst
+ //     _e = array.new_float(0)
+ //     _o = array.from(1.0, 2, 3)
+ //     _t = array.from(1.1, 1.1, 1.1)
+ //     propback_calculate_errors_for_output(_e, _o, _t)
+ //     label.new(bar_index, 0.0, str.format('{0}', str.tostring(_e)))
+ //}}
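+ // editor's note: for the test above, each entry is target - output, so the label
+ // should read approximately [0.1, -0.9, -1.9] (1.1 - 1.0, 1.1 - 2.0, 1.1 - 3.0).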

+ // @function helper method to propagate the error signal backward, computing the error of each node in a hidden layer.
+ // @param errors float array, container that the computed errors are pushed into.
+ // @param weights float array, weights of the current layer.
+ // @param error_deltas float array, error deltas of the next layer's nodes.
+ // @returns float array.
+ propback_calculate_errors_for_hidden (float[] errors, float[] weights, float[] error_deltas) => //{
+     // iterate over weights in current layer
+     for _i = 0 to array.size(weights) - 1
+         float _error = 0.0
+         float _wi = array.get(weights, _i)
+         // iterate over nodes in next layer
+         for _n = 0 to array.size(error_deltas) - 1
+             float _error_delta_in_next_layer_node = array.get(error_deltas, _n) // #############
+             _error += _wi * _error_delta_in_next_layer_node
+         array.push(errors, _error)
+ //{ test:
+ // if barstate.isfirst
+ //     _e = array.new_float(0)
+ //     _w = array.from(0.1, 0.2, 0.3, 0.4, 0.5, 0.6)
+ //     _d = array.from(1.1, 1.1, 1.1)
+ //     propback_calculate_errors_for_hidden(_e, _w, _d)
+ //     label.new(bar_index, 0.0, str.format('{0}', str.tostring(_e)))
+ //}}
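+ //{ editor's sketch: the loop above pushes one error per *weight*; standard
+ // backprop (per the Mazur reference) yields one error per *node* in the current
+ // layer: error_j = sum over next-layer nodes k of w(j, k) * delta_k. A minimal
+ // version, assuming node j's outgoing weights are stored contiguously (this
+ // layout is an assumption, not confirmed by the library):
+ // propback_node_errors_sketch (float[] errors, float[] weights, float[] error_deltas) =>
+ //     int _next_size = array.size(error_deltas)
+ //     int _this_size = array.size(weights) / _next_size
+ //     for _j = 0 to _this_size - 1
+ //         float _error = 0.0
+ //         for _k = 0 to _next_size - 1
+ //             _error += array.get(weights, _j * _next_size + _k) * array.get(error_deltas, _k)
+ //         array.push(errors, _error)
+ //}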

// @function Generalized Neural Network Method.
// @param x TODO: add parameter x description here
@@ -95,46 +141,61 @@ export network (
    float _error_total = loss.mse(targets, array.slice(_layer_outputs, _total_nodes - (array.get(layer_sizes, _size_ls - 1)), _total_nodes))

    // propagate backward:
+     // reference for delta rule: https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
+     //     node_delta_error = (node_output - node_target) * derivative(node_output)
+     //     update_weight = weight - learning_rate * node_delta_error * node_input
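+     // editor's note, an illustrative calculation (the numbers are made up):
+     //     with node_output = 0.8, node_target = 1.0 and a sigmoid node,
+     //     derivative = 0.8 * (1 - 0.8) = 0.16, so node_delta_error = (0.8 - 1.0) * 0.16 = -0.032;
+     //     with learning_rate = 0.5 and node_input = 0.6 the weight moves by -0.5 * -0.032 * 0.6 = +0.0096.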
+     //##########################################################################
+     // Work in progress...
    //##########################################################################
    float[] _errors = array.new_float(0)
    float[] _error_deltas = array.new_float(_total_nodes)
    int _last_delta_idx = -1

    // iterate over the network backwards
-     for _layer = (_size_ls - 1) to 0
-         //layer weights
-         int _this_layer_size = array.get(layer_sizes, _layer)
-
-         bool _is_not_last_layer = _layer != (_size_ls - 1)
+     for _l = (_size_ls - 1) to 0
+         int _this_layer_size = array.get(layer_sizes, _l)
+         int _number_of_nodes_up_to_this_layer = array.sum(array.slice(layer_sizes, 0, _l))
+
+         bool _is_not_last_layer = _l != (_size_ls - 1)
        if _is_not_last_layer
+             int _next_layer_size = array.get(layer_sizes, _l + 1)
+             int _w_base_idx = _number_of_nodes_up_to_this_layer * _weights_max_width
+             int _number_of_weights_in_layer = _this_layer_size * _next_layer_size
            // get current layer weights:
-             int _w_base_idx = array.sum(array.slice(layer_sizes, 0, _layer)) * _weights_max_width
-             float[] _layer_weights = array.slice(weights, _w_base_idx, _w_base_idx + array.get(layer_sizes, _layer) * array.get(layer_sizes, _layer + 1))
-
-             int _next_layer_size = array.get(layer_sizes, _layer + 1)
-             // iterate over weights in current layer
-             for _w = 0 to array.size(_layer_weights) - 1
-                 float _error = 0.0
-                 float _weight_in_this_layer = array.get(_layer_weights, _w)
-                 // iterate over nodes in next layer
-                 for _next_node = 0 to _next_layer_size - 1
-                     float _error_delta_in_next_layer_node = 1.0 // #############
-                     _error += _weight_in_this_layer * _error_delta_in_next_layer_node
-                 array.push(_errors, _error)
+             float[] _layer_weights = array.slice(weights, _w_base_idx, _w_base_idx + _number_of_weights_in_layer)
+             float[] _layer_error_deltas = array.slice(_error_deltas, _number_of_nodes_up_to_this_layer, _number_of_nodes_up_to_this_layer + _this_layer_size)
+             propback_calculate_errors_for_hidden(_errors, _layer_weights, _layer_error_deltas)//, _next_layer_size, _number_of_nodes_up_to_this_layer)
        else
            int _output_layer_size = array.get(layer_sizes, _size_ls - 1)
            float[] _output_layer = array.slice(_layer_outputs, _total_nodes - _output_layer_size, _total_nodes)
-             for _node_idx = 0 to _output_layer_size - 1
-                 float _target = array.get(targets, _node_idx)
-                 float _output = array.get(_output_layer, _node_idx)
-                 array.push(_errors, _target - _output)
-
+             propback_calculate_errors_for_output(_errors, _output_layer, targets)
+
+         // calculate the error delta of each node in current layer:
+         // TODO:
+         //     - derivative based on the activation function of current layer.
        for _n = 0 to _this_layer_size - 1
-             float _this_node = 1.0//this_layer[_n]
+             float _this_node = array.get(_layer_outputs, _number_of_nodes_up_to_this_layer + _n)
            _last_delta_idx += 1
-             array.set(_error_deltas, _last_delta_idx, array.get(_errors, _n) * activation.sigmoid_derivative(_this_node))//float _this_node_delta = 0.0//errors[_n] * derivative(this_node_output)
-
-     [_layer_outputs, _error_deltas]
+             array.set(_error_deltas, _last_delta_idx, nz(array.get(_errors, _n), 1.0) * activation.sigmoid_derivative(_this_node))//###########################
+
+     // update weights:
+     // https://machinelearningmastery.com/implement-backpropagation-algorithm-scratch-python/
+     // TODO:
+     //     for each layer
+     //         for each node in layer
+     //             for each weight in ?*input layer*?
+     // // # Update network weights with error
+     // // def update_weights(network, row, l_rate):
+     // //     for i in range(len(network)):
+     // //         inputs = row[:-1]
+     // //         if i != 0:
+     // //             inputs = [neuron['output'] for neuron in network[i - 1]]
+     // //         for neuron in network[i]:
+     // //             for j in range(len(inputs)):
+     // //                 neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
+     // //             neuron['weights'][-1] += l_rate * neuron['delta']
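+     // editor's sketch: one possible Pine translation of the update step above.
+     // _l_rate is a hypothetical parameter; the input-major, _weights_max_width-padded
+     // flat weight layout and node-aligned _error_deltas are assumptions (the loop
+     // above currently fills _error_deltas from the output layer backward):
+     // for _l = 1 to _size_ls - 1
+     //     int _nodes_before_prev = array.sum(array.slice(layer_sizes, 0, _l - 1))
+     //     int _prev_size = array.get(layer_sizes, _l - 1)
+     //     int _w_base = _nodes_before_prev * _weights_max_width
+     //     for _j = 0 to array.get(layer_sizes, _l) - 1
+     //         float _delta = array.get(_error_deltas, _nodes_before_prev + _prev_size + _j)
+     //         for _i = 0 to _prev_size - 1
+     //             int _w_idx = _w_base + _i * _weights_max_width + _j
+     //             float _in = array.get(_layer_outputs, _nodes_before_prev + _i)
+     //             array.set(weights, _w_idx, array.get(weights, _w_idx) + _l_rate * _delta * _in)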
+
+     [_layer_outputs, _error_deltas, _errors]
//{ usage:
if barstate.islastconfirmedhistory
    // 7, 5 = i3, h5 h3 o2
@@ -144,8 +205,8 @@ if barstate.islastconfirmedhistory
    layer_sizes = array.from(5, 3, 2)
    layer_biases = array.from(1.0, 1.0, 1.0)
    layer_functions = array.from('sigmoid', 'sigmoid', 'sigmoid')
-     [o, e] = network(inputs, expected_outputs, weights, layer_sizes, layer_biases, layer_functions, 'mse')
-     label.new(bar_index, 0.0, str.format('{0}\n {1}', str.tostring(o), str.tostring(e)))
+     [o, d, e] = network(inputs, expected_outputs, weights, layer_sizes, layer_biases, layer_functions, 'mse')
+     label.new(bar_index, 0.0, str.format('output: {0}\ndelta: {1}\nerror: {2}\nweights: {3}, errors: {4}', str.tostring(o), str.tostring(d), str.tostring(e), array.size(weights), array.size(e)))
//{ remarks:
//}}}