xor-mlp.lua
forked from hpenedones/luacnn
require "nn"
mlp = nn.Sequential(); -- make a multi-layer perceptron
inputs = 2; outputs = 1; HUs = 20; -- parameters
mlp:add(nn.Linear(inputs, HUs))
mlp:add(nn.Tanh())
mlp:add(nn.Linear(HUs, outputs))
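
-- Not in the original script, but handy while reading it: printing an nn
-- container shows the assembled stack, Linear(2 -> 20) -> Tanh -> Linear(20 -> 1).
print(mlp)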
criterion = nn.MSECriterion()     -- mean squared error loss

for i = 1, 2500 do
   -- draw a random sample: a normally distributed point in 2D
   local input = torch.randn(2)
   local output = torch.Tensor(1)
   if input[1] * input[2] > 0 then -- XOR-like label on the signs of the coordinates
      output[1] = -1               -- both coordinates have the same sign
   else
      output[1] = 1                -- the coordinates have opposite signs
   end

   -- feed it to the neural network and the criterion
   -- (the returned loss value is discarded here)
   criterion:forward(mlp:forward(input), output)

   -- train on this example in 3 steps:
   -- (1) zero the accumulated gradients
   mlp:zeroGradParameters()
   -- (2) accumulate gradients by backpropagating the criterion's gradient
   mlp:backward(input, criterion:backward(mlp.output, output))
   -- (3) update the parameters with a 0.01 learning rate
   mlp:updateParameters(0.01)
end
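
-- A small sanity check, not in the original: the loss on one fresh sample should
-- now be far below the ~1.0 an untrained network scores against the ±1 targets.
local checkInput = torch.randn(2)
local checkTarget = torch.Tensor(1)
checkTarget[1] = (checkInput[1] * checkInput[2] > 0) and -1 or 1
print("post-training loss on a fresh sample:",
      criterion:forward(mlp:forward(checkInput), checkTarget))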
-- probe the trained network with one point per quadrant
x = torch.Tensor(2)
x[1] =  0.5; x[2] =  0.5; print("input:\n", x, "\noutput:", mlp:forward(x))
x[1] =  0.5; x[2] = -0.5; print("input:\n", x, "\noutput:", mlp:forward(x))
x[1] = -0.5; x[2] =  0.5; print("input:\n", x, "\noutput:", mlp:forward(x))
x[1] = -0.5; x[2] = -0.5; print("input:\n", x, "\noutput:", mlp:forward(x))
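
-- A minimal evaluation sketch (an addition, not part of the original example):
-- turn the network output into a hard ±1 prediction via its sign and estimate
-- accuracy on freshly drawn samples. `trials` and `correct` are illustrative
-- names, not anything from the Torch API.
local trials, correct = 1000, 0
for i = 1, trials do
   local p = torch.randn(2)
   local target = (p[1] * p[2] > 0) and -1 or 1
   local prediction = (mlp:forward(p)[1] >= 0) and 1 or -1
   if prediction == target then correct = correct + 1 end
end
print(string.format("accuracy on %d random samples: %.1f%%",
                    trials, 100 * correct / trials))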