// Wei Chen - Neural Network Test
// 2018-11-29
import com.scalaml.TestData._
import com.scalaml.general.MatrixFunc._
import com.scalaml.algorithm.{NeuralNetwork, TANH, SIGMOID, RELU, LINEAR, SQUARE, L1, L2}
import org.scalatest.funsuite.AnyFunSuite
class NeuralNetworkSuite extends AnyFunSuite {
    val hidden_layer = Array(5, 4, 3)
    val input_column = UNLABELED_LARGE_HIGH_DIM_DATA.head.size
    val output_column = TARGET_LARGE_HIGH_DIM_DATA.head.size
    val layer_neurons = input_column +: hidden_layer :+ output_column
    val limit = 20000
    val nn_learning_rate = 0.05
    // Letter ids "a", "b", "c", ... for the input columns.
    // ('a' + i) alone is an Int, so toChar is needed to recover the letter.
    val inputIds = (for (i <- 0 until input_column) yield ('a' + i).toChar.toString).toArray
    val nn = new NeuralNetwork()
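    // Topology sketch: layer_neurons lists neuron counts from input to output.
    // With hypothetical sizes input_column = 10 and output_column = 3 (the real
    // values come from TestData), it would be Array(10, 5, 4, 3, 3):
    // the input layer, the three hidden layers, and the output layer.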
test("NeuralNetwork Test : SIGMOID Initialization") { // most stable in small data
assert(nn.config(layer_neurons, SIGMOID, LINEAR, null, inputIds,
_gradientClipping = true))
}
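    // Gradient clipping caps each gradient before the weight update, e.g.
    // g = math.max(-c, math.min(c, g)) for some threshold c (a sketch only;
    // the actual clipping rule and threshold are internal to NeuralNetwork).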
test("NeuralNetwork Test : SIGMOID Training") {
assert(nn.train(UNLABELED_LARGE_HIGH_DIM_DATA, TARGET_LARGE_HIGH_DIM_DATA,
iter = limit, _learningRate = nn_learning_rate))
assert(!nn.network.isEmpty)
}
test("NeuralNetwork Test : SIGMOID Predict") {
val result = nn.predict(UNLABELED_SMALL_HIGH_DIM_DATA)
val loss = result.zip(TARGET_SMALL_HIGH_DIM_DATA).map { case (a1, a2) =>
arrayminussquare(a1, a2).sum / result.head.size
}.sum / result.size
Console.err.println("SIGMOID: " + loss)
assert(matrixsimilar(result, TARGET_SMALL_HIGH_DIM_DATA, 0.5))
}
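    // The loss above is a mean squared error: the squared element-wise
    // difference, averaged over output dimensions and then over samples. A
    // small helper capturing the same computation (a sketch, not part of the
    // original suite; the inline versions below are left as written):
    private def meanSquaredLoss(pred: Array[Array[Double]],
                                target: Array[Array[Double]]): Double =
        pred.zip(target).map { case (p, t) =>
            arrayminussquare(p, t).sum / p.size // mean over output dimensions
        }.sum / pred.size                       // mean over samples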
test("NeuralNetwork Test : TANH Activation Functions") {
val nn2 = new NeuralNetwork()
assert(nn2.config(layer_neurons, TANH))
assert(nn2.train(UNLABELED_LARGE_HIGH_DIM_DATA, TARGET_LARGE_HIGH_DIM_DATA,
iter = limit, _learningRate = nn_learning_rate))
val result = nn2.predict(UNLABELED_SMALL_HIGH_DIM_DATA)
val loss = result.zip(TARGET_SMALL_HIGH_DIM_DATA).map { case (a1, a2) =>
arrayminussquare(a1, a2).sum / result.head.size
}.sum / result.size
Console.err.println("TANH: " + loss)
assert(loss < 0.5)
// assert(matrixsimilar(result, TARGET_SMALL_HIGH_DIM_DATA, 0.5))
}
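    // TANH outputs in (-1, 1) and is zero-centered, while SIGMOID outputs in
    // (0, 1); both saturate for large |x|, so this test can reuse the same
    // iteration limit and learning rate as the SIGMOID tests above.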
test("NeuralNetwork Test : RELU Activation Functions") { // not stable
val nn2 = new NeuralNetwork()
assert(nn2.config(layer_neurons, RELU))
assert(nn2.train(UNLABELED_LARGE_HIGH_DIM_DATA, TARGET_LARGE_HIGH_DIM_DATA,
iter = limit * 2, _learningRate = nn_learning_rate / 5))
val result = nn2.predict(UNLABELED_SMALL_HIGH_DIM_DATA)
val loss = result.zip(TARGET_SMALL_HIGH_DIM_DATA).map { case (a1, a2) =>
arrayminussquare(a1, a2).sum / result.head.size
}.sum / result.size
Console.err.println("RELU: " + loss)
assert(loss < 0.5)
// assert(matrixsimilar(result, TARGET_SMALL_HIGH_DIM_DATA, 0.5))
}
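    // RELU output is max(0, x) with derivative 0 for x < 0, so a neuron whose
    // pre-activation stays negative receives no gradient and stops learning:
    // a "dead cell", as the L1 test below notes.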
test("NeuralNetwork Test : RELU Activation Functions - L1") { // more stable + dead cell
val nn2 = new NeuralNetwork()
assert(nn2.config(layer_neurons, RELU, LINEAR, L1))
assert(nn2.train(UNLABELED_LARGE_HIGH_DIM_DATA, TARGET_LARGE_HIGH_DIM_DATA,
iter = limit * 2, _learningRate = nn_learning_rate / 5))
val result = nn2.predict(UNLABELED_SMALL_HIGH_DIM_DATA)
val loss = result.zip(TARGET_SMALL_HIGH_DIM_DATA).map { case (a1, a2) =>
arrayminussquare(a1, a2).sum / result.head.size
}.sum / result.size
Console.err.println("RELU L1: " + loss)
assert(loss < 0.5)
// assert(matrixsimilar(result, TARGET_SMALL_HIGH_DIM_DATA, 0.5))
}
test("NeuralNetwork Test : RELU Activation Functions - L2") { // most stable
val nn2 = new NeuralNetwork()
assert(nn2.config(layer_neurons, RELU, LINEAR, L2))
assert(nn2.train(UNLABELED_LARGE_HIGH_DIM_DATA, TARGET_LARGE_HIGH_DIM_DATA,
iter = limit * 2, _learningRate = nn_learning_rate / 5))
val result = nn2.predict(UNLABELED_SMALL_HIGH_DIM_DATA)
val loss = result.zip(TARGET_SMALL_HIGH_DIM_DATA).map { case (a1, a2) =>
arrayminussquare(a1, a2).sum / result.head.size
}.sum / result.size
Console.err.println("RELU L2: " + loss)
assert(loss < 0.5)
// assert(matrixsimilar(result, TARGET_SMALL_HIGH_DIM_DATA, 0.5))
}
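    // Regularization sketch: with penalty weight lambda, a plain SGD step
    // would subtract the penalty derivative along with the loss gradient
    // (an illustration only; NeuralNetwork's internal update rule may differ):
    //   w -= learningRate * (gradient + lambda * L1.der(w)) // L1: sign(w)
    //   w -= learningRate * (gradient + lambda * L2.der(w)) // L2: w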
test("NeuralNetwork Test : Invalid Config") {
assert(!nn.config(null))
}
test("NeuralNetwork Test : Unit Functions") {
assert(SQUARE.error(1, 1) == 0)
assert(SQUARE.error(1, 0) == 0.5)
assert(L1.output(1) == 1)
assert(L1.output(-1) == 1)
assert(L1.der(1) == 1)
assert(L1.der(-1) == -1)
assert(L2.output(1) == 0.5)
assert(L2.output(-1) == 0.5)
assert(L2.der(1) == 1)
assert(L2.der(-1) == -1)
}
}