-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_revnet.py
93 lines (70 loc) · 2.72 KB
/
test_revnet.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import copy
import unittest
import numpy
import chainer
import chainer.functions as F
from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
import revnet
class TestRevnetStage(unittest.TestCase):
    """Tests for ``revnet.RevnetStage``.

    Two properties are checked:

    * the analytic gradient agrees with a numerical gradient
      (``gradient_check.check_backward``), and
    * the memory-saving (reverse-reconstruction) backward pass of the
      stage produces the same input/parameter gradients as a plain
      direct forward/backward pass (the module-level ``forward``
      helper).
    """

    def setUp(self):
        # Shape constants for the random test tensors.
        batch = 10          # batch size
        channels = 32       # channel count C (split in half inside the stage)
        height = width = 32
        n_units = 20        # number of residual units in the stage

        self.chainlist = revnet.RevnetStage(n_units, channels)
        shape = (batch, channels, height, width)
        self.x = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        # Loose tolerances: the stage is deep, so numerical gradients
        # accumulate noticeable float32 error.
        self.check_backward_options = {
            'dtype': numpy.float32, 'atol': 1e-2, 'rtol': 5e-2, 'eps': 5e-4}

    def check_backward(self, x_data, y_grad):
        # Numerical-vs-analytic gradient check on the stage as a whole.
        gradient_check.check_backward(
            lambda v: self.chainlist(v),
            x_data, y_grad, **self.check_backward_options)

    @condition.retry(5)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    @condition.retry(5)
    def test_backward_gpu(self):
        self.chainlist.to_gpu()
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    def check_allclose_backward_by_direct_and_reverse(self, x_data, y_grad):
        # Direct path: plain forward through the units, ordinary backprop.
        direct_net = copy.deepcopy(self.chainlist)
        direct_in = chainer.Variable(x_data)
        direct_out = forward(direct_net, direct_in)
        direct_out.grad = y_grad
        direct_out.backward()

        # Reverse path: the stage's own (memory-saving) implementation.
        reverse_net = copy.deepcopy(self.chainlist)
        reverse_in = chainer.Variable(x_data)
        reverse_out = reverse_net(reverse_in)
        reverse_out.grad = y_grad
        reverse_out.backward()

        # Input gradients and every parameter gradient must agree
        # (tolerances are wide; reconstruction error grows with depth).
        testing.assert_allclose(direct_in.grad, reverse_in.grad,
                                atol=1e-3, rtol=3e-0)
        for p_d, p_r in zip(direct_net.params(), reverse_net.params()):
            testing.assert_allclose(p_d.grad, p_r.grad,
                                    atol=1e-3, rtol=2e-0)

    @condition.retry(5)
    def test_check_allclose_backward_by_direct_and_reverse_cpu(self):
        self.check_allclose_backward_by_direct_and_reverse(self.x, self.gy)

    @attr.gpu
    @condition.retry(5)
    def test_check_allclose_backward_by_direct_and_reverse_gpu(self):
        self.chainlist.to_gpu()
        self.check_allclose_backward_by_direct_and_reverse(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def forward(chainlist, x):
    """Direct (non-reversible) forward pass over a stage's residual units.

    Splits ``x`` in half along the channel axis, applies each unit with
    the reversible-residual update ``(h1, h2) -> (h2 + unit(h1), h1)``,
    and concatenates the halves back together.  Serves as the reference
    against which the stage's memory-saving backward is compared.
    """
    h1, h2 = F.split_axis(x, 2, axis=1)
    for unit in chainlist:
        # Simultaneous assignment fuses the original add-then-swap pair.
        h1, h2 = h2 + unit(h1), h1
    return F.concat((h1, h2), axis=1)
# Chainer's test-runner hook: discovers and runs this module's tests.
testing.run_module(__name__, __file__)