@@ -23,7 +23,7 @@ class TestDynamicShape(unittest.TestCase):
2323
2424 def _get_rnd_float32 (self , low = - 1.0 , high = 1.0 , shape = None ):
2525 output = np .random .uniform (low , high , shape )
26- if shape == None :
26+ if shape is None :
2727 return np .float32 (output )
2828 else :
2929 return output .astype (np .float32 )
@@ -87,6 +87,100 @@ def test_arg_min(self):
8787 expected_output = x .shape [axis ] - expected_output - 1
8888 np .testing .assert_almost_equal (output ['Y' ], expected_output )
8989
90+ def _batch_normalization (self , x , mean , variance , bias , scale ,
91+ variance_epsilon ):
92+ inv = np .reciprocal (np .sqrt (variance + variance_epsilon ))
93+ if scale is not None :
94+ inv *= scale
95+ return x * inv + (bias - mean * inv if bias is not None else - mean * inv )
96+
def test_batch_normalization(self):
  """BatchNormalization with fully dynamic shapes must match the NumPy
  reference implementation in ``_batch_normalization``."""
  if legacy_opset_pre_ver(6):
    raise unittest.SkipTest("Backend doesn't support consumed flag")

  # Random input plus per-channel parameters; the reshaped copies put
  # the channel axis in broadcasting position for the NumPy reference.
  x_shape = [3, 5, 4, 2]
  param_shape = [5]
  broadcast_shape = [1, 5, 1, 1]
  x = self._get_rnd_float32(0, 1, shape=x_shape)
  m = self._get_rnd_float32(0, 1, shape=param_shape)
  v = self._get_rnd_float32(0, 1, shape=param_shape)
  scale = self._get_rnd_float32(0, 1, shape=param_shape)
  bias = self._get_rnd_float32(0, 1, shape=param_shape)
  golden = self._batch_normalization(x, m.reshape(broadcast_shape),
                                     v.reshape(broadcast_shape),
                                     bias.reshape(broadcast_shape),
                                     scale.reshape(broadcast_shape), 0.001)

  node_def = helper.make_node("BatchNormalization",
                              ["X", "scale", "bias", "mean", "var"], ["Y"],
                              epsilon=0.001)
  # Every dimension is declared unknown (None) to exercise dynamic shapes.
  param_infos = [
      helper.make_tensor_value_info(name, TensorProto.FLOAT, [None])
      for name in ("scale", "bias", "mean", "var")
  ]
  graph_def = helper.make_graph(
      [node_def],
      name="test_unknown_shape",
      inputs=[
          helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                        [None, None, None, None])
      ] + param_infos,
      outputs=[
          helper.make_tensor_value_info("Y", TensorProto.FLOAT,
                                        [None, None, None, None])
      ])

  tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
  output = tf_rep.run({"X": x, "scale": scale, "bias": bias, "mean": m, "var": v})
  np.testing.assert_almost_equal(output["Y"], golden, decimal=5)
132+
def test_conv_transpose(self):
  """ConvTranspose with a dynamic batch dimension must match a naive
  NumPy reference computed by direct summation.

  Tests a stride-1 2-D transposed convolution with pads=[1,1,1,1] and
  a graph whose batch dimension is unknown (None).
  """
  # test dynamic batch size on transpose of 2d convolution
  pads = [1, 1, 1, 1]
  x_shape = [1, 3, 4, 6]
  x = self._get_rnd_float32(shape=x_shape)
  # ConvTranspose weight layout: [in_channels, out_channels, kH, kW].
  weight_shape = [3, 5, 2, 2]
  weights = self._get_rnd_float32(shape=weight_shape)

  node_def = helper.make_node("ConvTranspose", ["X", "weights"], ["Y"],
                              pads=pads)
  graph_def = helper.make_graph(
      [node_def],
      name="test_unknown_shape",
      inputs=[
          helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, 3, 4, 6]),
          helper.make_tensor_value_info("weights", TensorProto.FLOAT, weight_shape)
      ],
      outputs=[
          helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
      ])

  tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
  output = tf_rep.run({"X": x, "weights": weights})

  kh = weight_shape[2]
  kw = weight_shape[3]
  # Effective zero-padding of the equivalent direct convolution:
  # (kernel - 1) minus the ONNX ConvTranspose pad on each border.
  padh_left = kh - 1 - pads[0]
  padh_right = kh - 1 - pads[1]
  padw_left = kw - 1 - pads[2]
  padw_right = kw - 1 - pads[3]

  # Stride-1 output size: in + pad_left + pad_right - (kernel - 1).
  # Fix: the original summed pad_right twice, which only gave the right
  # answer because pads here are symmetric.
  outh = x_shape[2] + padh_left + padh_right - (kh - 1)
  outw = x_shape[3] + padw_left + padw_right - (kw - 1)

  out_shape = [x_shape[0], weight_shape[1], outh, outw]

  # Naive reference: each output element sums contributions from every
  # input channel through the spatially flipped kernel.
  test_output = np.zeros(out_shape)
  for b in range(0, x_shape[0]):
    for m in range(0, weight_shape[1]):
      for c in range(0, x_shape[1]):
        for h in range(0, outh):
          for w in range(0, outw):
            for k1 in range(h, h + kh):
              for k2 in range(w, w + kw):
                # Guard both borders; the original only checked the
                # lower bound (a latent out-of-range for other shapes).
                if (0 <= k1 - padh_left < x_shape[2] and
                    0 <= k2 - padw_left < x_shape[3]):
                  test_output[b][m][h][w] += x[b][c][k1 - padh_left][
                      k2 - padw_left] * weights[c][m][kh + h - 1 -
                                                      k1][kw + w - 1 - k2]

  np.testing.assert_almost_equal(output["Y"], test_output, decimal=5)
183+
90184 def test_slice (self ):
91185 # test case 1 with normal inputs
92186 axes = [0 , 1 , 2 ]
0 commit comments