@@ -19,7 +19,7 @@ def __init__(self, net, layer_name):
         self.net = net
         self.layer_name = layer_name
         self.feature = []
-        self.gradient = None
+        self.gradient = []
         self.net.eval()
         self.handlers = []
         self._register_hook()
@@ -37,7 +37,8 @@ def _get_grads_hook(self, module, input_grad, output_grad):
         :param output_grad:tuple,长度为1
         :return:
         """
-        self.gradient = output_grad[0]
+        self.gradient.insert(0, output_grad[0])  # 梯度的顺序反的
+        print("gradient shape:{}".format(output_grad[0].size()))

     def _register_hook(self):
         for (name, module) in self.net.named_modules():
@@ -63,7 +64,7 @@ def __call__(self, inputs, index=0):
         feature_level = output[0]['instances'].feature_levels[index]  # box来自第几层feature map
         score.backward()

-        gradient = self.gradient[0].cpu().data.numpy()  # [C,H,W]
+        gradient = self.gradient[feature_level][0].cpu().data.numpy()  # [C,H,W]
         weight = np.mean(gradient, axis=(1, 2))  # [C]

         # feature_level 指feature map的层级,0去除batch维
@@ -103,7 +104,7 @@ def __call__(self, inputs, index=0):
         feature_level = output[0]['instances'].feature_levels[index]  # box来自第几层feature map
         score.backward()

-        gradient = self.gradient[0].cpu().data.numpy()  # [C,H,W]
+        gradient = self.gradient[feature_level][0].cpu().data.numpy()  # [C,H,W]
         gradient = np.maximum(gradient, 0.)  # ReLU
         indicate = np.where(gradient > 0, 1., 0.)  # 示性函数
         norm_factor = np.sum(gradient, axis=(1, 2))  # [C]归一化