
Commit cef8dd3

committed (no commit message) · 1 parent e313b2e

File tree: 4 files changed, +256 -154 lines changed


demo.py

Lines changed: 57 additions & 55 deletions
@@ -5,7 +5,6 @@
 @author: yamane
 """

-from path import Path
 import os
 import numpy as np
 import matplotlib.pyplot as plt
@@ -19,13 +18,13 @@
 import make_html


-def lossfun(model, stream):
+def lossfun(model, stream, preprocess):
     loss = []
     loss_abs = []
     target = []
     predict = []
     for it in stream.get_epoch_iterator():
-        x, t_l = load_datasets.data_crop(it[0])
+        x, t_l = load_datasets.data_crop(it[0], preprocess=preprocess)
         y_l = model.predict(x, True)
         e_l = t_l - y_l
         e_l_abs = np.abs(t_l - y_l)
@@ -51,9 +50,12 @@ def show_and_save(stream, target, predict, save_path_f, save_path_d,
         fix_img = utility.change_aspect_ratio(dis_img, 1/y_r[batch][i], 1)

         print('[test_data]:', i+1)
-        print('[t_l]:', np.round(target[batch][i], 4), '\t[t_r]:', np.round(t_r[batch][i], 4))
-        print('[y_l]:', np.round(predict[batch][i], 4), '\t[y_r]:', np.round(y_r[batch][i], 4))
-        print('[e_l]:', np.round(e_l[i], 4), '\t[e_r]:', np.round(e_r[i], 4))
+        print('[t_l]:', np.round(target[batch][i], 4),
+              '\t[t_r]:', np.round(t_r[batch][i], 4))
+        print('[y_l]:', np.round(predict[batch][i], 4),
+              '\t[y_r]:', np.round(y_r[batch][i], 4))
+        print('[e_l]:', np.round(e_l[i], 4), '\t[e_r]:',
+              np.round(e_r[i], 4))

         plt.figure(figsize=(16, 16))
         plt.subplot(131)
@@ -105,89 +107,89 @@ def draw_graph(loss, loss_abs, success_asp, num_test, save_root):
     else:
         max_value = np.abs(min(error))

-    plt.figure(figsize=(16, 12))
+    plt.rcParams["font.size"] = 18
+    plt.figure(figsize=(8, 3))
     plt.plot(error_abs)
     plt.plot(base_line, 'r-')
-    plt.title('absolute Error for each test data', fontsize=28)
-    plt.legend(["Error", "log(1.1)"], loc="upper right")
-    plt.xlabel('Order of test data number', fontsize=28)
-    plt.ylabel('Error(|t-y|) in log scale', fontsize=28)
+    plt.legend(["Error", "log(1.1303)"], loc="upper left")
+    plt.xlabel('Order of test data number', fontsize=24)
+    plt.ylabel('Error(|t-y|) in log scale', fontsize=24)
     plt.ylim(0, max(error_abs)+0.01)
     plt.grid()
     plt.savefig(loss_abs_file+'.jpg', format='jpg', bbox_inches='tight')
     plt.show()

-    plt.figure(figsize=(16, 12))
+    plt.figure(figsize=(8, 3))
     plt.plot(error, label='Error')
-    plt.plot(base_line, label="log(1.1)")
-    plt.plot(-base_line, label="log(1.1^-1)")
+    plt.plot(base_line, label="log(1.1303)")
+    plt.plot(-base_line, label="log(1.1303^-1)")
     plt.title('Error for each test data', fontsize=28)
-    plt.legend(loc="upper right")
+    plt.legend(loc="upper left")
     plt.xlabel('Order of test data number', fontsize=28)
     plt.ylabel('Error(t-y) in log scale', fontsize=28)
     plt.ylim(-max_value-0.01, max_value+0.01)
     plt.grid()
     plt.savefig(loss_file+'.jpg', format='jpg', bbox_inches='tight')
     plt.show()

-    fig = plt.figure(figsize=(16, 12))
+    fig = plt.figure(figsize=(8, 3))
     ax = fig.add_subplot(1, 1, 1)
-    ax.hist(error, bins=25)
-    ax.set_title('Error histogram', fontsize=28)
-    ax.set_xlabel('Error(t-y) in log scale', fontsize=28)
-    ax.set_ylabel('Percentage', fontsize=28)
-    plt.xlim(-1, 1)
+    ax.hist(error, bins=22, range=(-1.0, 1.0))
+    ax.set_xlabel('Error in log scale', fontsize=20)
+    ax.set_ylabel('Percentage', fontsize=20)
+    plt.grid()
+    # plt.xlim(-1, 1)
     plt.savefig(loss_hist+'.jpg', format='jpg', bbox_inches='tight')
     fig.show()

     count = 0
     for i in range(num_test):
         if loss_abs[0][i] < threshold:
             count += 1
-    print('under log(1.1) =', count, '%')
+    print('under log(1.1303) =', count, '%')
     print('[mean]:', np.mean(loss_abs))


 if __name__ == '__main__':
     # Where to save the test results
-    # save_root = r'E:\demo'
-    save_root = r'E:\yamane'
+    save_root = r'demo'
     # Location of the model npz file used for testing
-    # model_file = r'voc2012_regression_max_pooling\1489665734.69_asp_max_4.0\voc2012_regression_max_pooling.npz'
-    model_file = 'dog_data_regression_ave_pooling.npz'
+    model_file = r'npz\dog_data_regression_ave_pooling.npz'
     num_train = 16500  # number of training samples
     num_valid = 500  # number of validation samples
     num_test = 100  # number of test samples
     asp_r_max = 3.0  # maximum aspect ratio of the distorted images
-    success_asp = 1.1  # largest corrected-image aspect ratio counted as a successful fix
+    success_asp = np.exp(0.12247601469)  # largest corrected-image aspect ratio counted as a successful fix
     batch_size = 100
+    preprocesses = [None, 'edge', 'blur']  # specify None or 'edge' or 'blur'

     # Use the model file name as the folder name
-    # folder_name = model_file.split('\\')[-2]
-    folder_name = model_file.split('.')[-2]
-
-    # Create folders to save the test results
-    test_folder_path = utility.create_folder(save_root, folder_name)
-    fix_folder_path = utility.create_folder(test_folder_path, 'fix')
-    dis_folder_path = utility.create_folder(test_folder_path, 'distorted')
-    ori_folder_path = utility.create_folder(test_folder_path, 'original')
-    model_path = Path(save_root) / model_file
-
-    # Load the model
-    model = voc2012_regression_max_pooling.Convnet().to_gpu()
-    serializers.load_npz(model_path, model)
-
-    # Get the data streams
-    streams = load_datasets.load_voc2012_stream(
-        batch_size, num_train, num_valid, num_test)
-    train_stream, valid_stream, test_stream = streams
-
-    # Run correction of the distorted images
-    with chainer.using_config('train', False):
-        loss, loss_abs, target, predict = lossfun(model, test_stream)
-
-    show_and_save(test_stream, target, predict, fix_folder_path,
-                  dis_folder_path, ori_folder_path)
-
-    # Plot the error of the correction results
-    draw_graph(loss, loss_abs, success_asp, num_test, test_folder_path)
+    folder_name = model_file.split('\\')[-2]
+
+    for preprocess in preprocesses:
+        # Create folders to save the test results
+        test_folder_path = utility.create_folder(
+            save_root, folder_name + '_' + str(preprocess))
+        fix_folder_path = utility.create_folder(test_folder_path, 'fix')
+        dis_folder_path = utility.create_folder(test_folder_path, 'distorted')
+        ori_folder_path = utility.create_folder(test_folder_path, 'original')
+
+        # Load the model
+        model = voc2012_regression_max_pooling.Convnet().to_gpu()
+        serializers.load_npz(model_file, model)
+
+        # Get the data streams
+        streams = load_datasets.load_voc2012_stream(
+            batch_size, num_train, num_valid, num_test)
+        train_stream, valid_stream, test_stream = streams
+
+        # Run correction of the distorted images
+        with chainer.no_backprop_mode(), chainer.using_config('train', False):
+            loss, loss_abs, target, predict = lossfun(model, test_stream,
+                                                      preprocess=preprocess)
+
+        # show_and_save(test_stream, target, predict, fix_folder_path,
+        #               dis_folder_path, ori_folder_path)
+
+        # Plot the error of the correction results
+        draw_graph(loss, loss_abs, success_asp, num_test, test_folder_path)
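
A note on the magic number introduced above: np.exp(0.12247601469) evaluates to roughly 1.1303, which is why the legends and the summary print switch from log(1.1) to log(1.1303). A minimal sanity check of the constant, using nothing but NumPy (independent of this repository):

import numpy as np

success_asp = np.exp(0.12247601469)  # the constant this commit substitutes for 1.1
print(success_asp)                   # ~1.1303, matching the new legend labels
print(np.log(success_asp))           # 0.12247601469, presumably the threshold compared in draw_graph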

load_datasets.py

Lines changed: 13 additions & 1 deletion
@@ -71,14 +71,26 @@ def load_toy_stream(batch_size, ):


 def data_crop(X_batch, aspect_ratio_max=3.0, output_size=256, crop_size=224,
-              random=True, t=0):
+              random=True, t=0, preprocess=None):
+    if preprocess == 'edge':
+        kernel = np.array([[-1, -1, -1],
+                           [-1, 8, -1],
+                           [-1, -1, -1]
+                           ], np.float32)
+    elif preprocess == 'blur':
+        kernel = np.ones((5, 5), np.float32) / 25
+
     images = []
     ts = []

     for b in range(X_batch.shape[0]):
         # Choose the interpolation method at random
         u = np.random.randint(5)
         image = X_batch[b]
+
+        if preprocess is not None:
+            image = cv2.filter2D(image, -1, kernel)
+
         if random is False:
             t = t
         else: