Commit eaf6c58

committed
add files
1 parent 5e2ef2f commit eaf6c58

15 files changed (+4283, −0 lines)

DFR.py

Lines changed: 682 additions & 0 deletions
Large diffs are not rendered by default.

DFR_log.py

Lines changed: 327 additions & 0 deletions
@@ -0,0 +1,327 @@
# from __future__ import absolute_import, division, print_function, unicode_literals
# Import TensorFlow and tf.keras
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
from tensorflow import keras
from keras import backend as K  # needed by the LR callbacks below
from keras import initializers
from keras import optimizers
from keras.callbacks import *
from keras.models import Sequential, load_model
from keras.layers import Conv2D, LSTM, Flatten, Dense, Activation, BatchNormalization, Dropout, Reshape, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l1, l2
from keras.utils import multi_gpu_model

# Import helper libraries
import numpy as np
import matplotlib.pyplot as plt
import datetime

# Check the TensorFlow version
print(tf.__version__)

def mkdir(path):
    # Strip leading/trailing whitespace
    path = path.strip()
    # Strip a trailing backslash
    path = path.rstrip("\\")

    # Check whether the path already exists
    isExists = os.path.exists(path)

    if not isExists:
        # Create the directory if it does not exist
        os.makedirs(path)
        print(path + ' created successfully')
        return True
    else:
        # Do not create it if it already exists; report that instead
        print(path + ' already exists')
        return False


class ParallelModelCheckpoint(ModelCheckpoint):
    """Checkpoint callback that saves the wrapped single-GPU model even when
    training runs through a multi_gpu_model wrapper."""

    def __init__(self, model, filepath, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False,
                 mode='auto', period=1):
        self.single_model = model
        super(ParallelModelCheckpoint, self).__init__(filepath, monitor, verbose, save_best_only,
                                                      save_weights_only, mode, period)

    def set_model(self, model):
        # Always point the checkpoint at the single-GPU model, not the parallel wrapper
        super(ParallelModelCheckpoint, self).set_model(self.single_model)

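# Usage sketch (illustrative, not part of the original file): this callback is
# for multi-GPU training, where a plain ModelCheckpoint would save the
# multi_gpu_model wrapper instead of the underlying model. `base_model` and
# 'weights.h5' below are hypothetical names.
#
#   base_model = Sequential([...])
#   parallel_model = multi_gpu_model(base_model, gpus=2)
#   ckpt = ParallelModelCheckpoint(base_model, filepath='weights.h5',
#                                  monitor='val_loss', save_best_only=True)
#   parallel_model.fit(x_train, y_train, callbacks=[ckpt])
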
class LR_Updater(Callback):
    '''This callback logs the learning rate every iteration (batch).
    It is not meant to be used directly as a callback, but to be extended
    by other callbacks, e.g. LR_Cycle.
    '''

    def __init__(self, iterations):
        '''
        iterations = dataset size / batch size
        epochs = passes through the full training dataset
        '''
        self.epoch_iterations = iterations
        self.trn_iterations = 0.
        self.history = {}

    def on_train_begin(self, logs={}):
        self.trn_iterations = 0.
        logs = logs or {}

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        # setRate() is provided by subclasses such as LR_Cycle
        K.set_value(self.model.optimizer.lr, self.setRate())
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

    def plot_lr(self):
        plt.xlabel("iterations")
        plt.ylabel("learning rate")
        plt.plot(self.history['iterations'], self.history['lr'])

    def plot(self, n_skip=10):
        plt.xlabel("learning rate (log scale)")
        plt.ylabel("loss")
        plt.plot(self.history['lr'], self.history['loss'])
        plt.xscale('log')


class LR_Cycle(LR_Updater):
    '''This callback implements cyclical learning rates.
    It is based on the fastai PyTorch implementation
    https://github.com/fastai/fastai/blob/master/fastai
    and adapted from this Keras implementation https://github.com/bckenstler/CLR
    '''

    def __init__(self, iterations, cycle_mult=1):
        '''
        iterations = number of iterations in one annealing cycle
                     (typically dataset size / batch size)
        cycle_mult = multiplier applied to the cycle length after every cycle
                     (for example, cycle_mult = 2 doubles the length of the
                     cycle at the end of each cycle)
        '''
        self.min_lr = 0
        self.cycle_mult = cycle_mult
        self.cycle_iterations = 0.
        super().__init__(iterations)

    def setRate(self):
        self.cycle_iterations += 1
        if self.cycle_iterations == self.epoch_iterations:
            print(self.epoch_iterations, 'change')
            self.cycle_iterations = 0.
            self.epoch_iterations *= self.cycle_mult
        cos_out = np.cos(np.pi * (self.cycle_iterations) / self.epoch_iterations) + 1
        if (self.cycle_iterations % 10) == 0:
            print(self.max_lr / 2 * cos_out)
        return self.max_lr / 2 * cos_out

    def on_train_begin(self, logs={}):
        super().on_train_begin(logs={})  # changed to {} to fix plots after going from 1 to mult. lr
        self.cycle_iterations = 0.
        self.max_lr = K.get_value(self.model.optimizer.lr)

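# Usage sketch (illustrative, not part of the original file): LR_Cycle performs
# SGDR-style cosine annealing with warm restarts. Within one cycle the learning
# rate decays from the optimizer's initial lr (max_lr) toward 0 following
# max_lr / 2 * (cos(pi * t / T) + 1), and cycle_mult stretches T after each
# restart. The numbers below are placeholders.
#
#   steps_per_cycle = train_size // batch_size   # e.g. one epoch per cycle
#   sgdr = LR_Cycle(iterations=steps_per_cycle, cycle_mult=2)
#   model.fit(x_train, y_train, epochs=20, callbacks=[sgdr])
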
class DFR_model1:
    def __init__(self, epochs=100000, batch_size=512, load_weights=True):
        self.name = 'DFR_log'
        self.model_filename = './DFR_log.h5'
        self.num_classes = 2
        self.input_shape = [28, 28, 1]
        self.epochs = epochs
        self.batch_size = batch_size
        self.weight_decay = 0.0001
        self.log_filepath = r'./DFR_log_tensorboard/'
        self.conv_l1_regularizer = 0.00045
        # self.lstm_l1_regularizer = 0.0003
        self.start_lr = 0.001  # Adam
        self.end_lr = 0.0001  # must not be too low: 0.0001
        self.patience = 50
        self.epoch_1 = 1
        self.epoch_2 = 2
        self.epoch_3 = 3
        self.lr_1 = 0.001
        self.lr_2 = 0.001
        self.lr_3 = 0.001  # tried: 0.55 0.5 0.475 0.04625 0.45 0.4375 0.4

        if load_weights:
            try:
                self._model = load_model(self.model_filename)
                print('Successfully loaded', self.name)
            except (ImportError, ValueError, OSError) as e:
                print(e)
                print('Failed to load', self.name)

    def count_params(self):
        return self._model.count_params()

    def build_model(self):
        # self.batch_size = self.batch_size * strategy.num_replicas_in_sync
        # with strategy.scope():
        model = Sequential([

            # FLATTEN: the 28x28x1 input is reshaped into a (1, 784, 1) sequence
            Reshape((-1, 784, 1), input_shape=self.input_shape),

            # CONV 1
            Conv2D(32, (1, 25,), padding='SAME', strides=[1, 1, ],
                   kernel_initializer=initializers.random_normal(stddev=0.1),
                   kernel_regularizer=l1(self.conv_l1_regularizer)),
            # BatchNormalization(),
            # Dropout(0.5),
            Activation('relu'),
            MaxPooling2D((1, 3), strides=(1, 3), padding='SAME'),

            # CONV 2
            Conv2D(64, (1, 25,), padding='SAME', strides=[1, 1, ],
                   kernel_initializer=initializers.random_normal(stddev=0.1),
                   kernel_regularizer=l1(self.conv_l1_regularizer)),
            # BatchNormalization(),
            # Dropout(0.5),
            Activation('relu'),
            MaxPooling2D((1, 3), strides=(1, 3), padding='SAME'),

            # DENSE 1 / Dropout
            Flatten(),
            Dense(1024, activation='relu', kernel_initializer=initializers.random_normal(stddev=0.1)),
            BatchNormalization(),
            # Dropout(0.2),
            Dense(2, activation='softmax', kernel_initializer=initializers.random_normal(stddev=0.1)),

        ])
        adam = optimizers.Adam(lr=self.start_lr, beta_1=0.9, beta_2=0.999, )  # 7/28: increased the training step size 10x
        model.compile(optimizer=adam,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        return model

    def scheduler(self, epoch):
        # Stepwise learning-rate schedule keyed to epoch milestones
        if epoch <= self.epoch_1:
            return self.lr_1
        if epoch <= self.epoch_2:
            return self.lr_2
        if epoch <= self.epoch_3:
            return self.lr_3
        return self.lr_3

    def train(self):
        train_data_path = './data_dfr_log/train_data.npy'
        train_label_path = './data_dfr_log/train_label.npy'
        test_data_path = './data_dfr_log/test_data.npy'
        test_label_path = './data_dfr_log/test_label.npy'

        train_data = np.load(train_data_path)
        train_label = np.load(train_label_path)
        test_data = np.load(test_data_path)
        test_label = np.load(test_label_path)

        print('train_data shape:', train_data.shape)
        print('train_label shape:', train_label.shape)
        print('test_data shape:', test_data.shape)
        print('test_label shape:', test_label.shape)

        train_data = train_data.reshape([-1, 28, 28, 1])
        # train_label = train_label.reshape([-1, 28, 28, 1])
        test_data = test_data.reshape([-1, 28, 28, 1])
        # test_label = test_label.reshape([-1, 28, 28, 1])

        print('train_data shape:', train_data.shape)
        print('train_label shape:', train_label.shape)
        print('test_data shape:', test_data.shape)
        print('test_label shape:', test_label.shape)

        # Scale the data (originally noted as normalization to [0, 255])
        self.x_test = test_data.astype(int)
        self.x_train = train_data.astype(int)
        self.x_test = 2 * self.x_test
        self.x_train = 2 * self.x_train
        y_train = keras.utils.to_categorical(train_label, self.num_classes)
        y_test = keras.utils.to_categorical(test_label, self.num_classes)
        self.y_test = y_test.astype(int)
        self.y_train = y_train.astype(int)

        # Model
        model = self.build_model()
        model.summary()

        # Directory for saved weights
        mkdir(self.model_filename + 'date_' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

        # Training callbacks
        change_lr = LearningRateScheduler(self.scheduler)

        checkpoint = ModelCheckpoint(
            self.model_filename + 'date_' + datetime.datetime.now().strftime(
                "%Y%m%d-%H%M%S") + '/' + 'epoch_' + '{epoch:02d}' + '_val_acc_' + '{val_acc:.4f}' + '.h5',
            monitor='val_acc',
            verbose=0,
            save_best_only=True,
            mode='auto',
            period=5)
        # plot_callback = PlotLearning()
        tb_cb = TensorBoard(
            log_dir=self.log_filepath + 'date_' + datetime.datetime.now().strftime(
                "%Y%m%d-%H%M%S") + '_conv_l1_' + str(self.conv_l1_regularizer) + '_lstm_l1_' + str(
                self.conv_l1_regularizer),
            histogram_freq=0)

        # Halve the learning rate when val_acc plateaus
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, verbose=1,
                                      patience=self.patience, min_lr=self.end_lr)

        # SGDR_lr = LR_Cycle(5000, 2)
        cbks = [checkpoint, tb_cb, reduce_lr]
        # (Legacy log line: no data augmentation is actually applied below.)
        print('Using real-time data augmentation.')
        model.fit(x=self.x_train, y=self.y_train,
                  batch_size=self.batch_size,
                  epochs=self.epochs,
                  callbacks=cbks,
                  verbose=2,
                  validation_data=(self.x_test, self.y_test),
                  )
        # Save the model
        model.save(self.model_filename + '.h5')

        self._model = model

    def predict(self, img):
        return self._model.predict(img, batch_size=self.batch_size)

    def predict_one(self, img):
        return self.predict(img)[0]

    def accuracy(self):
        return self._model.evaluate(self.x_test, self.y_test, verbose=0)[1]


if __name__ == '__main__':
    DFR = DFR_model1()
    DFR.train()
    print(DFR.accuracy())

# best (val_acc: 97): 0.003 0.001 0.000001/
# goaled: 0.01 0.0005 0.0000001 / 0.003 0.003 0.000001 / 0.01 0.005 / 0.003 0.001 0.000001 / 0.001 0.00013 0.0001
# failed: 0.01 0.001 0.000001/
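
# Inference sketch (illustrative, not part of the original file): with
# load_weights=True the constructor loads ./DFR_log.h5, after which
# predict_one returns the softmax distribution over the 2 classes.
# `sample` is a placeholder for one 28x28x1 input.
#
#   dfr = DFR_model1(load_weights=True)
#   sample = np.zeros((1, 28, 28, 1))
#   print(dfr.predict_one(sample))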

Readme.md

Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
# Attack code

## The composition of the directory

- 0_AEEA_dataset
  Our datasets are here, including a log dataset and a traffic dataset.
- 1_model_for_traffic
  This directory contains our pretrained models for the traffic dataset. You can inspect the training process with TensorBoard.
- 2_model_for_log
  This directory contains our pretrained models for the log dataset. You can inspect the training process with TensorBoard.
- 3_attack_code
  All the training code is in this directory.
  If you want to test your own model, add it in your_model_name.py and put your pretrained model here.
  You can also try different ways to attack models, such as random attack or differential evolution.
  It is convenient to try your models on different datasets.
- 4_EVALUATION
- 5_For_TEST_h5
  You can get the accurate attack results here.

## How to attack your models

- You need to pass your args to attack_for_traffic.py to attack models. Note: try your own models before you attack them.
- Example: `python model_name.py --model model_name --others`

## Environment

tensorflow_1_13_gpu
keras

## Some important tips

- You can write your own model to attack in Keras; it needs to follow the rules in 3_attack_code/model_name.py (see the sketch after this list).
- I wrote some comments on attacking models in 3_attack_code/attack_for_traffic.py.
- I wrote some comments on model building in 3_attack_code/happy.py.
- You can find the specific implementation of differential_evolution in 3_attack_code/differential_evolution.py.

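A minimal sketch of such a model wrapper, assuming the attack code expects the same interface as DFR_model1 in this commit (a loadable .h5 file plus `predict`/`predict_one`); the class name and weight path are placeholders:

```python
from keras.models import load_model


class YourModel:  # hypothetical name; follow the rules in 3_attack_code/model_name.py
    def __init__(self, model_filename='./your_model.h5', batch_size=512):
        self.batch_size = batch_size
        self._model = load_model(model_filename)  # your pretrained Keras model

    def predict(self, img):
        # img: a batch shaped like the model input, e.g. (n, 28, 28, 1)
        return self._model.predict(img, batch_size=self.batch_size)

    def predict_one(self, img):
        return self.predict(img)[0]
```
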
## GOOD LUCK ON YOUR JOURNEY IN AI!
