|
| 1 | +# -*- coding: utf-8 -*- |
| 2 | +""" |
| 3 | +Created on Thu Sep 5 20:18:22 2019 |
| 4 | +
|
| 5 | +@author: 75129 |
| 6 | +""" |
| 7 | + |
| 8 | +# Copyright (c) Microsoft Corporation |
| 9 | +# All rights reserved. |
| 10 | +# |
| 11 | +# MIT License |
| 12 | +# |
| 13 | +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated |
| 14 | +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation |
| 15 | +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and |
| 16 | +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: |
| 17 | +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. |
| 18 | +# |
| 19 | +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING |
| 20 | +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 21 | +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, |
| 22 | +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 23 | +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 24 | +import os |
| 25 | +import argparse |
| 26 | +#import keras |
| 27 | +import numpy as np |
| 28 | +#from keras import backend as K |
| 29 | +from keras.models import Model |
| 30 | +from keras.callbacks import TensorBoard |
| 31 | +from keras.datasets import mnist |
| 32 | +from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D,BatchNormalization,Dropout,Input,merge,SeparableConv2D,DepthwiseConv2D,Conv3D,MaxPooling3D |
| 33 | +from keras.models import Sequential,Model |
| 34 | +from sklearn.metrics import roc_auc_score |
| 35 | +import keras.optimizers as op |
| 36 | +import keras.losses as losses |
| 37 | +from keras.callbacks import Callback |
| 38 | +from keras.backend import concatenate |
| 39 | +import nni |
| 40 | +import sys |
| 41 | +sys.path.append('D:/myGIT/Landslide-Susceptibility') |
| 42 | +import data_read |
| 43 | +import pandas as pd |
| 44 | + |
| 45 | +# np.random.seed(33) /3d |
| 46 | +np.random.seed(27) |
| 47 | + |
| 48 | +#K.set_image_data_format('channels_last') |
| 49 | + |
| 50 | +H, W = 40, 40 |
| 51 | +CHANNELS=16 |
| 52 | +NUM_CLASSES = 2 |
| 53 | + |
| 54 | + |
def create_3d_model(inputs, hyper_params):
    """Build the convolutional feature-extraction branch.

    Despite the "3d" in the name, this stack applies 2-D depthwise
    convolutions to the (40, 40, 16) raster input -- presumably each of the
    16 channels is one terrain-factor layer (TODO confirm against data_read).

    Args:
        inputs: Keras tensor of shape (40, 40, 16).
        hyper_params: accepted for interface symmetry with the other model
            builders; not used inside this function.

    Returns:
        A flattened Keras tensor of extracted features.
    """
    features = BatchNormalization()(inputs)
    # Two depthwise-conv + max-pool stages progressively shrink the raster,
    # followed by one final depthwise conv before flattening.
    for _ in range(2):
        features = DepthwiseConv2D(kernel_size=(3, 3), activation='relu')(features)
        features = MaxPooling2D(pool_size=(3, 3))(features)
    features = DepthwiseConv2D(kernel_size=(3, 3), activation='relu')(features)
    return Flatten()(features)
| 65 | + |
def create_mixture_model(hyper_params):
    """Build and compile the combined raster + tabular classifier.

    Concatenates the flattened convolutional features of the (40, 40, 16)
    raster input with a 4-element vector input, then classifies through a
    small dense head with softmax over 2 classes.

    Args:
        hyper_params: dict providing 'dense_size', 'Dropout_rate',
            'optimizer' ('Adam' selects Adam, anything else SGD with
            momentum 0.9) and 'learning_rate'.

    Returns:
        A compiled two-input Keras Model.
    """
    raster_in = Input(shape=(40, 40, 16))
    conv_features = create_3d_model(raster_in, hyper_params)
    vector_in = Input(shape=(4,))
    combined = merge.concatenate([conv_features, vector_in])
    head = BatchNormalization()(combined)
    head = Dense(np.int32(hyper_params['dense_size']), activation='relu')(head)
    head = Dropout(hyper_params['Dropout_rate'])(head)
    head = Dense(2, activation='softmax')(head)
    model = Model(inputs=[raster_in, vector_in], outputs=head)
    if hyper_params['optimizer'] == 'Adam':
        chosen_opt = op.Adam(lr=hyper_params['learning_rate'])
    else:
        chosen_opt = op.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=chosen_opt, metrics=['accuracy'])
    model.summary()
    return model
| 83 | + |
def create_Only3D_model(hyper_params):
    """Build and compile a classifier using only the raster branch.

    Same dense head as the mixture model, but fed exclusively by the
    convolutional features extracted from the (40, 40, 16) input.

    Args:
        hyper_params: dict providing 'dense_size', 'Dropout_rate',
            'optimizer' and 'learning_rate' (see create_mixture_model).

    Returns:
        A compiled single-input Keras Model with softmax output of size 2.
    """
    raster_in = Input(shape=(40, 40, 16))
    conv_features = create_3d_model(raster_in, hyper_params)
    head = BatchNormalization()(conv_features)
    head = Dense(np.int32(hyper_params['dense_size']), activation='relu')(head)
    head = Dropout(hyper_params['Dropout_rate'])(head)
    head = Dense(2, activation='softmax')(head)
    model = Model(inputs=raster_in, outputs=head)
    if hyper_params['optimizer'] == 'Adam':
        chosen_opt = op.Adam(lr=hyper_params['learning_rate'])
    else:
        chosen_opt = op.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=chosen_opt, metrics=['accuracy'])
    model.summary()
    return model
| 101 | + |
def create_Only1D_model(hyper_params):
    """Build and compile a classifier using only the tabular branch.

    Skips the convolutional stack entirely: a 16-element vector input goes
    straight into the batch-norm + dense + dropout head.

    Args:
        hyper_params: dict providing 'dense_size', 'Dropout_rate',
            'optimizer' and 'learning_rate' (see create_mixture_model).

    Returns:
        A compiled single-input Keras Model with softmax output of size 2.
    """
    vector_in = Input(shape=(16,))
    head = BatchNormalization()(vector_in)
    head = Dense(np.int32(hyper_params['dense_size']), activation='relu')(head)
    head = Dropout(hyper_params['Dropout_rate'])(head)
    head = Dense(2, activation='softmax')(head)
    model = Model(inputs=vector_in, outputs=head)
    if hyper_params['optimizer'] == 'Adam':
        chosen_opt = op.Adam(lr=hyper_params['learning_rate'])
    else:
        chosen_opt = op.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=chosen_opt, metrics=['accuracy'])
    model.summary()
    return model
| 119 | + |
def get_intermediate_output(model, output_layer, input_data):
    """Return the activations of a named layer for the given inputs.

    Builds a probe Model that shares `model`'s input but terminates at
    `output_layer`, then runs prediction on `input_data`.
    """
    probe = Model(inputs=model.input,
                  outputs=model.get_layer(output_layer).output)
    return probe.predict(input_data)
| 124 | + |
| 125 | + |
class SendMetrics(Callback):
    '''
    Keras callback that computes the validation ROC-AUC after each epoch.

    NOTE(review): the original description said "send metrics to NNI
    framework", but the callback only prints the score --
    nni.report_intermediate_result is never called; confirm whether
    reporting to the tuner was intended.
    '''
    def __init__(self, validation_data=()):
        # Bug fix: the original called super(Callback, self).__init__(),
        # which skips Callback's own __init__; name the subclass instead.
        super(SendMetrics, self).__init__()
        self.x_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs=None):
        '''
        Compute and print ROC-AUC on the held-out data at epoch end.
        '''
        # logs defaults to None instead of a shared mutable dict; it is
        # unused here, Keras supplies it on every call anyway.
        y_pred = self.model.predict(self.x_val, verbose=0)
        # Column 1 holds the positive-class probability (one-hot labels).
        score = roc_auc_score(self.y_val[:, 1], y_pred[:, 1])
        print(score)
| 141 | + |
| 142 | + |
def get_aug_data(data_flip, data_rot, data_noise, aug_type):
    """Select or combine augmented datasets according to `aug_type`.

    Args:
        data_flip, data_rot, data_noise: [x, y] pairs produced by the
            corresponding data_read.data_aug methods.
        aug_type: one of 'flip', 'rot', 'noise', 'flip and rot',
            'flip and noise', 'rot and noise', 'all'.

    Returns:
        [x, y] list for the requested augmentation mix.

    Raises:
        ValueError: for an unrecognised aug_type (the original silently
            returned None, which surfaced later as a confusing TypeError).
    """
    single = {'flip': data_flip, 'rot': data_rot, 'noise': data_noise}
    if aug_type in single:
        # Pass-through: no merging needed, and no data_aug instance either
        # (the original constructed one unconditionally, wastefully).
        return single[aug_type]
    combos = {
        'flip and rot': (data_flip, data_rot),
        'flip and noise': (data_flip, data_noise),
        'rot and noise': (data_rot, data_noise),
        'all': (data_rot, data_noise, data_flip),
    }
    if aug_type not in combos:
        raise ValueError('unknown aug_type: %r' % (aug_type,))
    parts = combos[aug_type]
    merger = data_read.data_aug(None, None)
    new_x, new_y = merger.merge_data([p[0] for p in parts],
                                     [p[1] for p in parts])
    return [new_x, new_y]
| 167 | + |
def train(args, params):
    '''
    Load the Yongxin dataset, train the mixture model and export results.

    Args:
        args: argparse namespace with at least `epochs` (`batch_size` is
            parsed but currently unused -- Keras' default applies).
        params: hyper-parameter dict, see generate_default_params().

    Side effects: writes full-dataset prediction probabilities and the
    flatten-layer features to two CSV files under hard-coded Windows paths.
    '''
    data = data_read.yongxin_data()
    base = 'D:/myGIT/Landslide-Susceptibility/data/yongxin/'
    # Bug fix: the original 3-D tt_path contained a stray '.' in the
    # directory name ('Landslide-Susceptibility./data/...').
    train_x3d, train_y, test_x3d, test_y = data.get_train_data(
        tr_path=base + 'tr_index.npy', tt_path=base + 'tt_index.npy',
        data_type='3D')
    train_x1d, train_y, test_x1d, test_y = data.get_train_data(
        tr_path=base + 'tr_index.npy', tt_path=base + 'tt_index.npy',
        data_type='1D')
    train_y = data_read.label_to_onehot(train_y)
    test_y = data_read.label_to_onehot(test_y)
    model = create_mixture_model(params)
    # Only the last 4 tabular columns feed the 4-wide vector input of the
    # mixture model.
    train_data = [train_x3d, train_x1d[:, -4:]]
    test_data = [test_x3d, test_x1d[:, -4:]]
    send_metric = SendMetrics(validation_data=(test_data, test_y))
    model.fit(train_data, train_y, epochs=args.epochs, verbose=1,
              validation_data=(test_data, test_y), callbacks=[send_metric])
    y_pred = model.predict(test_data)
    score = roc_auc_score(test_y[:, 1], y_pred[:, 1])
    # Bug fix: the original used print('Final result is: %d', score), which
    # printed the literal format string followed by the score; AUC is a
    # float, so format it with %f.
    print('Final result is: %f' % score)
    # NOTE(review): nni is imported at module level but
    # nni.report_final_result(score) is never called -- confirm whether this
    # trial script should report the final metric to the tuner.
    all_pro = model.predict([data.x_3d, data.x_1d[:, -4:]])
    pd.DataFrame(all_pro).to_csv('C:/Users/75129/Desktop/nni实验记录/proba_mixed.csv')
    extract_yinzi = get_intermediate_output(model, 'flatten_1',
                                            [data.x_3d, data.x_1d[:, -4:]])
    pd.DataFrame(extract_yinzi).to_csv('C:/Users/75129/Desktop/nni实验记录/cnn_extract_yinzi.csv')
| 205 | + |
| 206 | + |
def generate_default_params():
    '''
    Return the default hyper-parameter dict used when no tuner values
    are supplied.
    '''
    return {
        'optimizer': 'Adam',
        'learning_rate': 0.00025690063520965767,
        'Conv2D_1_number': 99,
        'Conv2D_2_number': 13,
        'dense_size': 32,
        'Dropout_rate': 0.7329247335728416,
    }
| 217 | + |
if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=32, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=100, help="Train epochs", required=False)

    # parse_known_args tolerates extra flags the NNI runner may append.
    ARGS, UNKNOWN = PARSER.parse_known_args()

    try:
        # NOTE(review): the original comment said "get parameters from
        # tuner", but only the local defaults are used --
        # nni.get_next_parameter() is never called; confirm whether
        # tuner-supplied parameters should be merged in.
        PARAMS = generate_default_params()
        train(ARGS, PARAMS)
    except Exception:
        # Bug fix: the original bound the exception to an unused name `e`;
        # re-raise unchanged so the failure (and traceback) propagates.
        raise
0 commit comments