@@ -113,7 +113,7 @@ def __init__(self,
         self.height = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.int32)
         self.height_float = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.float64)

-        self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes')
+        self.img_pl = tf.compat.v1.placeholder(tf.string, name='input_image_as_bytes')
         self.img_data = tf.cond(
             tf.less(tf.rank(self.img_pl), 1),
             lambda: tf.expand_dims(self.img_pl, 0),
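For context (not part of the commit): tf.compat.v1.placeholder is a graph-mode API, so under TensorFlow 2.x this code path is assumed to run with eager execution disabled. A minimal sketch of that pattern:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # placeholders require graph mode in TF 2.x
img_pl = tf.compat.v1.placeholder(tf.string, name='input_image_as_bytes')
rank_op = tf.rank(img_pl)
with tf.compat.v1.Session() as sess:
    # Feed raw image bytes, as predict() does via its input_feed dict.
    print(sess.run(rank_op, feed_dict={img_pl: b'\x89PNG...'}))  # 0 (scalar input)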
@@ -156,7 +156,7 @@ def __init__(self,
             forward_only=self.forward_only,
             use_gru=use_gru)

-        table = tf.contrib.lookup.MutableHashTable(
+        table = tf.lookup.experimental.MutableHashTable(
             key_dtype=tf.int64,
             value_dtype=tf.string,
             default_value="",
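tf.lookup.experimental.MutableHashTable keeps the tf.contrib.lookup constructor arguments, so this hunk is a drop-in rename. A small usage sketch (eager mode, illustrative values only):

import tensorflow as tf

table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.int64, value_dtype=tf.string, default_value="")
table.insert(tf.constant([1, 2], dtype=tf.int64),
             tf.constant(["a", "b"]))
# Missing keys fall back to default_value:
print(table.lookup(tf.constant([2, 3], dtype=tf.int64)))  # [b'b', b'']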
@@ -226,12 +226,12 @@ def __init__(self,
         self.updates = []
         self.summaries_by_bucket = []

-        params = tf.trainable_variables()
-        opt = tf.train.AdadeltaOptimizer(learning_rate=initial_learning_rate)
+        params = tf.compat.v1.trainable_variables()
+        opt = tf.compat.v1.train.AdadeltaOptimizer(learning_rate=initial_learning_rate)
         loss_op = self.attention_decoder_model.loss

         if self.reg_val > 0:
-            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
+            reg_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
             logging.info('Adding %s regularization losses', len(reg_losses))
             logging.debug('REGULARIZATION_LOSSES: %s', reg_losses)
             loss_op = self.reg_val * tf.reduce_sum(reg_losses) + loss_op
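Collection-based lookups such as trainable_variables() and GraphKeys only exist in graph mode, hence the tf.compat.v1 prefixes here. A sketch of how a regularized variable ends up in that collection (assumed standalone setup, not from this repo):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
w = tf.compat.v1.get_variable(
    'w', shape=[2, 2], regularizer=tf.keras.regularizers.l2(0.01))
print(tf.compat.v1.trainable_variables())  # [<tf.Variable 'w:0' ...>]
# get_variable auto-adds the regularizer's penalty term to this collection:
print(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES))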
@@ -242,14 +242,14 @@ def __init__(self,

         # Summaries for loss, variables, gradients, gradient norms and total gradient norm.
         summaries = [
-            tf.summary.scalar("loss", loss_op),
-            tf.summary.scalar("total_gradient_norm", tf.global_norm(gradients))
+            tf.compat.v1.summary.scalar("loss", loss_op),
+            tf.compat.v1.summary.scalar("total_gradient_norm", tf.linalg.global_norm(gradients))
         ]
-        all_summaries = tf.summary.merge(summaries)
+        all_summaries = tf.compat.v1.summary.merge(summaries)
         self.summaries_by_bucket.append(all_summaries)

         # update op - apply gradients
-        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+        update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
         with tf.control_dependencies(update_ops):
             self.updates.append(
                 opt.apply_gradients(
@@ -258,7 +258,7 @@ def __init__(self,
                 )
             )

-        self.saver_all = tf.train.Saver(tf.all_variables())
+        self.saver_all = tf.compat.v1.train.Saver(tf.compat.v1.all_variables())
         self.checkpoint_path = os.path.join(self.model_dir, "model.ckpt")

         ckpt = tf.train.get_checkpoint_state(model_dir)
@@ -268,7 +268,7 @@ def __init__(self,
             self.saver_all.restore(self.sess, ckpt.model_checkpoint_path)
         else:
             logging.info("Created model with fresh parameters.")
-            self.sess.run(tf.initialize_all_variables())
+            self.sess.run(tf.compat.v1.initialize_all_variables())

     def predict(self, image_file_data):
         input_feed = {}
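Note that tf.compat.v1.initialize_all_variables() and tf.compat.v1.all_variables() still run, but both are themselves deprecated; the current compat spellings are global_variables_initializer() and global_variables(). An illustrative save/init flow under the same assumed graph mode:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
v = tf.compat.v1.get_variable('v', shape=[1])
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
with tf.compat.v1.Session() as sess:
    # global_variables_initializer() is the non-deprecated equivalent of
    # initialize_all_variables().
    sess.run(tf.compat.v1.global_variables_initializer())
    saver.save(sess, '/tmp/model.ckpt')  # hypothetical checkpoint path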
@@ -370,7 +370,7 @@ def train(self, data_path, num_epoch):
         loss = 0.0
         current_step = 0
         skipped_counter = 0
-        writer = tf.summary.FileWriter(self.model_dir, self.sess.graph)
+        writer = tf.compat.v1.summary.FileWriter(self.model_dir, self.sess.graph)

         logging.info('Starting the training process.')
         for batch in s_gen.gen(self.batch_size):
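FileWriter likewise only exists under tf.compat.v1; a fully eager TF 2.x rewrite would use tf.summary.create_file_writer instead. A hedged sketch of that eager-mode alternative for a scalar like the loss (assumed log directory and value):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')  # hypothetical log dir
with writer.as_default():
    tf.summary.scalar('loss', 0.42, step=1)  # illustrative value
writer.flush()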
@@ -498,18 +498,18 @@ def _prepare_image(self, image):
         dims = tf.shape(img)
         width = self.max_width

-        max_width = tf.to_int32(tf.ceil(tf.truediv(dims[1], dims[0]) * self.height_float))
-        max_height = tf.to_int32(tf.ceil(tf.truediv(width, max_width) * self.height_float))
+        max_width = tf.cast(tf.math.ceil(tf.truediv(dims[1], dims[0]) * self.height_float), dtype=tf.int32)
+        max_height = tf.cast(tf.math.ceil(tf.truediv(width, max_width) * self.height_float), dtype=tf.int32)

         resized = tf.cond(
             tf.greater_equal(width, max_width),
             lambda: tf.cond(
                 tf.less_equal(dims[0], self.height),
-                lambda: tf.to_float(img),
-                lambda: tf.image.resize_images(img, [self.height, max_width],
+                lambda: tf.cast(img, dtype=tf.float32),
+                lambda: tf.image.resize(img, [self.height, max_width],
                                                method=tf.image.ResizeMethod.BICUBIC),
             ),
-            lambda: tf.image.resize_images(img, [max_height, width],
+            lambda: tf.image.resize(img, [max_height, width],
                                            method=tf.image.ResizeMethod.BICUBIC)
         )

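One behavioural note on this hunk: tf.image.resize always returns float32, which is why the unresized branch casts img to float32 so both tf.cond branches agree on dtype. A quick check with a dummy tensor (illustrative shape only):

import tensorflow as tf

img = tf.zeros([32, 100, 1], dtype=tf.uint8)  # dummy H x W x C image
resized = tf.image.resize(img, [32, 150],
                          method=tf.image.ResizeMethod.BICUBIC)
print(resized.dtype)  # float32, matching the tf.cast(img, tf.float32) branch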