-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathmodel.py
More file actions
34 lines (25 loc) · 1.23 KB
/
model.py
File metadata and controls
34 lines (25 loc) · 1.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
#encoding: utf-8
import tensorflow as tf
import numpy as np
from model_parts import *
fc1_hidden = 280
fc2_hidden = 300
def inference(inputs, n_dim, reuse=False, trainable=True):
    """Build the forward pass: tanh FC -> sigmoid FC -> softmax output.

    Args:
        inputs: 2-D tensor of flattened features, shape [batch, n_dim]
            — assumed from the [n_dim, fc1_hidden] weight shape; TODO confirm.
        n_dim: number of input features per example.
        reuse: passed through to the fc_* helpers to reuse existing variables.
        trainable: passed through to the fc_* helpers.

    Returns:
        Softmax probability tensor of shape [batch, FLAGS.num_classes].
    """
    # Each fc_* helper takes (scope_name, inputs, weight_shape, bias_shape, reuse, trainable).
    hidden1 = fc_tanh("fc1", inputs, [n_dim, fc1_hidden], [fc1_hidden], reuse, trainable)
    # NOTE(review): "fc_sigmmoid" (sic) is the helper's actual name in model_parts — do not "fix" the spelling here.
    hidden2 = fc_sigmmoid("fc2", hidden1, [fc1_hidden, fc2_hidden], [fc2_hidden], reuse, trainable)
    probabilities = fc_softmax("softmax", hidden2, [fc2_hidden, FLAGS.num_classes], [FLAGS.num_classes], reuse, trainable)
    return probabilities
def loss(logits, targets):
    """Mean cross-entropy between softmax outputs and one-hot targets.

    Args:
        logits: despite the name, these are softmax *probabilities* from
            inference() (fc_softmax output), shape [batch, num_classes].
        targets: one-hot label tensor of the same shape — assumed one-hot
            from the targets*log(p) formulation; TODO confirm at callers.

    Returns:
        Scalar tensor: mean over the batch of -sum(targets * log(p)).
    """
    print("logits shape: %s" % logits.get_shape())
    print("targets shape: %s" % targets.get_shape())
    # Clip probabilities away from 0 before the log: tf.log(0) yields -inf
    # and NaN gradients, which poisons training as soon as the softmax
    # saturates. Upper bound 1.0 keeps already-valid probabilities intact.
    safe_probs = tf.clip_by_value(logits, 1e-10, 1.0)
    return tf.reduce_mean(-tf.reduce_sum(targets * tf.log(safe_probs), reduction_indices=[1]))
def accuracy(logits, targets):
    """Fraction of examples whose argmax prediction matches the target class.

    Args:
        logits: [batch, num_classes] score/probability tensor.
        targets: [batch, num_classes] one-hot label tensor.

    Returns:
        Scalar float32 tensor in [0, 1].
    """
    predicted_class = tf.argmax(logits, 1)
    true_class = tf.argmax(targets, 1)
    hits = tf.cast(tf.equal(predicted_class, true_class), tf.float32)
    return tf.reduce_mean(hits)
def train_op(loss):
    """Create a plain SGD step that minimizes *loss* at FLAGS.learning_rate.

    Args:
        loss: scalar loss tensor to minimize.

    Returns:
        The optimizer's minimize op.
    """
    sgd = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
    return sgd.minimize(loss)