
Commit fea49c4

Merge pull request #44 from perhapszzy/master
Add RESTful client and clip the gradient.
2 parents 1eba5ce + 9fb9afe

File tree

2 files changed (+68 -12 lines)

@@ -0,0 +1,41 @@
+# coding=utf-8
+# Copyright 2017 Caicloud authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from __future__ import print_function
+
+import tensorflow as tf
+from caicloud.clever.serving.client import restful_client
+from caicloud.clever.serving.client import serving_error
+
+make_ndarray = tf.contrib.util.make_ndarray
+
+client = restful_client.RESTfulClient('192.168.16.42:31036')
+
+def run():
+    inputs = {
+        'user': tf.contrib.util.make_tensor_proto([1], shape=[1]),
+        'item': tf.contrib.util.make_tensor_proto([2], shape=[1]),
+    }
+    try:
+        outputs = client.call_predict(inputs)
+        result = outputs["infer"]
+        print('score: {0}'.format(make_ndarray(result)[0][0]))
+    except serving_error.ServingRESTfulError as e:
+        print('serving error,\n status: {0},\n reason: {1},\n body: {2}'.format(
+            e.status, e.reason, e.body))
+
+if __name__ == '__main__':
+    run()

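The new client file above wraps the request and response in TensorProto messages: make_tensor_proto packs the user/item ids, and make_ndarray unpacks the returned "infer" tensor. As a minimal sketch of that round trip in TensorFlow 1.x (no serving cluster involved; the values here are arbitrary):

# Sketch of the TensorProto round trip used by the client example above.
# TensorFlow 1.x only: these helpers live under tf.contrib.util.
import tensorflow as tf

proto = tf.contrib.util.make_tensor_proto([1], shape=[1])  # pack a list into a TensorProto
array = tf.contrib.util.make_ndarray(proto)                # unpack it back into a NumPy array
print(array)  # [1]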
caicloud.tensorflow/caicloud/clever/examples/recommandation/train.py

+27 -12
@@ -5,24 +5,26 @@
 import numpy as np
 import tensorflow as tf
 import pandas as pd
+import os
 
 from caicloud.clever.tensorflow import dist_base
 from caicloud.clever.tensorflow import model_exporter
 
-tf.app.flags.DEFINE_string("export_dir",
-                           "/tmp/saved_model/movie",
-                           "model export directory path.")
-
-tf.app.flags.DEFINE_string("batch_size", 128, "training batch size.")
-tf.app.flags.DEFINE_string("embedding_dim", 50, "embedding dimension.")
+tf.app.flags.DEFINE_string("export_dir", "/tmp/saved_model/movie", "model export directory path.")
+tf.app.flags.DEFINE_string("data_dir", "/caicloud/admin/hengfengPOC/data", "path where data is located.")
 
+tf.app.flags.DEFINE_integer("batch_size", 128, "training batch size.")
+tf.app.flags.DEFINE_integer("embedding_dim", 50, "embedding dimension.")
+tf.app.flags.DEFINE_float("learning_rate", 0.01, "learning rate.")
 FLAGS = tf.app.flags.FLAGS
+
 USER_NUM = 6040
 ITEM_NUM = 3952
 
 def get_data():
     col_names = ["user", "item", "rate", "st"]
-    df = pd.read_csv("/tmp/movielens/ml-1m/ratings.dat", sep="::", header=None, names=col_names, engine='python')
+    datafile = os.path.join(FLAGS.data_dir, "ml-1m/ratings.dat")
+    df = pd.read_csv(datafile, sep="::", header=None, names=col_names, engine='python')
 
     df["user"] -= 1
     df["item"] -= 1
@@ -96,9 +98,19 @@ def model_fn(sync, num_replicas):
     _global_step = tf.contrib.framework.get_or_create_global_step()
 
     _cost = tf.square(_infer - _rate_batch)
-    optimizer = tf.train.AdamOptimizer(0.001)
-    _train_op = optimizer.minimize(_cost, global_step=_global_step)
-
+    optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
+
+    if sync:
+        optimizer = tf.train.SyncReplicasOptimizer(
+            optimizer,
+            replicas_to_aggregate=num_replicas,
+            total_num_replicas=num_replicas,
+            name="mnist_sync_replicas")
+
+    gradients, variables = zip(*optimizer.compute_gradients(_cost))
+    gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
+    _train_op = optimizer.apply_gradients(zip(gradients, variables), global_step=_global_step)
+
     _rmse = tf.sqrt(tf.reduce_mean(_cost))
 
     def rmse_evalute_fn(session):
@@ -129,8 +141,11 @@ def train_fn(session, num_global_step):
     users, items, rates = next(_iter_train)
     session.run(_train_op, feed_dict={_user_batch: users, _item_batch: items, _rate_batch: rates})
 
-    if _local_step % 2000 == 0:
-        rmse, infer, cost = session.run([_rmse, _infer, _cost], feed_dict={_user_batch: _test["user"], _item_batch: _test["item"], _rate_batch: _test["rate"]})
+    if _local_step % 200 == 0:
+        rmse, infer, cost = session.run(
+            [_rmse, _infer, _cost],
+            feed_dict={_user_batch: _test["user"], _item_batch: _test["item"], _rate_batch: _test["rate"]})
+
         print("Eval RMSE at round {} is: {}".format(num_global_step, rmse))
 
     _local_step += 1
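The train.py change replaces optimizer.minimize with an explicit compute/clip/apply sequence (optionally wrapping the optimizer in SyncReplicasOptimizer for synchronous training), so gradients are clipped to a global norm of 5.0 before being applied. Below is a self-contained sketch of that clipping pattern in TensorFlow 1.x; the variable and loss are invented for illustration, and the 5.0 cap matches the value used in the diff.

# Gradient clipping by global norm, TensorFlow 1.x style.
# The variable and loss below are made up for this sketch only.
import tensorflow as tf

w = tf.Variable([3.0, -4.0])
loss = tf.reduce_sum(tf.square(w))  # toy quadratic loss, gradient is 2*w

optimizer = tf.train.GradientDescentOptimizer(0.01)

# Equivalent of optimizer.minimize(loss), except the gradients are rescaled
# whenever their global norm exceeds 5.0.
gradients, variables = zip(*optimizer.compute_gradients(loss))
clipped, global_norm = tf.clip_by_global_norm(gradients, 5.0)
train_op = optimizer.apply_gradients(zip(clipped, variables))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, raw_norm = sess.run([train_op, global_norm])
    print(raw_norm)     # 10.0: the raw gradient [6, -8] exceeded the cap
    print(sess.run(w))  # [2.97, -3.96]: the update used the clipped gradient [3, -4]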
