|
11 | 11 | "import tensorflow as tf\n",
|
12 | 12 | "import numpy as np\n",
|
13 | 13 | "import matplotlib.pyplot as plt\n",
|
14 |
| - "%matplotlib inline" |
| 14 | + "%matplotlib inline\n", |
| 15 | + "tf.reset_default_graph()" |
15 | 16 | ]
|
16 | 17 | },
|
17 | 18 | {
|
18 | 19 | "cell_type": "code",
|
19 | 20 | "execution_count": 2,
|
20 |
| - "metadata": { |
21 |
| - "collapsed": false |
22 |
| - }, |
| 21 | + "metadata": {}, |
23 | 22 | "outputs": [],
|
24 | 23 | "source": [
|
25 | 24 | "class FNN(object):\n",
|
|
86 | 85 | " def variable_summaries(self, var, name):\n",
|
87 | 86 | " with tf.name_scope(name+'_summaries'):\n",
|
88 | 87 | " mean = tf.reduce_mean(var)\n",
|
89 |
| - " tf.scalar_summary('mean/' + name, mean)\n", |
| 88 | + " tf.summary.scalar('mean/' + name, mean)\n", |
90 | 89 | " with tf.name_scope(name+'_stddev'):\n",
|
91 | 90 | " stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n",
|
92 | 91 | " # 记录每次训练后变量的数值变化\n",
|
93 |
| - " tf.scalar_summary('_stddev/' + name, stddev)\n", |
94 |
| - " tf.scalar_summary('_max/' + name, tf.reduce_max(var))\n", |
95 |
| - " tf.scalar_summary('_min/' + name, tf.reduce_min(var))\n", |
96 |
| - " tf.histogram_summary(name, var)\n", |
| 92 | + " tf.summary.scalar('_stddev/' + name, stddev)\n", |
| 93 | + "            tf.summary.scalar('_max/' + name, tf.reduce_max(var))\n", |
| 94 | + "            tf.summary.scalar('_min/' + name, tf.reduce_min(var))\n", |
| 95 | + " tf.summary.histogram(name, var)\n", |
97 | 96 | "\n",
|
98 | 97 | " def layer(self,in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):\n",
|
99 | 98 | " with tf.name_scope(layer_name):\n",
|
|
113 | 112 | " # 计算Wx+b\n",
|
114 | 113 | " pre_activate = tf.matmul(in_tensor, weights) + biases\n",
|
115 | 114 | " # 记录直方图\n",
|
116 |
| - " tf.histogram_summary(layer_name + '/pre_activations', pre_activate)\n", |
| 115 | + " tf.summary.histogram(layer_name + '/pre_activations', pre_activate)\n", |
117 | 116 | " # 计算a(Wx+b)\n",
|
118 | 117 | " activations = act(pre_activate, name='activation')\n",
|
119 |
| - " tf.histogram_summary(layer_name + '/activations', activations)\n", |
| 118 | + " tf.summary.histogram(layer_name + '/activations', activations)\n", |
120 | 119 | " # 最终返回该层的输出,以及权重W的L2\n",
|
121 | 120 | " return activations, tf.nn.l2_loss(weights)\n",
|
122 | 121 | "\n",
|
|
174 | 173 | " with tf.name_scope('total_l2'):\n",
|
175 | 174 | " for l2 in self.total_l2:\n",
|
176 | 175 | " self.l2_penalty+=l2\n",
|
177 |
| - " tf.scalar_summary('l2_penalty', self.l2_penalty)\n", |
| 176 | + " tf.summary.scalar('l2_penalty', self.l2_penalty)\n", |
178 | 177 | " \n",
|
179 | 178 | " # 不同任务的loss\n",
|
180 | 179 | " # 若为回归,则loss是用于判断所有预测值和实际值差别的函数。\n",
|
|
183 | 182 | " self.loss=tf.reduce_mean((self.output-self.labels)**2)\n",
|
184 | 183 | " self.loss2=tf.nn.l2_loss(self.output-self.labels)\n",
|
185 | 184 | " \n",
|
186 |
| - " tf.scalar_summary('loss', self.loss)\n", |
| 185 | + " tf.summary.scalar('loss', self.loss)\n", |
187 | 186 | " else:\n",
|
188 | 187 | " # 若为分类,cross entropy的loss function\n",
|
189 | 188 | "        entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=self.output)\n",
|
190 | 189 | "        with tf.name_scope('cross_entropy'):\n",
|
191 | 190 | " self.loss = tf.reduce_mean(entropy)\n",
|
192 |
| - " tf.scalar_summary('loss', self.loss)\n", |
| 191 | + " tf.summary.scalar('loss', self.loss)\n", |
193 | 192 | " with tf.name_scope('accuracy'):\n",
|
194 | 193 | " correct_prediction = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.labels, 1))\n",
|
195 | 194 | " self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
|
196 |
| - " tf.scalar_summary('accuracy', self.accuracy)\n", |
| 195 | + " tf.summary.scalar('accuracy', self.accuracy)\n", |
197 | 196 | " \n",
|
198 | 197 | " # 整合所有loss,形成最终loss\n",
|
199 | 198 | " with tf.name_scope('total_loss'):\n",
|
200 | 199 | " self.total_loss=self.loss + self.l2_penalty*self.L2_lambda\n",
|
201 |
| - " tf.scalar_summary('total_loss', self.total_loss)\n", |
| 200 | + " tf.summary.scalar('total_loss', self.total_loss)\n", |
202 | 201 | " \n",
|
203 | 202 | " # 训练操作\n",
|
204 | 203 | " with tf.name_scope('train'):\n",
|
|
263 | 262 | {
|
264 | 263 | "cell_type": "code",
|
265 | 264 | "execution_count": 5,
|
266 |
| - "metadata": { |
267 |
| - "collapsed": false |
268 |
| - }, |
| 265 | + "metadata": {}, |
269 | 266 | "outputs": [
|
270 | 267 | {
|
271 | 268 | "name": "stdout",
|
|
282 | 279 | {
|
283 | 280 | "cell_type": "code",
|
284 | 281 | "execution_count": 6,
|
285 |
| - "metadata": { |
286 |
| - "collapsed": false |
287 |
| - }, |
| 282 | + "metadata": {}, |
288 | 283 | "outputs": [
|
289 | 284 | {
|
290 | 285 | "name": "stdout",
|
|
319 | 314 | "outputs": [],
|
320 | 315 | "source": [
|
321 | 316 | "sess = tf.InteractiveSession()\n",
|
322 |
| - "tf.initialize_all_variables().run()\n", |
323 |
| - "merged = tf.merge_all_summaries()\n", |
324 |
| - "train_writer = tf.train.SummaryWriter('log3' + '/train',sess.graph)\n", |
325 |
| - "test_writer = tf.train.SummaryWriter('log3' + '/test')" |
| 317 | + "sess.run(tf.global_variables_initializer())\n", |
| 318 | + "merged = tf.summary.merge_all()\n", |
| 319 | + "train_writer = tf.summary.FileWriter('log3' + '/train',sess.graph)\n", |
| 320 | + "test_writer = tf.summary.FileWriter('log3' + '/test')" |
326 | 321 | ]
|
327 | 322 | },
|
328 | 323 | {
|
329 | 324 | "cell_type": "code",
|
330 | 325 | "execution_count": 8,
|
331 |
| - "metadata": { |
332 |
| - "collapsed": false |
333 |
| - }, |
| 326 | + "metadata": {}, |
334 | 327 | "outputs": [],
|
335 | 328 | "source": [
|
336 | 329 | "def plots(T,P,i, n=21,length=400):\n",
|
|
360 | 353 | "cell_type": "code",
|
361 | 354 | "execution_count": 9,
|
362 | 355 | "metadata": {
|
363 |
| - "collapsed": false, |
364 | 356 | "scrolled": true
|
365 | 357 | },
|
366 | 358 | "outputs": [
|
|
439 | 431 | " test_writer.add_summary(summary, k)\n",
|
440 | 432 | " print('epoch%s | train_loss:%s |test_loss:%s' %(i,sess.run(ff.loss,feed_dict={ff.inputs:X0,ff.labels:Y0,ff.drop_keep_rate:1.0}),sess.run(ff.loss,feed_dict={ff.inputs:X_test,ff.labels:Y_test,ff.drop_keep_rate:1.0})))"
|
441 | 433 | ]
|
| 434 | + }, |
| 435 | + { |
| 436 | + "cell_type": "code", |
| 437 | + "execution_count": null, |
| 438 | + "metadata": {}, |
| 439 | + "outputs": [], |
| 440 | + "source": [ |
| 441 | + "#用完关闭session\n", |
| 442 | + "sess.close()" |
| 443 | + ] |
442 | 444 | }
|
443 | 445 | ],
|
444 | 446 | "metadata": {
|
|
457 | 459 | "name": "python",
|
458 | 460 | "nbconvert_exporter": "python",
|
459 | 461 | "pygments_lexer": "ipython2",
|
460 |
| - "version": "2.7.12" |
| 462 | + "version": "2.7.14" |
461 | 463 | }
|
462 | 464 | },
|
463 | 465 | "nbformat": 4,
|
|
0 commit comments