This repository was archived by the owner on Jan 1, 2021. It is now read-only.

Commit e99c50e

code for lecture 3. solutions uploaded after class
1 parent 48ac942 commit e99c50e

10 files changed: +712 -4 lines

examples/02_lazy_loading.py (+1 -1)

@@ -1,5 +1,5 @@
 """ Example of lazy vs normal loading
-Created by Chip Huyen (huyenn@stanford.edu)
+Created by Chip Huyen (chiphuyen@cs.stanford.edu)
 CS20: "TensorFlow for Deep Learning Research"
 cs20.stanford.edu
 Lecture 02

examples/02_placeholder.py (+1 -1)

@@ -1,5 +1,5 @@
 """ Placeholder and feed_dict example
-Created by Chip Huyen (huyenn@stanford.edu)
+Created by Chip Huyen (chiphuyen@cs.stanford.edu)
 CS20: "TensorFlow for Deep Learning Research"
 cs20.stanford.edu
 Lecture 02

examples/02_simple_tf.py (+1 -1)

@@ -1,5 +1,5 @@
 """ Simple TensorFlow's ops
-Created by Chip Huyen (huyenn@stanford.edu)
+Created by Chip Huyen (chiphuyen@cs.stanford.edu)
 CS20: "TensorFlow for Deep Learning Research"
 cs20.stanford.edu
 """

examples/02_variables.py (+1 -1)

@@ -1,5 +1,5 @@
 """ Variable examples
-Created by Chip Huyen (huyenn@stanford.edu)
+Created by Chip Huyen (chiphuyen@cs.stanford.edu)
 CS20: "TensorFlow for Deep Learning Research"
 cs20.stanford.edu
 Lecture 02

examples/03_linreg_dataset.py (new file, +74)

""" Solution for simple linear regression example using tf.data
Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in the data
data, n_samples = utils.read_birth_life_data(DATA_FILE)

# Step 2: create Dataset and iterator
dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))

iterator = dataset.make_initializable_iterator()
X, Y = iterator.get_next()

# Step 3: create weight and bias, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y
Y_predicted = X * w + b

# Step 5: use the square error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')
# loss = utils.huber_loss(Y, Y_predicted)

# Step 6: use gradient descent with a learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

start = time.time()
with tf.Session() as sess:
    # Step 7: initialize the necessary variables, in this case, w and b
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)

    # Step 8: train the model for 100 epochs
    for i in range(100):
        sess.run(iterator.initializer)  # initialize the iterator
        total_loss = 0
        try:
            while True:
                _, l = sess.run([optimizer, loss])
                total_loss += l
        except tf.errors.OutOfRangeError:
            pass

        print('Epoch {0}: {1}'.format(i, total_loss/n_samples))

    # close the writer when you're done using it
    writer.close()

    # Step 9: output the values of w and b
    w_out, b_out = sess.run([w, b])
    print('w: %f, b: %f' % (w_out, b_out))
print('Took: %f seconds' % (time.time() - start))

# plot the results
plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data with squared error')
# plt.plot(data[:,0], data[:,0] * (-5.883589) + 85.124306, 'g', label='Predicted data with Huber loss')
plt.legend()
plt.show()
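
The commented-out line "loss = utils.huber_loss(Y, Y_predicted)" and the green plot line refer to a Huber-loss helper that lives in utils.py rather than in this commit. A minimal sketch of what such a helper could look like in TF 1.x (the default delta and the exact signature are assumptions, not the course's utils.py):

import tensorflow as tf

def huber_loss(labels, predictions, delta=1.0):
    # Hypothetical helper: quadratic penalty for small residuals,
    # linear penalty for large ones, switching at |residual| = delta.
    residual = tf.abs(labels - predictions)
    quadratic = 0.5 * tf.square(residual)
    linear = delta * residual - 0.5 * delta ** 2
    return tf.where(residual <= delta, quadratic, linear)

Swapping this in for tf.square makes the fit less sensitive to outliers in the birth-rate data, which is why the solution plots both lines.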

examples/03_linreg_starter.py (new file, +94)

""" Starter code for simple linear regression example using placeholders
Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in data from the .txt file
data, n_samples = utils.read_birth_life_data(DATA_FILE)

# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
# Remember both X and Y are scalars with type float
X, Y = None, None
#############################
########## TO DO ############
#############################

# Step 3: create weight and bias, initialized to 0.0
# Make sure to use tf.get_variable
w, b = None, None
#############################
########## TO DO ############
#############################

# Step 4: build model to predict Y
# e.g. how would you derive Y_predicted given X, w, and b
Y_predicted = None
#############################
########## TO DO ############
#############################

# Step 5: use the square error as the loss function
loss = None
#############################
########## TO DO ############
#############################

# Step 6: use gradient descent with a learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

start = time.time()

# Create a filewriter to write the model's graph to TensorBoard
#############################
########## TO DO ############
#############################

with tf.Session() as sess:
    # Step 7: initialize the necessary variables, in this case, w and b
    #############################
    ########## TO DO ############
    #############################

    # Step 8: train the model for 100 epochs
    for i in range(100):
        total_loss = 0
        for x, y in data:
            # Execute train_op and get the value of loss.
            # Don't forget to feed in data for placeholders
            _, loss = ########## TO DO ############
            total_loss += loss

        print('Epoch {0}: {1}'.format(i, total_loss/n_samples))

    # close the writer when you're done using it
    #############################
    ########## TO DO ############
    #############################
    writer.close()

    # Step 9: output the values of w and b
    w_out, b_out = None, None
    #############################
    ########## TO DO ############
    #############################

print('Took: %f seconds' % (time.time() - start))

# uncomment the following lines to see the plot
# plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
# plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data')
# plt.legend()
# plt.show()
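
For readers working through the starter, here is a rough sketch of how the placeholder-based TO DO blocks might be filled in. This is not the official solution file; the FileWriter path and the loss_value name are illustrative choices.

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

Y_predicted = w * X + b
loss = tf.square(Y - Y_predicted, name='loss')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

writer = tf.summary.FileWriter('./graphs/linreg_starter', tf.get_default_graph())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        total_loss = 0
        for x, y in data:
            # feed one (x, y) pair through the placeholders per step
            _, loss_value = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += loss_value
        print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
    writer.close()
    w_out, b_out = sess.run([w, b])

Feeding one example at a time keeps the sketch close to the starter's loop; 03_linreg_dataset.py above shows the same model without placeholders.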

examples/03_logreg_placeholder.py (new file, +93)

""" Solution for simple logistic regression model for MNIST
with placeholder
MNIST dataset: yann.lecun.com/exdb/mnist/
Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time

import utils

# Define parameters for the model
learning_rate = 0.01
batch_size = 128
n_epochs = 30

# Step 1: Read in data
# using TF Learn's built-in function to load MNIST data to the folder data/mnist
mnist = input_data.read_data_sets('data/mnist', one_hot=True)
X_batch, Y_batch = mnist.train.next_batch(batch_size)

# Step 2: create placeholders for features and labels
# each image in the MNIST data is of shape 28*28 = 784
# therefore, each image is represented with a 1x784 tensor
# there are 10 classes for each image, corresponding to digits 0 - 9.
# each label is a one-hot vector.
X = tf.placeholder(tf.float32, [batch_size, 784], name='image')
Y = tf.placeholder(tf.int32, [batch_size, 10], name='label')

# Step 3: create weights and bias
# w is initialized to random variables with mean of 0, stddev of 0.01
# b is initialized to 0
# shape of w depends on the dimension of X and Y so that Y = tf.matmul(X, w)
# shape of b depends on Y
w = tf.get_variable(name='weights', shape=(784, 10), initializer=tf.random_normal_initializer())
b = tf.get_variable(name='bias', shape=(1, 10), initializer=tf.zeros_initializer())

# Step 4: build model
# the model that returns the logits.
# these logits will later be passed through a softmax layer
logits = tf.matmul(X, w) + b

# Step 5: define loss function
# use cross entropy of softmax of logits as the loss function
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')
loss = tf.reduce_mean(entropy)  # computes the mean over all the examples in the batch
# loss = tf.reduce_mean(-tf.reduce_sum(tf.nn.softmax(logits) * tf.log(Y), reduction_indices=[1]))

# Step 6: define training op
# using the Adam optimizer with a learning rate of 0.01 to minimize loss
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Step 7: calculate accuracy with test set
preds = tf.nn.softmax(logits)
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))

writer = tf.summary.FileWriter('./graphs/logreg_placeholder', tf.get_default_graph())
with tf.Session() as sess:
    start_time = time.time()
    sess.run(tf.global_variables_initializer())
    n_batches = int(mnist.train.num_examples/batch_size)

    # train the model n_epochs times
    for i in range(n_epochs):
        total_loss = 0

        for j in range(n_batches):
            X_batch, Y_batch = mnist.train.next_batch(batch_size)
            _, loss_batch = sess.run([optimizer, loss], {X: X_batch, Y: Y_batch})
            total_loss += loss_batch
        print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))
    print('Total time: {0} seconds'.format(time.time() - start_time))

    # test the model
    n_batches = int(mnist.test.num_examples/batch_size)
    total_correct_preds = 0

    for i in range(n_batches):
        X_batch, Y_batch = mnist.test.next_batch(batch_size)
        accuracy_batch = sess.run(accuracy, {X: X_batch, Y: Y_batch})
        total_correct_preds += accuracy_batch

    print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))

writer.close()
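
Lecture 03 also introduces tf.data as an alternative to feed_dict. Below is a hedged sketch of how the placeholder input above could be swapped for a Dataset-based pipeline; the shuffle buffer size and the reinitializable-iterator pattern are assumptions, mirroring 03_linreg_dataset.py rather than this commit.

# Build Datasets directly from the in-memory MNIST arrays.
train_data = tf.data.Dataset.from_tensor_slices((mnist.train.images, mnist.train.labels))
train_data = train_data.shuffle(10000).batch(batch_size)

test_data = tf.data.Dataset.from_tensor_slices((mnist.test.images, mnist.test.labels))
test_data = test_data.batch(batch_size)

# One reinitializable iterator that can be pointed at either dataset.
iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                           train_data.output_shapes)
img, label = iterator.get_next()

train_init = iterator.make_initializer(train_data)  # run before each training epoch
test_init = iterator.make_initializer(test_data)    # run before evaluation

# The model itself is unchanged: img and label stand in for the X and Y placeholders
# (label may need a tf.cast so its dtype matches what the loss op expects).
logits = tf.matmul(img, w) + b

Each epoch would then run sess.run(train_init) and call sess.run([optimizer, loss]) until tf.errors.OutOfRangeError is raised, just as the linear regression example above does.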
