This repository was archived by the owner on Jan 1, 2021. It is now read-only.

Commit b7f91f3

assignment 1
1 parent e99c50e commit b7f91f3

3 files changed: +167 −1 lines

2017/assignments/exercises/e01.py (+5 −1)

@@ -1,10 +1,13 @@
 """
-Simple TensorFlow exercises
+Simple exercises to get used to TensorFlow API
 You should thoroughly test your code
 """
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

 import tensorflow as tf

+sess = tf.InteractiveSession()
 ###############################################################################
 # 1a: Create two random 0-d tensors x and y of any distribution.
 # Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
@@ -15,6 +18,7 @@
 x = tf.random_uniform([]) # Empty array as shape creates a scalar.
 y = tf.random_uniform([])
 out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
+print(sess.run(out))

 ###############################################################################
 # 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).

assignments/exercises/q1.py (+93, new file)

@@ -0,0 +1,93 @@
"""
Simple exercises to get used to TensorFlow API
You should thoroughly test your code.
TensorFlow's official documentation should be your best friend here
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Created by Chip Huyen ([email protected])
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

import tensorflow as tf

sess = tf.InteractiveSession()
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################

x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: x + y, lambda: x - y)
print(sess.run(out))

###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################

# YOUR CODE
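
# One possible sketch (not the official solution): tf.case() takes
# (predicate, callable) pairs plus a default; exclusive=True asserts that
# at most one predicate fires.
x = tf.random_uniform([], -1, 1)
y = tf.random_uniform([], -1, 1)
out = tf.case([(tf.less(x, y), lambda: x + y),
               (tf.greater(x, y), lambda: x - y)],
              default=lambda: tf.constant(0.0), exclusive=True)
print(sess.run(out))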

###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################

# YOUR CODE
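
# One possible sketch (not the official solution): tf.zeros_like copies
# x's shape and dtype; tf.equal compares element-wise.
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
print(sess.run(out))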

###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################

# YOUR CODE
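
# One possible sketch (not the official solution): tf.where on a boolean
# vector returns the indices of True entries; tf.gather pulls out those values.
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
                 30.97266006, 26.67541885, 38.08450317, 20.74983215,
                 34.94445419, 34.45999146, 29.06485367, 36.01657104,
                 27.88236427, 20.56035233, 30.20379066, 29.51215172,
                 33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x > 30)
out = tf.gather(x, indices)
print(sess.run(out))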

###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################

# YOUR CODE
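
# One possible sketch (not the official solution): tf.range(1, 7) gives
# [1, ..., 6] and tf.diag places it on the diagonal of a 6 x 6 matrix.
values = tf.range(1, 7)
out = tf.diag(values)
print(sess.run(out))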

###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################

# YOUR CODE
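
# One possible sketch (not the official solution): any distribution works;
# tf.matrix_determinant expects a square float matrix.
m = tf.random_normal([10, 10])
out = tf.matrix_determinant(m)
print(sess.run(out))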

###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################

# YOUR CODE
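
# One possible sketch (not the official solution): tf.unique returns a
# (values, indices) pair; only the values are needed here.
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, _ = tf.unique(x)
print(sess.run(unique_values))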

###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################

# YOUR CODE
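
# One possible sketch (not the official solution): branch on the sign of the
# average of (x - y) with tf.cond, in the same style as the Huber loss from
# the lecture 3 slides.
x = tf.random_normal([300])
y = tf.random_normal([300])
average = tf.reduce_mean(x - y)

def mse():
    return tf.reduce_mean(tf.square(x - y))

def abs_sum():
    return tf.reduce_sum(tf.abs(x - y))

out = tf.cond(average < 0, mse, abs_sum)
print(sess.run(out))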

examples/03_linreg_placeholder.py (+69, new file)

@@ -0,0 +1,69 @@
""" Solution for simple linear regression example using placeholders
Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in data from the .txt file
data, n_samples = utils.read_birth_life_data(DATA_FILE)

# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# Step 3: create weight and bias, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y
Y_predicted = w * X + b

# Step 5: use the squared error as the loss function
# you can use either mean squared error or Huber loss
loss = tf.square(Y - Y_predicted, name='loss')
# loss = utils.huber_loss(Y, Y_predicted)

# Step 6: using gradient descent with learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)


start = time.time()
writer = tf.summary.FileWriter('./graphs/linear_reg', tf.get_default_graph())
with tf.Session() as sess:
    # Step 7: initialize the necessary variables, in this case, w and b
    sess.run(tf.global_variables_initializer())

    # Step 8: train the model for 100 epochs
    for i in range(100):
        total_loss = 0
        for x, y in data:
            # Session execute optimizer and fetch values of loss
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y:y})
            total_loss += l
        print('Epoch {0}: {1}'.format(i, total_loss/n_samples))

    # close the writer when you're done using it
    writer.close()

    # Step 9: output the values of w and b
    w_out, b_out = sess.run([w, b])

print('Took: %f seconds' %(time.time() - start))

# plot the results
plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data')
plt.legend()
plt.show()
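
The commented-out alternative in Step 5 calls utils.huber_loss, which is not part of this commit. A minimal sketch of what such a helper might look like for the scalar placeholders used here (the delta threshold and exact form are assumptions, following the Huber loss discussed in lecture 3):

import tensorflow as tf

def huber_loss(labels, predictions, delta=14.0):
    # Quadratic penalty for small residuals, linear penalty for large ones.
    residual = tf.abs(labels - predictions)
    def small_res(): return 0.5 * tf.square(residual)
    def large_res(): return delta * residual - 0.5 * tf.square(delta)
    return tf.cond(residual < delta, small_res, large_res)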
