# %% 1
# Package imports
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib

# Display plots inline and change default figure size
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)

# %% 2
np.random.seed(3)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)

# %% 3
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
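# A quick sanity check (not in the original write-up): scikit-learn's score()
# reports mean accuracy on the data it is given, here the training set.
print("Logistic regression training accuracy: %.3f" % clf.score(X, y))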

# %% 4
# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates the contour plot below.
def plot_decision_boundary(pred_func):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

# %% 12
# Plot the decision boundary
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")

# %% 15
num_examples = len(X)  # training set size
nn_input_dim = 2  # input layer dimensionality
nn_output_dim = 2  # output layer dimensionality

# Gradient descent parameters (I picked these by hand)
epsilon = 0.01  # learning rate for gradient descent
reg_lambda = 0.01  # regularization strength

# %% 7
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation to calculate our predictions
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Calculating the loss
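    # probs[range(num_examples), y] picks out each example's predicted probability
    # of its true class; the data loss is the mean negative log of these values.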
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs)
    # Add regularization term to loss (optional)
    data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return 1./num_examples * data_loss

# %% 8
# Helper function to predict an output (0 or 1)
def predict(model, x):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation
    z1 = x.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)

# %% 16
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):

    # Initialize the parameters to random values. We need to learn these.
    np.random.seed(0)
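    # Scaling the random weights by 1/sqrt(fan-in) keeps the initial
    # pre-activations at a moderate scale, so tanh does not start out saturated.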
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.zeros((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.zeros((1, nn_output_dim))

    # This is what we return at the end
    model = {}

    # Gradient descent. For each batch...
    for i in range(0, num_passes):

        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
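        # Softmax: exponentiate the class scores and normalize each row to sum to 1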
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

        # Backpropagation
        delta3 = probs
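        # Gradient of softmax + cross-entropy w.r.t. z2 is (probs - one_hot(y)):
        # subtract 1 at each example's true class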
        delta3[range(num_examples), y] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
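        # Backpropagate through tanh using d(tanh z)/dz = 1 - tanh(z)^2 = 1 - a1^2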
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)

        # Add regularization terms (b1 and b2 don't have regularization terms)
        dW2 += reg_lambda * W2
        dW1 += reg_lambda * W1

        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2

        # Assign new parameters to the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}

        # Optionally print the loss.
        # This is expensive because it uses the whole dataset, so we don't want to do it too often.
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model)))

    return model

# %% 17
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
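
# Rough sanity check (a small addition, not in the original write-up): the share
# of training points the 3-hidden-unit network classifies correctly.
print("Training accuracy: %.3f" % np.mean(predict(model, X) == y))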

# %% 14
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i+1)
    plt.title('Hidden Layer size %d' % nn_hdim)
    model = build_model(nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x))
plt.show()