
Commit 72941b0

Created using Colab
1 parent e4367c6 commit 72941b0

File tree

1 file changed: +190 -0 lines changed

DCGAN_Mnist_Image_Generator.ipynb (+190)

@@ -0,0 +1,190 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4",
      "authorship_tag": "ABX9TyPMMIYlUnZFCYTD08+YW80Q",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/masoudshahrian/DeepLearning-Code/blob/main/DCGAN_Mnist_Image_Generator.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "ITcNPzlG2zbi"
      },
      "outputs": [],
      "source": [
"import tensorflow as tf\n",
40+
"from tensorflow.keras.layers import Dense, Reshape, Flatten, Dropout, LeakyReLU, BatchNormalization, Conv2D, Conv2DTranspose\n",
41+
"from tensorflow.keras.datasets import mnist\n",
42+
"import numpy as np\n",
43+
"import matplotlib.pyplot as plt\n"
44+
]
45+
},
46+
    {
      "cell_type": "code",
      "source": [
        "# Load the MNIST training images (labels are not needed for a GAN)\n",
        "(x_train, _), (_, _) = mnist.load_data()\n",
        "x_train = (x_train.astype('float32') - 127.5) / 127.5  # Normalize the images to [-1, 1]\n",
        "x_train = np.expand_dims(x_train, axis=-1)  # Add a channel dimension: (60000, 28, 28, 1)\n",
        "\n",
        "BUFFER_SIZE = 60000\n",
        "BATCH_SIZE = 256"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "UtlJvbh020ln",
        "outputId": "6329222c-e487-4bf0-861a-d9da7b01a183"
      },
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
            "\u001b[1m11490434/11490434\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 0us/step\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Create batches of the training data\n",
        "train_dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n",
        "\n",
        "# Generator model\n",
        "def make_generator_model():\n",
        "    model = tf.keras.Sequential()\n",
        "    model.add(Dense(7*7*256, use_bias=False, input_shape=(100,)))\n",
        "    model.add(BatchNormalization())\n",
        "    model.add(LeakyReLU())\n",
        "    model.add(Reshape((7, 7, 256)))\n",
        "    model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))\n",
        "    model.add(BatchNormalization())\n",
        "    model.add(LeakyReLU())\n",
        "    model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))\n",
        "    model.add(BatchNormalization())\n",
        "    model.add(LeakyReLU())\n",
        "    model.add(Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))\n",
        "    return model\n",
        "\n",
        "# Discriminator model\n",
        "def make_discriminator_model():\n",
        "    model = tf.keras.Sequential()\n",
        "    model.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))\n",
        "    model.add(LeakyReLU())\n",
        "    model.add(Dropout(0.3))\n",
        "    model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n",
        "    model.add(LeakyReLU())\n",
        "    model.add(Dropout(0.3))\n",
        "    model.add(Flatten())\n",
        "    model.add(Dense(1))\n",
        "    return model\n",
        "\n",
        "# Define the loss and optimizers\n",
        "cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n",
        "\n",
        "def discriminator_loss(real_output, fake_output):\n",
        "    real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n",
        "    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n",
        "    total_loss = real_loss + fake_loss\n",
        "    return total_loss\n",
        "\n",
        "def generator_loss(fake_output):\n",
        "    return cross_entropy(tf.ones_like(fake_output), fake_output)\n",
        "\n",
        "generator = make_generator_model()\n",
        "discriminator = make_discriminator_model()\n",
        "\n",
        "generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
        "discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)"
      ],
      "metadata": {
        "id": "j1oNM69k2_TO"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Define the training loop\n",
        "EPOCHS = 50\n",
        "noise_dim = 100\n",
        "num_examples_to_generate = 16\n",
        "\n",
        "# Fixed seed so the visualized digits are comparable across epochs\n",
        "seed = tf.random.normal([num_examples_to_generate, noise_dim])\n",
        "\n",
        "@tf.function\n",
        "def train_step(images):\n",
        "    noise = tf.random.normal([BATCH_SIZE, noise_dim])\n",
        "    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n",
        "        generated_images = generator(noise, training=True)\n",
        "        real_output = discriminator(images, training=True)\n",
        "        fake_output = discriminator(generated_images, training=True)\n",
        "        gen_loss = generator_loss(fake_output)\n",
        "        disc_loss = discriminator_loss(real_output, fake_output)\n",
        "    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n",
        "    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n",
        "    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n",
        "    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n",
        "\n",
        "def train(dataset, epochs):\n",
        "    for epoch in range(epochs):\n",
        "        for image_batch in dataset:\n",
        "            train_step(image_batch)\n",
        "        display.clear_output(wait=True)\n",
        "        generate_and_save_images(generator, epoch + 1, seed)\n",
        "        print(f'Epoch {epoch+1} completed')\n",
        "\n",
        "def generate_and_save_images(model, epoch, test_input):\n",
        "    predictions = model(test_input, training=False)\n",
        "    fig = plt.figure(figsize=(4, 4))\n",
        "    for i in range(predictions.shape[0]):\n",
        "        plt.subplot(4, 4, i+1)\n",
        "        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n",
        "        plt.axis('off')\n",
        "    plt.savefig(f'image_at_epoch_{epoch}.png')\n",
        "    plt.show()\n",
        "\n",
        "# Start training\n",
        "train(train_dataset, EPOCHS)\n"
      ],
      "metadata": {
        "id": "IO89qLJL21OG"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}
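
Not part of the committed notebook: a minimal follow-up sketch of how the trained generator could be sampled once train(train_dataset, EPOCHS) finishes, assuming the notebook's generator and noise_dim are still in scope (both names come from the cells above). The rescaling mirrors generate_and_save_images: tanh outputs in [-1, 1] are mapped back to pixel values in [0, 255].

import tensorflow as tf
import matplotlib.pyplot as plt

# Hypothetical extra cell: draw 16 fresh latent vectors and decode them.
noise = tf.random.normal([16, noise_dim])
samples = generator(noise, training=False)   # shape (16, 28, 28, 1), values in [-1, 1]

plt.figure(figsize=(4, 4))
for i in range(samples.shape[0]):
    plt.subplot(4, 4, i + 1)
    plt.imshow(samples[i, :, :, 0] * 127.5 + 127.5, cmap='gray')  # rescale to [0, 255]
    plt.axis('off')
plt.show()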
