# forked from athenarc/DPGANs
# GANs_Noisy_weights.py — 157 lines (115 loc) · 5.52 KB
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
import math
# --- Hyperparameters / constants ---
BUFFER_SIZE = 60000  # shuffle buffer size; matches the MNIST training-set size used below
BATCH_SIZE = 512  # minibatch size for both generator and discriminator updates
EPOCHS = 100  # total number of training epochs
num_examples_to_generate = 30000  # images sampled from the fixed latent seed
noise_dim = 100  # dimensionality of the generator's latent input
learning_rate = 0.0001  # RMSprop learning rate shared by both optimizers
image_dim = 28  # MNIST images are 28x28 grayscale
def display_image(epoch_no):
    """Open and return the saved snapshot image for the given epoch number."""
    filename = 'image_at_epoch_{:04d}.png'.format(epoch_no)
    return PIL.Image.open(filename)
# Fixed latent batch reused at every snapshot so generated samples are comparable across epochs.
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# Independent RMSprop optimizers for the generator and the discriminator.
generator_optimizer = tf.keras.optimizers.RMSprop(learning_rate)
discriminator_optimizer = tf.keras.optimizers.RMSprop(learning_rate)
# from_logits=True because the discriminator's final Dense(1) emits a raw logit.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def make_generator_model():
    """Build the DCGAN generator: latent vector -> 28x28x1 image in [-1, 1].

    Projects a `noise_dim`-dimensional latent vector to a 7x7x256 map, then
    upsamples through three transposed convolutions (7x7 -> 7x7 -> 14x14 -> 28x28).

    Returns:
        A ``tf.keras.Sequential`` producing tensors of shape
        (batch, image_dim, image_dim, 1).
    """
    model = tf.keras.Sequential()
    # Use the module-level noise_dim constant instead of a duplicated
    # hard-coded 100, so the latent size is defined in exactly one place.
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(noise_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # tanh output matches the input images, which are normalized to [-1, 1].
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, image_dim, image_dim, 1)
    return model
# Build the generator once at import time; shared by train_step and image sampling.
generator = make_generator_model()
def make_discriminator_model():
    """Build the DCGAN discriminator: 28x28x1 image -> single real/fake logit."""
    return tf.keras.Sequential([
        layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                      input_shape=[image_dim, image_dim, 1]),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Flatten(),
        layers.Dense(1),  # raw logit; paired with from_logits=True in the loss
    ])
# Build the discriminator once at import time; shared by train_step.
discriminator = make_discriminator_model()
def discriminator_loss(real_output, fake_output):
    """Standard GAN discriminator loss perturbed with Gaussian noise.

    The added noise, scaled by the privacy parameters (epsilon, delta) and the
    sampling ratio q, is this repo's differential-privacy mechanism.

    NOTE(review): the noise scale uses a base-2 log (math.log(1/delta, 2));
    the classic Gaussian-mechanism bound uses the natural log — confirm intended.
    """
    epsilon = 0.1
    delta = 0.0001
    q = BATCH_SIZE / 60000  # per-step sampling ratio over the 60k training set
    nd = 1
    std = (2 * q * math.sqrt(nd * math.log(1 / delta, 2))) / epsilon
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    combined = loss_on_real + loss_on_fake
    perturbation = tf.random.normal(
        shape=tf.shape(combined), mean=0.0, stddev=std, dtype=tf.float32)
    return combined + (1 / BATCH_SIZE) * perturbation
def generator_loss(fake_output):
    """Generator loss: cross-entropy of fake logits against all-ones labels."""
    target = tf.ones_like(fake_output)
    return cross_entropy(target, fake_output)
@tf.function
def train_step(images):
    """Run one adversarial update on a single batch (compiled as a TF graph)."""
    latent = tf.random.normal([BATCH_SIZE, noise_dim])
    # Record both forward passes so each network can get its own gradients.
    with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
        fakes = generator(latent, training=True)
        logits_real = discriminator(images, training=True)
        logits_fake = discriminator(fakes, training=True)
        g_loss = generator_loss(logits_fake)
        d_loss = discriminator_loss(logits_real, logits_fake)
    g_grads = g_tape.gradient(g_loss, generator.trainable_variables)
    d_grads = d_tape.gradient(d_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(g_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(d_grads, discriminator.trainable_variables))
def generate_and_save_images(model, epoch, test_input):
    """Generate images from `test_input` and save each one under Gen_Images/.

    Args:
        model: generator network; called with training=False so layers such as
            batch norm run in inference mode.
        epoch: epoch number embedded in the output file names.
        test_input: batch of latent vectors fed to the generator.
    """
    # plt.imsave raises FileNotFoundError if the target directory is missing,
    # so create it up front (no-op when it already exists).
    os.makedirs('Gen_Images', exist_ok=True)
    predictions = model(test_input, training=False)
    for i in range(predictions.shape[0]):
        # Map tanh output in [-1, 1] back to pixel values in [0, 255].
        plt.imsave('Gen_Images/image_{}_at_epoch_{}.png'.format(i, epoch),
                   predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
def train(dataset, epochs):
    """Train the GAN for `epochs` passes over `dataset`, with periodic snapshots."""
    for epoch in range(epochs):
        epoch_start = time.time()
        for batch in dataset:
            train_step(batch)
        # Dump sample images every 50 epochs — except epoch 50 itself, which
        # this condition deliberately excludes (preserved from the original).
        should_snapshot = ((epoch + 1) % 50 == 0) and (epoch + 1 != 50)
        if should_snapshot:
            display.clear_output(wait=True)
            generate_and_save_images(generator, epoch + 1, seed)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - epoch_start))
####################################################################
# Script entry: load MNIST, scale pixels to [-1, 1], and run training.
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
#(train_images, train_labels), (_, _) = tf.keras.datasets.fashion_mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], image_dim, image_dim, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # normalize images to [-1, 1]
train_dataset = (tf.data.Dataset.from_tensor_slices(train_images)
                 .shuffle(BUFFER_SIZE)
                 .batch(BATCH_SIZE))
train(train_dataset, EPOCHS)