From | CSDN Blog Author | JensLee
Edited | Deep Learning This Little Thing Public Account
This article is shared for academic exchange only. If there is any infringement, please contact us and it will be deleted.


import numpy as np
from keras.models import Sequential
from keras.layers import (Dense, Activation, BatchNormalization, Reshape,
                          UpSampling2D, Conv2D, MaxPooling2D, Flatten)

def generator_model():
    model = Sequential()
    # Project the 1000-dim noise vector up to 1024 units
    model.add(Dense(1024, input_dim=1000))
    model.add(Activation('tanh'))
    # Expand to an 8x8 feature map with 128 channels
    model.add(Dense(128 * 8 * 8))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((8, 8, 128)))
    # Upsample 8x8 -> 32x32
    model.add(UpSampling2D(size=(4, 4)))
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    # Upsample 32x32 -> 64x64 and map to 3 (RGB) channels
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(3, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    return model
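As a quick sanity check, the generator should map a 1000-dimensional noise vector to a 64x64 RGB image (the 8x8 feature map is upsampled by 4x, then by 2x). A minimal sketch using the function defined above:

    g = generator_model()
    noise = np.random.uniform(-1, 1, size=(1, 1000))
    fake = g.predict(noise, verbose=0)
    print(fake.shape)  # (1, 64, 64, 3): matches the discriminator's input_shape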

def discriminator_model():
    model = Sequential()
    # Input: 64x64 RGB images (real or generated)
    model.add(Conv2D(64, (5, 5), padding='same', input_shape=(64, 64, 3)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    # Single sigmoid output: the probability that the input image is real
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
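The discriminator can then score generated images; each output is the probability that the corresponding input is a real image. A minimal sketch, continuing from the generator check above:

    d = discriminator_model()
    scores = d.predict(fake, verbose=0)
    print(scores.shape)  # (1, 1): one real/fake probability per image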

# Randomly generate 1000-dimensional noise
noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 1000))
# X_train holds the training images; take out one batch of real images
# (BATCH_SIZE = 64 here)
image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
# Fake images produced by the generator (g = generator_model())
generated_images = g.predict(noise, verbose=0)
# Concatenate the real and fake images
X = np.concatenate((image_batch, generated_images))
# Labels for X: the first BATCH_SIZE images are real (label 1),
# the last BATCH_SIZE are fake (label 0)
y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
# Feed the concatenated batch to the discriminator for training
# (d = discriminator_model())
d_loss = d.train_on_batch(X, y)
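Because the generator's output layers use tanh, the real training images should be scaled into the same [-1, 1] range before being mixed with generated ones. The original post does not show this preprocessing step, so the following line is a common-practice sketch:

    # Scale pixel values from [0, 255] into [-1, 1] to match the tanh output range
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5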
def generator_containing_discriminator(g, d):
    model = Sequential()
    model.add(g)
    # The parameters of the discriminator are not modified
    # when this combined model is trained
    d.trainable = False
    model.add(d)
    return model
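One Keras subtlety: the trainable flag only takes effect when a model is compiled. So the combined model must be compiled after d.trainable = False is set inside generator_containing_discriminator, while the standalone discriminator is compiled separately with its weights trainable. A minimal wiring sketch (the 'sgd' optimizer and binary cross-entropy loss are assumptions, not from the original post):

    g = generator_model()
    d = discriminator_model()
    # Build and compile the combined model; d is frozen inside it
    g_d = generator_containing_discriminator(g, d)
    g_d.compile(loss='binary_crossentropy', optimizer='sgd')
    # Compile the standalone discriminator with its weights trainable
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer='sgd')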

# Train on one pass over the data, one batch at a time
for index in range(int(X_train.shape[0] / BATCH_SIZE)):
    # Generate random noise
    noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 1000))
    # A batch of real images
    image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
    # Generate fake images from the noise
    generated_images = g.predict(noise, verbose=0)
    # Concatenate real and fake images
    X = np.concatenate((image_batch, generated_images))
    # The first BATCH_SIZE labels are 1 (real images);
    # the last BATCH_SIZE are 0 (fake images)
    y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
    # Train the discriminator so it keeps improving at telling real from fake
    d_loss = d.train_on_batch(X, y)
    # Generate random noise again
    noise = np.random.uniform(-1, 1, (BATCH_SIZE, 1000))
    # Make the discriminator's parameters non-adjustable
    d.trainable = False
    # Feed in noise labeled as real: only the generator is updated,
    # pushing it to fool the discriminator
    # (g_d is the combined model from generator_containing_discriminator(g, d))
    g_loss = g_d.train_on_batch(noise, [1] * BATCH_SIZE)
    # Make the discriminator trainable again so its parameters
    # can be updated on the next batch
    d.trainable = True
    # Print the loss values
    print("batch %d d_loss : %f, g_loss : %f" % (index, d_loss, g_loss))
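After training, sample images can be drawn from the generator; since the output comes from tanh, values in [-1, 1] must be mapped back to [0, 255] before saving. A minimal sketch (the PIL usage and file name are illustrative, not from the original post):

    from PIL import Image

    noise = np.random.uniform(-1, 1, size=(16, 1000))
    images = g.predict(noise, verbose=0)              # values in [-1, 1] from tanh
    images = ((images + 1) * 127.5).astype(np.uint8)  # map back to [0, 255]
    Image.fromarray(images[0]).save('generated_sample.png')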

—End—