# Gans_For_One_Simples_Tifinagh_MNIST.py
"""
# GANs for one Tifinagh-MNIST letter
## The libraries we will use
"""
import os
import cv2
from numpy import array
from numpy import expand_dims
from numpy import zeros
from numpy import ones
from numpy import vstack
from numpy.random import randn
from numpy.random import randint
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import Dropout
from matplotlib import pyplot
n_class = 1  # number of classes
n_train_Tifinagh_mnist = 2000  # maximum number of training images to load

def upload_data_Tfinagh_MNIST(path_name, num_of_class, number_of_images):
    """Load up to `number_of_images` grayscale images of one Tifinagh class."""
    X_Data = []
    images = os.listdir(path_name + str(num_of_class))
    # read each file in the class folder as a single-channel (grayscale) image
    for j in range(min(number_of_images, len(images))):
        img = cv2.imread(path_name + str(num_of_class) + '/' + images[j], 0)
        X_Data.append(img)
    return array(X_Data)
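# Example usage (commented out; a sketch assuming the Google Drive layout used
# by load_real_samples below, i.e. drive/MyDrive/DATA2/train_data/<class_id>/...):
# sample = upload_data_Tfinagh_MNIST('drive/MyDrive/DATA2/train_data/', 29, 5)
# print(sample.shape)  # expected (5, 28, 28) for 28x28 grayscale images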
def define_discriminator(in_shape=(28,28,1)):
    """Binary real/fake classifier: two strided conv blocks, 28x28 -> 7x7."""
    model = Sequential()
    model.add(Conv2D(64, (3,3), strides=(2, 2), padding='same', input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Conv2D(64, (3,3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))  # probability that the input is real
    # compile model
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
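# A quick smoke test of the discriminator (a sketch, not part of the original
# pipeline): a dummy batch should map to one real/fake probability per image.
# d_check = define_discriminator()
# print(d_check.predict(zeros((2, 28, 28, 1)), verbose=0).shape)  # -> (2, 1)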
def define_generator(latent_dim):
    """Map a latent vector to a 28x28x1 image: 7x7 feature maps upsampled twice."""
    model = Sequential()
    n_nodes = 128 * 7 * 7  # enough units for 128 feature maps of size 7x7
    model.add(Dense(n_nodes, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((7, 7, 128)))
    # two transposed convolutions upsample 7x7 -> 14x14 -> 28x28
    model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # sigmoid output matches the [0, 1] scaling of the real images
    model.add(Conv2D(1, (7,7), activation='sigmoid', padding='same'))
    return model
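# Likewise for the generator (a sketch, not in the original script): random
# latent vectors of size latent_dim should produce 28x28x1 images in [0, 1].
# g_check = define_generator(100)
# print(g_check.predict(randn(2 * 100).reshape(2, 100), verbose=0).shape)  # -> (2, 28, 28, 1)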
def define_gan(g_model, d_model):
    """Stack generator and discriminator; only the generator's weights train here."""
    # freeze the discriminator so GAN updates only push gradients into the generator
    d_model.trainable = False
    model = Sequential()
    model.add(g_model)
    model.add(d_model)
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
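# Note: setting trainable=False only takes effect when a model is compiled, so
# d_model (compiled earlier with trainable weights) still learns from
# d_model.train_on_batch in the training loop, while staying frozen inside
# gan_model. This is the standard Keras GAN pattern.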
def load_real_samples(num_class):
    """Load one Tifinagh class from Drive and scale pixels to [0, 1]."""
    trainX = upload_data_Tfinagh_MNIST('drive/MyDrive/DATA2/train_data/', num_class, n_train_Tifinagh_mnist)
    X = expand_dims(trainX, axis=-1)  # add the channel dimension: (N, 28, 28, 1)
    X = X.astype('float32')
    X = X / 255.0  # scale to [0, 1] to match the generator's sigmoid output
    return X
def generate_real_samples(dataset, n_samples):
    """Pick n_samples random real images, labelled 1 (real)."""
    ix = randint(0, dataset.shape[0], n_samples)
    X = dataset[ix]
    y = ones((n_samples, 1))
    return X, y

def generate_latent_points(latent_dim, n_samples):
    """Sample n_samples points from a standard Gaussian latent space."""
    x_input = randn(latent_dim * n_samples)
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input

def generate_fake_samples(g_model, latent_dim, n_samples):
    """Generate n_samples images with the generator, labelled 0 (fake)."""
    x_input = generate_latent_points(latent_dim, n_samples)
    X = g_model.predict(x_input)
    y = zeros((n_samples, 1))
    return X, y
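# Example shapes once g_model exists (a sketch; see the driver code below):
# z = generate_latent_points(100, 16)               # -> (16, 100) Gaussian noise
# Xf, yf = generate_fake_samples(g_model, 100, 16)  # -> (16, 28, 28, 1) with labels 0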
def save_plot(examples, epoch, n=10):
    """Save an n x n grid of generated images for visual inspection."""
    for i in range(n * n):
        pyplot.subplot(n, n, 1 + i)
        pyplot.axis('off')
        pyplot.imshow(examples[i, :, :, 0], cmap='gray_r')
    filename = 'generated_plot_e%03d.png' % (epoch+1)
    pyplot.savefig(filename)
    pyplot.close()
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    """Report discriminator accuracy on real vs. fake samples and save the generator."""
    X_real, y_real = generate_real_samples(dataset, n_samples)
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
    #save_plot(x_fake, epoch)
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=100, n_batch=128):
    """Alternate discriminator and generator updates, batch by batch."""
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    for i in range(n_epochs):
        for j in range(bat_per_epo):
            # discriminator update: half a batch of real images, half of fakes
            X_real, y_real = generate_real_samples(dataset, half_batch)
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            d_loss, _ = d_model.train_on_batch(X, y)
            # generator update: label fakes as real (1) so the generator is
            # rewarded for fooling the frozen discriminator
            X_gan = generate_latent_points(latent_dim, n_batch)
            y_gan = ones((n_batch, 1))
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        # every 10 epochs, check discriminator accuracy and save the generator
        if (i+1) % 10 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)
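# With the 2000-image cap above and n_batch=128, bat_per_epo is int(2000/128) = 15
# updates per epoch (fewer if the class folder holds fewer images). A short run
# can sanity-check the loop before committing to the full 100 epochs, e.g.:
# train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=1)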
# size of the latent space
latent_dim = 100
# create the discriminator
d_model = define_discriminator()
# create the generator
g_model = define_generator(latent_dim)
# create the gan
gan_model = define_gan(g_model, d_model)
# load image data
dataset = load_real_samples(29)
# train model
train(g_model, d_model, gan_model, dataset, latent_dim)
# sample 9 latent points and generate images with the trained generator
z = generate_latent_points(100, 9)
im = g_model.predict(z)
# display a 3x3 grid of the generated letters
from matplotlib import pyplot as plt
plt.figure(figsize=(9, 9))
# reverse the default colormap so strokes render dark on a light background
reversed_map = plt.cm.get_cmap().reversed()
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(im[i, :, :, 0], cmap=reversed_map)
    plt.axis('off')
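# The generator snapshots saved by summarize_performance can be reloaded later
# to sample more letters without retraining (a sketch; the filename assumes the
# default 100-epoch run, whose final checkpoint is generator_model_100.h5):
# from keras.models import load_model
# saved_g = load_model('generator_model_100.h5')
# new_im = saved_g.predict(generate_latent_points(100, 9))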