import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import accuracy_score
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model

import misc

latent_dim = 64

# print("# GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))


class Autoencoder(Model):
    def __init__(self, nary):
        super(Autoencoder, self).__init__()
        # Note: `nary` is currently unused; `latent_dim` refers to the module-level constant.
        self.latent_dim = latent_dim

        # Encoder: 4 input bits -> 2-D latent point (interpreted as a constellation symbol).
        self.encoder = tf.keras.Sequential()
        self.encoder.add(tf.keras.Input(shape=(4,), dtype=bool))
        self.encoder.add(layers.Dense(units=32))
        self.encoder.add(layers.Dense(units=2, activation='relu'))
        # self.encoder.add(layers.Dropout(0.2))
        # self.encoder.add(layers.ReLU(max_value=1.0))

        # Decoder: 2-D latent point -> 4 outputs, one per input bit.
        self.decoder = tf.keras.Sequential()
        self.decoder.add(tf.keras.Input(shape=(2,)))
        self.decoder.add(layers.Dense(units=32))
        # self.decoder.add(tf.keras.layers.Dropout(0.2))
        self.decoder.add(layers.Dense(units=4, activation='softmax'))
        # self.decoder.add(layers.Softmax(units=4, dtype=bool))

        # Earlier convolutional variant (from the Fashion-MNIST autoencoder example), kept for reference:
        # self.encoder = tf.keras.Sequential([
        #     layers.Input(shape=(28, 28, 1)),
        #     layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
        #     layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)
        # ])
        # self.decoder = tf.keras.Sequential([
        #     layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
        #     layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
        #     layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')
        # ])

    def call(self, x, **kwargs):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded


def view_encoder(encoder, N, samples=1000):
    """Plot the 2-D latent point produced by the encoder for each of the 2**N bit patterns."""
    test_values = misc.generate_random_bit_array(samples).reshape((-1, N))
    # Convert each row of N bits into its integer symbol index (LSB first).
    mvector = np.array([2**i for i in range(N)], dtype=int)
    symbols = (test_values * mvector).sum(axis=1)
    encoded = encoder(test_values).numpy()
    for i in range(2**N):
        xy = encoded[symbols == i]
        plt.plot(xy[:, 0], xy[:, 1], 'x', markersize=12, label=format(i, f'0{N}b'))
        # plt.annotate(xy=[xy[:, 0].mean(), xy[:, 1].mean()] + [0.01, 0.01], s=format(i, f'0{N}b'))
    plt.xlabel('Real')
    plt.ylabel('Imaginary')
    plt.title("Autoencoder generated alphabet")
    plt.legend()
    plt.show()


if __name__ == '__main__':
    # (x_train, _), (x_test, _) = fashion_mnist.load_data()
    #
    # x_train = x_train.astype('float32') / 255.
    # x_test = x_test.astype('float32') / 255.
    #
    # print(f"Train data: {x_train.shape}")
    # print(f"Test data: {x_test.shape}")

    # Generate random bits and group them into 4-bit words.
    samples = 100_000
    x_train = misc.generate_random_bit_array(samples).reshape((-1, 4))
    x_test_array = misc.generate_random_bit_array(int(samples * 0.2))
    x_test = x_test_array.reshape((-1, 4))

    # Train the autoencoder to reconstruct its own input.
    autoencoder = Autoencoder(latent_dim)
    autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
    autoencoder.fit(x_train, x_train,
                    epochs=2,
                    shuffle=True,
                    validation_data=(x_test, x_test))

    # Run the test set through encoder and decoder, then threshold at 0.5 to recover bits.
    encoded_data = autoencoder.encoder(x_test)
    decoded_data = autoencoder.decoder(encoded_data).numpy().reshape((-1,))

    result = np.zeros(x_test_array.shape, dtype=bool)
    result[decoded_data > 0.5] = True
    print("Accuracy: %.4f" % accuracy_score(x_test_array, result))

    view_encoder(autoencoder.encoder, 4)
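

# --- Reference sketch (assumption) -----------------------------------------
# The local `misc` module is not included in this file. From its usage above,
# `misc.generate_random_bit_array(samples)` is assumed to return a 1-D NumPy
# array of `samples` uniformly distributed random bits (booleans). The helper
# below is only a minimal sketch of that assumed behaviour, not the project's
# own implementation.
def _generate_random_bit_array_sketch(samples):
    """Return `samples` random bits as a 1-D boolean array (assumed behaviour of misc)."""
    return np.random.randint(0, 2, size=int(samples)).astype(bool)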