import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import accuracy_score
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model

import misc
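
# `misc` is a local helper module that is not shown here. From how it is used
# below it is assumed to provide roughly the following (an assumption inferred
# from usage, not its actual implementation):
#   generate_random_bit_array(n)  -> 1-D array of n random bits
#   bit_matrix2one_hot(bits)      -> one-hot matrix with one row per N-bit symbol
#   int2bit_array(symbols, n)     -> bit matrix recovered from integer symbol indices
#   polar2rect(x)                 -> polar-to-rectangular conversion (only used in a commented line)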

latent_dim = 64

print("# GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
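
# The model below behaves like a communications-style autoencoder: the encoder
# maps each one-hot N-bit symbol to a 2-D point (plotted later as real/imaginary
# parts of a constellation symbol), Gaussian noise is added to emulate a noisy
# channel, and the decoder tries to recover the original symbol from the noisy point.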


class Autoencoder(Model):
    def __init__(self, nary):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim

        # Encoder: one-hot symbol (2**nary entries) -> 2-D constellation point.
        self.encoder = tf.keras.Sequential()
        self.encoder.add(tf.keras.Input(shape=(2**nary,), dtype=bool))
        self.encoder.add(layers.Dense(units=2**(nary + 1)))
        # self.encoder.add(layers.Dropout(0.2))
        self.encoder.add(layers.Dense(units=2**(nary + 1)))
        self.encoder.add(layers.Dense(units=2, activation="sigmoid"))
        # self.encoder.add(layers.ReLU(max_value=1.0))

        # Decoder: noisy 2-D point -> softmax over the 2**nary possible symbols.
        self.decoder = tf.keras.Sequential()
        self.decoder.add(tf.keras.Input(shape=(2,)))
        self.decoder.add(layers.Dense(units=2**(nary + 1)))
        # self.decoder.add(layers.Dropout(0.2))
        self.decoder.add(layers.Dense(units=2**(nary + 1)))
        self.decoder.add(layers.Dense(units=2**nary, activation="softmax"))

        self.randomiser = tf.random_normal_initializer(mean=0.0, stddev=0.1, seed=None)

        # Leftovers from earlier experiments, kept for reference:
        # self.decoder.add(layers.Softmax(units=4, dtype=bool))
        # self.encoder = tf.keras.Sequential([
        #     layers.Input(shape=(28, 28, 1)),
        #     layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
        #     layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)
        # ])
        # self.decoder = tf.keras.Sequential([
        #     layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
        #     layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
        #     layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')
        # ])

    def call(self, x, **kwargs):
        encoded = self.encoder(x)
        encoded = tf.clip_by_value(encoded, clip_value_min=0, clip_value_max=2, name=None)
        # Additive Gaussian noise stands in for the channel between encoder and decoder.
        # Note: because the noise is drawn with NumPy, it is fixed when Keras traces
        # this call into a graph; tf.random.normal would resample it every step.
        # noise = self.randomiser(shape=(-1, 2), dtype=tf.float32)
        noise = np.random.normal(0, 1, (1, 2)) * 0.2
        noisy = tf.convert_to_tensor(noise, dtype=tf.float32)
        decoded = self.decoder(encoded + noisy)
        return decoded


def view_encoder(encoder, N, samples=1000):
    """Plot the 2-D constellation the encoder has learned for every N-bit symbol."""
    test_values = misc.generate_random_bit_array(samples).reshape((-1, N))
    test_values_ho = misc.bit_matrix2one_hot(test_values)
    mvector = np.array([2**i for i in range(N)], dtype=int)
    symbols = (test_values * mvector).sum(axis=1)
    encoded = encoder(test_values_ho).numpy()
    # encoded = misc.polar2rect(encoded)
    for i in range(2**N):
        xy = encoded[symbols == i]
        plt.plot(xy[:, 0], xy[:, 1], 'x', markersize=12, label=format(i, f'0{N}b'))
        plt.annotate(text=format(i, f'0{N}b'), xy=[xy[:, 0].mean() + 0.01, xy[:, 1].mean() + 0.01])
    plt.xlabel('Real')
    plt.ylabel('Imaginary')
    plt.title("Autoencoder generated alphabet")
    # plt.legend()
    plt.show()


if __name__ == '__main__':
    # (x_train, _), (x_test, _) = fashion_mnist.load_data()
    #
    # x_train = x_train.astype('float32') / 255.
    # x_test = x_test.astype('float32') / 255.
    #
    # print(f"Train data: {x_train.shape}")
    # print(f"Test data: {x_test.shape}")

    n = 4
    samples = int(3e6)  # cast to int so the sample counts below are valid array sizes

    x_train = misc.generate_random_bit_array(samples).reshape((-1, n))
    x_train_ho = misc.bit_matrix2one_hot(x_train)

    x_test_array = misc.generate_random_bit_array(int(samples * 0.3))
    x_test = x_test_array.reshape((-1, n))
    x_test_ho = misc.bit_matrix2one_hot(x_test)

    autoencoder = Autoencoder(n)
    autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
    autoencoder.fit(x_train_ho, x_train_ho,
                    epochs=1,
                    shuffle=False,
                    validation_data=(x_test_ho, x_test_ho))

    # Decode the test set and measure per-bit accuracy against the transmitted bits.
    encoded_data = autoencoder.encoder(x_test_ho)
    decoded_data = autoencoder.decoder(encoded_data).numpy()
    result = misc.int2bit_array(decoded_data.argmax(axis=1), n)
    print("Accuracy: %.4f" % accuracy_score(x_test_array, result.reshape(-1,)))

    view_encoder(autoencoder.encoder, n)