# autoencoder.py
  1. import matplotlib.pyplot as plt
  2. import numpy as np
  3. import tensorflow as tf
  4. from sklearn.metrics import accuracy_score
  5. from tensorflow.keras import layers, losses
  6. from tensorflow.keras.models import Model
  7. import misc
# NOTE(review): stored on Autoencoder.latent_dim but the actual bottleneck
# built in __init__ is 2 units — this constant looks vestigial; confirm.
latent_dim = 64
# print("# GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
  10. class Autoencoder(Model):
  11. def __init__(self, nary):
  12. super(Autoencoder, self).__init__()
  13. self.latent_dim = latent_dim
  14. self.encoder = tf.keras.Sequential()
  15. self.encoder.add(tf.keras.Input(shape=(4,), dtype=bool))
  16. self.encoder.add(layers.Dense(units=32))
  17. self.encoder.add(layers.Dense(units=2, activation='relu'))
  18. # self.encoder.add(layers.Dropout(0.2))
  19. # self.encoder.add(layers.ReLU(max_value=1.0))
  20. self.decoder = tf.keras.Sequential()
  21. self.decoder.add(tf.keras.Input(shape=(2,)))
  22. self.decoder.add(layers.Dense(units=32))
  23. # self.encoder.add(tf.keras.layers.Dropout(0.2))
  24. self.decoder.add(layers.Dense(units=4, activation='softmax'))
  25. # self.decoder.add(layers.Softmax(units=4, dtype=bool))
  26. # [
  27. # layers.Input(shape=(28, 28, 1)),
  28. # layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
  29. # layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)
  30. # ])
  31. # self.decoder = tf.keras.Sequential([
  32. # layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
  33. # layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
  34. # layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')
  35. # ])
  36. def call(self, x, **kwargs):
  37. encoded = self.encoder(x)
  38. decoded = self.decoder(encoded)
  39. return decoded
  40. def view_encoder(encoder, N, samples=1000):
  41. test_values = misc.generate_random_bit_array(samples).reshape((-1, N))
  42. mvector = np.array([2**i for i in range(N)], dtype=int)
  43. symbols = (test_values * mvector).sum(axis=1)
  44. encoded = encoder(test_values).numpy()
  45. for i in range(2**N):
  46. xy = encoded[symbols == i]
  47. plt.plot(xy[:, 0], xy[:, 1], 'x', markersize=12, label=format(i, f'0{N}b'))
  48. # plt.annotate(xy=[xy[:, 0].mean(), xy[:, 1].mean()] + [0.01, 0.01], s=format(i, f'0{N}b'))
  49. plt.xlabel('Real')
  50. plt.ylabel('Imaginary')
  51. plt.title("Autoencoder generated alphabet")
  52. plt.legend()
  53. plt.show()
  54. pass
  55. if __name__ == '__main__':
  56. # (x_train, _), (x_test, _) = fashion_mnist.load_data()
  57. #
  58. # x_train = x_train.astype('float32') / 255.
  59. # x_test = x_test.astype('float32') / 255.
  60. #
  61. # print(f"Train data: {x_train.shape}")
  62. # print(f"Test data: {x_test.shape}")
  63. samples = 1e5
  64. x_train = misc.generate_random_bit_array(samples).reshape((-1, 4))
  65. x_test_array = misc.generate_random_bit_array(samples * 0.2)
  66. x_test = x_test_array.reshape((-1, 4))
  67. autoencoder = Autoencoder(latent_dim)
  68. autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
  69. autoencoder.fit(x_train, x_train,
  70. epochs=2,
  71. shuffle=True,
  72. validation_data=(x_test, x_test))
  73. encoded_data = autoencoder.encoder(x_test)
  74. decoded_data = autoencoder.decoder(encoded_data).numpy().reshape((-1,))
  75. result = np.zeros(x_test_array.shape, dtype=bool)
  76. result[decoded_data > 0.5] = True
  77. print("Accuracy: %.4f" % accuracy_score(x_test_array, result))
  78. view_encoder(autoencoder.encoder, 4)
  79. pass