Browse files

Testing autoencoders with other modulations

Min 5 years ago
parent
revision
7632f0d3fc
2 changed files with 131 additions and 18 deletions
  1. main.py (+47, -0)
  2. models/autoencoder.py (+84, -18)
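
The commit wires the new Autoencoder model into the BER comparison already driven from main.py. A minimal usage sketch, assuming get_AWGN_ber is the BER-sweep helper already defined in main.py and that it returns the (x, y) pair unpacked into plt.plot below (the helper itself is not part of this diff):

import matplotlib.pyplot as plt
from models.autoencoder import Autoencoder

# Learn a 2-D constellation for 5-bit symbols, training against noise at -25 dB.
aenc = Autoencoder(5, -25)
aenc.train(samples=1e6)

# The trained encoder/decoder are exposed as a Modulator/Demodulator pair,
# so they slot into the same BER sweep used for the fixed QAM alphabets.
plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(),
                       samples=12000, start=-15),
         '-', label='AE 5bit -25dB')
plt.legend()
plt.show()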

main.py (+47, -0)

@@ -5,6 +5,7 @@ from sklearn.metrics import accuracy_score
 from models import basic
 from models.basic import AWGNChannel, BPSKDemod, BPSKMod, BypassChannel, AlphabetMod, AlphabetDemod
 import misc
+from models.autoencoder import Autoencoder


 def show_constellation(mod, chan, demod, samples=1000):
@@ -27,6 +28,8 @@ def show_constellation(mod, chan, demod, samples=1000):


 def get_ber(mod, chan, demod, samples=1000):
+    if samples % mod.N:
+        samples += mod.N - (samples % mod.N)
     x = misc.generate_random_bit_array(samples)
     x_mod = mod.forward(x)
     x_chan = chan.forward(x_mod)
@@ -54,6 +57,50 @@ if __name__ == '__main__':
     # misc.display_alphabet(mod.alphabet, a_vals=True)
     # mod = MaryMod('64qam', 10e6)
     # misc.display_alphabet(mod.alphabet, a_vals=True)
+    # aenc = Autoencoder(4, -25)
+    # aenc.train(samples=5e5)
+    # plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+    #          label='AE 4bit -25dB')
+
+    aenc = Autoencoder(5, -25)
+    aenc.train(samples=1e6)
+    plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+             label='AE 5bit -25dB')
+
+    aenc = Autoencoder(5, -15)
+    aenc.train(samples=1e6)
+    plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+             label='AE 5bit -15dB')
+
+    aenc = Autoencoder(4, -25)
+    aenc.train(samples=6e5)
+    plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+             label='AE 4bit -25dB')
+
+    aenc = Autoencoder(4, -15)
+    aenc.train(samples=6e5)
+    plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+             label='AE 4bit -15dB')
+
+    aenc = Autoencoder(2, -20)
+    aenc.train(samples=6e5)
+    plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+             label='AE 2bit -20dB')
+
+    aenc = Autoencoder(2, -15)
+    aenc.train(samples=6e5)
+    plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+             label='AE 2bit -15dB')
+
+    # aenc = Autoencoder(4, -10)
+    # aenc.train(samples=5e5)
+    # plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+    #          label='AE 4bit -10dB')
+    #
+    # aenc = Autoencoder(4, -8)
+    # aenc.train(samples=5e5)
+    # plt.plot(*get_AWGN_ber(aenc.get_modulator(), aenc.get_demodulator(), samples=12000, start=-15), '-',
+    #          label='AE 4bit -8dB')

     plt.plot(*get_AWGN_ber(AlphabetMod('64qam', 10e6), AlphabetDemod('64qam', 10e6), samples=12000, start=-15), '-', label='64-QAM')
     plt.plot(*get_AWGN_ber(AlphabetMod('16qam', 10e6), AlphabetDemod('16qam', 10e6), samples=12000, start=-15), '-', label='16-QAM')
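
The padding added to get_ber rounds the requested number of random bits up to the next multiple of mod.N, so the bit stream divides evenly into symbols (assuming mod.N is the modulator's bits-per-symbol width, which is how both get_ber and AutoencoderMod.forward use it). A quick worked example with a hypothetical 3-bit modulator:

samples = 1000
N = 3                              # hypothetical bits per symbol
if samples % N:
    samples += N - (samples % N)   # 1000 -> 1002, the next multiple of 3

For the autoencoder runs above (2, 4 and 5 bits at samples=12000) the request is already an exact multiple, so the padding is a no-op there.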

models/autoencoder.py (+84, -18)

@@ -6,33 +6,70 @@ from sklearn.metrics import accuracy_score
 from tensorflow.keras import layers, losses
 from tensorflow.keras.models import Model
 import misc
+import defs

 latent_dim = 64

-
 print("# GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))


+class AutoencoderMod(defs.Modulator):
+    def __init__(self, autoencoder):
+        super().__init__(2**autoencoder.N)
+        self.autoencoder = autoencoder
+
+    def forward(self, binary: np.ndarray) -> np.ndarray:
+        reshaped = binary.reshape((-1, self.N))
+        reshaped_ho = misc.bit_matrix2one_hot(reshaped)
+        encoded = self.autoencoder.encoder(reshaped_ho)
+        x = encoded.numpy()
+        x2 = x * 2 - 1
+
+        f = np.zeros(x2.shape[0])
+        x3 = misc.rect2polar(np.c_[x2[:, 0], x2[:, 1], f])
+        return x3
+
+
+class AutoencoderDemod(defs.Demodulator):
+    def __init__(self, autoencoder):
+        super().__init__(2**autoencoder.N)
+        self.autoencoder = autoencoder
+
+    def forward(self, values: np.ndarray) -> np.ndarray:
+        rect = misc.polar2rect(values[:, [0, 1]])
+        decoded = self.autoencoder.decoder(rect).numpy()
+        result = misc.int2bit_array(decoded.argmax(axis=1), self.N)
+        return result.reshape(-1, )
+
+
 class Autoencoder(Model):
-    def __init__(self, nary):
+    def __init__(self, N, noise):
         super(Autoencoder, self).__init__()
-        self.latent_dim = latent_dim
+        self.N = N
         self.encoder = tf.keras.Sequential()
-        self.encoder.add(tf.keras.Input(shape=(2**nary,), dtype=bool))
-        self.encoder.add(layers.Dense(units=2**(nary+1)))
+        self.encoder.add(tf.keras.Input(shape=(2 ** N,), dtype=bool))
+        self.encoder.add(layers.Dense(units=2 ** (N + 1)))
         # self.encoder.add(layers.Dropout(0.2))
-        self.encoder.add(layers.Dense(units=2**(nary+1)))
+        self.encoder.add(layers.Dense(units=2 ** (N + 1)))
         self.encoder.add(layers.Dense(units=2, activation="sigmoid"))
         # self.encoder.add(layers.ReLU(max_value=1.0))

         self.decoder = tf.keras.Sequential()
         self.decoder.add(tf.keras.Input(shape=(2,)))
-        self.decoder.add(layers.Dense(units=2**(nary+1)))
+        self.decoder.add(layers.Dense(units=2 ** (N + 1)))
         # self.decoder.add(layers.Dropout(0.2))
-        self.decoder.add(layers.Dense(units=2**(nary+1)))
-        self.decoder.add(layers.Dense(units=2**nary, activation="softmax"))
+        self.decoder.add(layers.Dense(units=2 ** (N + 1)))
+        self.decoder.add(layers.Dense(units=2 ** N, activation="softmax"))
+
+        # self.randomiser = tf.random_normal_initializer(mean=0.0, stddev=0.1, seed=None)
+
+        self.mod = None
+        self.demod = None
+        self.compiled = False
+
+        # Divide by 2 because encoder outputs values between 0 and 1 instead of -1 and 1
+        self.noise = 10 ** (noise / 10)  # / 2

-        self.randomiser = tf.random_normal_initializer(mean=0.0, stddev=0.1, seed=None)
         # self.decoder.add(layers.Softmax(units=4, dtype=bool))

         # [
@@ -48,25 +85,54 @@ class Autoencoder(Model):

     def call(self, x, **kwargs):
         encoded = self.encoder(x)
-        encoded = tf.clip_by_value(encoded,  clip_value_min=0, clip_value_max=2, name=None)
+        encoded = encoded * 2 - 1
+        # encoded = tf.clip_by_value(encoded, clip_value_min=0, clip_value_max=1, name=None)
         # noise = self.randomiser(shape=(-1, 2), dtype=tf.float32)
-        noise = np.random.normal(0, 1, (1, 2)) * 0.2
+        noise = np.random.normal(0, 1, (1, 2)) * self.noise
         noisy = tf.convert_to_tensor(noise, dtype=tf.float32)
         decoded = self.decoder(encoded + noisy)
         return decoded

+    def train(self, samples=1e6):
+        if samples % self.N:
+            samples += self.N - (samples % self.N)
+        x_train = misc.generate_random_bit_array(samples).reshape((-1, self.N))
+        x_train_ho = misc.bit_matrix2one_hot(x_train)
+
+        x_test_array = misc.generate_random_bit_array(samples * 0.3)
+        x_test = x_test_array.reshape((-1, self.N))
+        x_test_ho = misc.bit_matrix2one_hot(x_test)
+
+        if not self.compiled:
+            self.compile(optimizer='adam', loss=losses.MeanSquaredError())
+            self.compiled = True
+
+        self.fit(x_train_ho, x_train_ho, shuffle=False, validation_data=(x_test_ho, x_test_ho))
+        # encoded_data = self.encoder(x_test_ho)
+        # decoded_data = self.decoder(encoded_data).numpy()
+
+    def get_modulator(self):
+        if self.mod is None:
+            self.mod = AutoencoderMod(self)
+        return self.mod
+
+    def get_demodulator(self):
+        if self.demod is None:
+            self.demod = AutoencoderDemod(self)
+        return self.demod
+

 def view_encoder(encoder, N, samples=1000):
     test_values = misc.generate_random_bit_array(samples).reshape((-1, N))
     test_values_ho = misc.bit_matrix2one_hot(test_values)
-    mvector = np.array([2**i for i in range(N)], dtype=int)
+    mvector = np.array([2 ** i for i in range(N)], dtype=int)
     symbols = (test_values * mvector).sum(axis=1)
     encoded = encoder(test_values_ho).numpy()
     # encoded = misc.polar2rect(encoded)
-    for i in range(2**N):
+    for i in range(2 ** N):
         xy = encoded[symbols == i]
         plt.plot(xy[:, 0], xy[:, 1], 'x', markersize=12, label=format(i, f'0{N}b'))
-        plt.annotate(xy=[xy[:, 0].mean()+0.01, xy[:, 1].mean()+0.01], text=format(i, f'0{N}b'))
+        plt.annotate(xy=[xy[:, 0].mean() + 0.01, xy[:, 1].mean() + 0.01], text=format(i, f'0{N}b'))
     plt.xlabel('Real')
     plt.ylabel('Imaginary')
     plt.title("Autoencoder generated alphabet")
@@ -87,14 +153,14 @@ if __name__ == '__main__':

     n = 4

-    samples = 3e6
+    samples = 1e6
     x_train = misc.generate_random_bit_array(samples).reshape((-1, n))
     x_train_ho = misc.bit_matrix2one_hot(x_train)
     x_test_array = misc.generate_random_bit_array(samples * 0.3)
     x_test = x_test_array.reshape((-1, n))
     x_test_ho = misc.bit_matrix2one_hot(x_test)

-    autoencoder = Autoencoder(n)
+    autoencoder = Autoencoder(n, -8)
     autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())

     autoencoder.fit(x_train_ho, x_train_ho,
@@ -106,7 +172,7 @@ if __name__ == '__main__':
     decoded_data = autoencoder.decoder(encoded_data).numpy()

     result = misc.int2bit_array(decoded_data.argmax(axis=1), n)
-    print("Accuracy: %.4f" % accuracy_score(x_test_array, result.reshape(-1,)))
+    print("Accuracy: %.4f" % accuracy_score(x_test_array, result.reshape(-1, )))
     view_encoder(autoencoder.encoder, n)

     pass
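
A note on the new noise argument: the constructor takes a level in dB and converts it once to a linear factor with 10 ** (noise / 10); call() then uses that factor to scale unit-variance Gaussian noise added to the encoded 2-D symbol, after remapping the sigmoid output from [0, 1] to [-1, 1] via encoded * 2 - 1. The factors behind the levels used in main.py work out to:

# dB level -> linear factor applied to np.random.normal(0, 1, (1, 2))
10 ** (-25 / 10)   # ~= 0.0032
10 ** (-15 / 10)   # ~= 0.0316

Since 10 ** (dB / 10) is a power ratio, the diff does not make clear whether the factor is meant as an amplitude or a power scale; the numbers above simply show what the code applies.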