@@ -4,7 +4,6 @@ import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
-from matplotlib import collections as matcoll
from sklearn.preprocessing import OneHotEncoder
from keras import layers, losses

@@ -12,6 +11,15 @@ from keras import layers, losses
class ExtractCentralMessage(layers.Layer):
def __init__(self, messages_per_block, samples_per_symbol):
"""
+ A Keras layer that extracts the central message (symbol) from a block.
+
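+ A sketch of the equivalent slicing (an assumption based on the description above, not necessarily
+ this layer's exact internals), for an input of shape (batch, messages_per_block * samples_per_symbol, 1)
+ with an odd messages_per_block:
+
+ >>> mid = messages_per_block // 2
+ >>> central = block[:, mid * samples_per_symbol:(mid + 1) * samples_per_symbol, :]
+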
:param messages_per_block: Total number of messages in transmission block
:param samples_per_symbol: Number of samples per transmitted symbol
"""
@@ -32,6 +40,15 @@ class ExtractCentralMessage(layers.Layer):
class AwgnChannel(layers.Layer):
def __init__(self, rx_stddev=0.1):
"""
+ An additive white Gaussian noise channel model. The GaussianNoise layer is used to prevent identical
+ noise from being applied every time the call function is invoked.
+
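+ A hypothetical usage sketch (keras.layers.GaussianNoise only injects noise when the layer is
+ called with training=True):
+
+ >>> channel = AwgnChannel(rx_stddev=0.05)
+ >>> noisy = channel(waveform, training=True)
+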
:param rx_stddev: Standard deviation of receiver noise (due to e.g. TIA circuit)
"""
super(AwgnChannel, self).__init__()
@@ -48,6 +65,14 @@ class DigitizationLayer(layers.Layer):
lpf_cutoff=32e9,
q_stddev=0.1):
"""
+ This layer simulates the finite bandwidth of the hardware by means of a low-pass filter. In addition,
+ artefacts caused by quantization are modelled by adding white Gaussian noise of a given stddev.
+
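+ A hypothetical usage sketch (fs and num_of_samples values are illustrative only; the other
+ values are this layer's defaults):
+
+ >>> adc = DigitizationLayer(fs=336e9, num_of_samples=1024, lpf_cutoff=32e9, q_stddev=0.1)
+ >>> digitized = adc(waveform)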
:param fs: Sampling frequency of the simulation in Hz
:param num_of_samples: Total number of samples in the input
:param lpf_cutoff: Cutoff frequency of LPF modelling finite bandwidth in ADC/DAC
@@ -85,6 +110,14 @@ class OpticalChannel(layers.Layer):
rx_stddev=0.01,
q_stddev=0.01):
"""
+ A channel model that simulates chromatic dispersion, non-linear photodiode detection, the finite
+ bandwidth of the ADC/DAC, and additive white Gaussian noise in optical communication channels.
+
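+ For reference, chromatic dispersion is conventionally applied in the frequency domain via the
+ all-pass response H(w) = exp(j * (beta_2 / 2) * w^2 * z), with beta_2 the dispersion factor and
+ z the fibre length, followed by square-law photodiode detection |E|^2 (the standard formulation;
+ this layer's exact internals may differ).
+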
:param fs: Sampling frequency of the simulation in Hz
:param num_of_samples: Total number of samples in the input
:param dispersion_factor: Dispersion factor in s^2/km
@@ -138,6 +171,15 @@ class EndToEndAutoencoder(tf.keras.Model):
messages_per_block,
channel):
"""
+ The autoencoder that aims to find an encoding of the input messages. Note that a "block" consists of
+ multiple "messages" in order to introduce memory into the simulation, which is essential for modelling
+ inter-symbol interference. The autoencoder architecture was heavily influenced by IEEE 8433895.
+
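+ A hypothetical construction sketch (parameter values are illustrative only):
+
+ >>> ae = EndToEndAutoencoder(cardinality=32, samples_per_symbol=4,
+ ...                          messages_per_block=9, channel=AwgnChannel())
+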
:param cardinality: Number of different messages. Chosen such that each message encodes log_2(cardinality) bits
:param samples_per_symbol: Number of samples per transmitted symbol
:param messages_per_block: Total number of messages in transmission block
@@ -182,6 +224,12 @@ class EndToEndAutoencoder(tf.keras.Model):

def generate_random_inputs(self, num_of_blocks, return_vals=False):
"""
+ A method that generates a list of one-hot encoded messages, used to generate the test/train data.
+
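+ Usage (mirroring the call made in view_sample_block below):
+
+ >>> vals, inputs, labels = self.generate_random_inputs(num_of_blocks=1, return_vals=True)
+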
:param num_of_blocks: Number of blocks to generate. A block contains multiple messages to be transmitted
consecutively to model ISI. The central message in a block is returned as the label for training.
:param return_vals: If true, the raw decimal values of the input sequence will be returned
@@ -204,6 +252,12 @@ class EndToEndAutoencoder(tf.keras.Model):

def train(self, num_of_blocks=1e6, batch_size=None, train_size=0.8, lr=1e-3):
"""
+ Method to train the autoencoder. Further configuration of the loss function, optimizer, etc. can be made here.
+
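+ A hypothetical invocation sketch (argument values are illustrative only):
+
+ >>> ae.train(num_of_blocks=1e5, batch_size=256, train_size=0.8, lr=1e-3)
+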
:param num_of_blocks: Number of blocks to generate for training. Analogous to the dataset size.
:param batch_size: Number of samples to consider on each update iteration of the optimization algorithm
:param train_size: Float less than 1 representing the proportion of the dataset to use for training
@@ -231,6 +285,10 @@ class EndToEndAutoencoder(tf.keras.Model):
)

def view_encoder(self):
+ '''
+ A method that plots the learnt encoding of each distinct message, displayed as a figure with a
+ subplot per message.
+ '''
# Generate inputs for encoder
messages = np.zeros((self.cardinality, self.messages_per_block, self.cardinality))

@@ -277,6 +335,10 @@ class EndToEndAutoencoder(tf.keras.Model):
pass

def view_sample_block(self):
+ '''
+ Generates a random block of input messages and encodes them. The encoded output is then passed through
+ the digitization layer with quantization noise disabled, so that only the low-pass filtering is applied.
+ '''
# Generate a random block of messages
val, inp, _ = self.generate_random_inputs(num_of_blocks=1, return_vals=True)