@@ -8,7 +8,12 @@ import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras import layers, losses
+
+from models.data import BinaryTimeDistributedOneHotGenerator
from models.layers import ExtractCentralMessage, OpticalChannel, DigitizationLayer, BitsToSymbols, SymbolsToBits
+import tensorflow_model_optimization as tfmot  # used by the (currently commented-out) quantisation experiment below
+
+import graphs


class EndToEndAutoencoder(tf.keras.Model):
@@ -17,7 +22,9 @@ class EndToEndAutoencoder(tf.keras.Model):
                 samples_per_symbol,
                 messages_per_block,
                 channel,
-                 custom_loss_fn=False):
+                 custom_loss_fn=False,
+                 quantize=False,
+                 alpha=1):
        """
        The autoencoder that aims to find an encoding of the input messages. Note that a "block" consists
        of multiple "messages" to introduce memory into the simulation, as this is essential for modelling inter-symbol
@@ -27,6 +34,7 @@ class EndToEndAutoencoder(tf.keras.Model):
        :param samples_per_symbol: Number of samples per transmitted symbol
        :param messages_per_block: Total number of messages in transmission block
        :param channel: Channel Layer object. Must be a subclass of keras.layers.Layer with an implemented forward pass
+        :param alpha: Weight of the bit-level cross-entropy term in the custom loss function
        """
        super(EndToEndAutoencoder, self).__init__()

@@ -36,6 +44,7 @@ class EndToEndAutoencoder(tf.keras.Model):

        # Labelled n in paper
        self.samples_per_symbol = samples_per_symbol
+        self.alpha = alpha

        # Labelled N in paper - conditional +=1 to ensure odd value
        if messages_per_block % 2 == 0:
@@ -79,13 +88,14 @@ class EndToEndAutoencoder(tf.keras.Model):
        # Decoding Neural Network
        self.decoder = tf.keras.Sequential([
            layers.Dense(2 * self.cardinality),
-            layers.LeakyReLU(alpha=leaky_relu_alpha),
+            layers.ReLU(),
            layers.Dense(2 * self.cardinality),
-            layers.LeakyReLU(alpha=leaky_relu_alpha),
+            layers.ReLU(),
            layers.Dense(self.cardinality, activation='softmax')
        ], name="decoding_model")
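+        # Build with an explicit input shape so the decoder's weights are
+        # created immediately (assumption: needed so load_weights() below can
+        # restore into the sub-model before it is ever called)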
+        self.decoder.build((1, self.samples_per_symbol))

-    def save_end_to_end(self):
+    def save_end_to_end(self, name=None):
        # extract all params and save

        params = {"fs": self.channel.layers[1].fs,
@@ -101,7 +111,10 @@ class EndToEndAutoencoder(tf.keras.Model):
                  "enob": self.channel.layers[1].enob,
                  "custom_loss_fn": self.custom_loss_fn
                  }
-        dir_str = os.path.join("exports", dt.utcnow().strftime("%Y%m%d-%H%M%S"))
+
+        if not name:
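+            # Fall back to a UTC timestamp when no export name is given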
+            name = dt.utcnow().strftime("%Y%m%d-%H%M%S")
+        dir_str = os.path.join("exports", name)

        if not os.path.exists(dir_str):
            os.makedirs(dir_str)
@@ -167,10 +180,7 @@ class EndToEndAutoencoder(tf.keras.Model):
        y_bits_pred = SymbolsToBits(self.cardinality)(y_pred)

        bit_cost = losses.BinaryCrossentropy()(y_bits_true, y_bits_pred)
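+        # alpha weights the bit-level binary cross-entropy against the
+        # symbol-level cost in the combined loss below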
-
-        a = 1
-
-        return symbol_cost + a * bit_cost
+        return symbol_cost + self.alpha * bit_cost

    def generate_random_inputs(self, num_of_blocks, return_vals=False):
        """
@@ -198,7 +208,7 @@ class EndToEndAutoencoder(tf.keras.Model):

        return out_arr, out_arr[:, mid_idx, :]

-    def train(self, num_of_blocks=1e6, epochs=1, batch_size=None, train_size=0.8, lr=1e-3):
+    def train(self, num_of_blocks=1e6, epochs=1, batch_size=None, train_size=0.8, lr=1e-3, **kwargs):
        """
        Method to train the autoencoder. Further configuration of the loss function, optimizer etc. can be made here.

@@ -207,8 +217,13 @@ class EndToEndAutoencoder(tf.keras.Model):
        :param train_size: Float less than 1 representing the proportion of the dataset to use for training
        :param lr: The learning rate of the optimizer. Defines how quickly the algorithm converges
+        :param kwargs: Extra keyword arguments forwarded to keras.Model.fit (e.g. callbacks)
        """
-        X_train, y_train = self.generate_random_inputs(int(num_of_blocks * train_size))
-        X_test, y_test = self.generate_random_inputs(int(num_of_blocks * (1 - train_size)))
+
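+        # Generators build one-hot (block, central message) batches on the fly
+        # rather than materialising the whole dataset in memory (see models.data)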
+        train_data = BinaryTimeDistributedOneHotGenerator(
+            int(num_of_blocks), cardinality=self.cardinality, blocks=self.messages_per_block)
+        test_data = BinaryTimeDistributedOneHotGenerator(
+            int(num_of_blocks * 0.3), cardinality=self.cardinality, blocks=self.messages_per_block)

        opt = tf.keras.optimizers.Adam(learning_rate=lr)
@@ -225,28 +240,42 @@ class EndToEndAutoencoder(tf.keras.Model):
            run_eagerly=False
        )

-        self.fit(x=X_train,
-                 y=y_train,
-                 batch_size=batch_size,
-                 epochs=epochs,
-                 shuffle=True,
-                 validation_data=(X_test, y_test)
-                 )
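+        # Return the History object so callers can plot training curves
+        # (consumed by graphs.show_train_history in run_tests below)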
+        return self.fit(
+            train_data,
+            epochs=epochs,
+            shuffle=True,
+            validation_data=test_data,
+            **kwargs
+        )
-    def test(self, num_of_blocks=1e4, length_plot=False, plt_show=True):
-        X_test, y_test = self.generate_random_inputs(int(num_of_blocks))
+    def test(self, num_of_blocks=1e4, length_plot=False, plt_show=True, distance=None):
+        test_data = BinaryTimeDistributedOneHotGenerator(
+            1000, cardinality=self.cardinality, blocks=self.messages_per_block)

-        y_out = self.call(X_test)
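+        # Evaluate in chunks of 1000 blocks so the whole test set never has to
+        # sit in memory; the per-chunk SER/BER values are averaged afterwards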
+        num_of_blocks = max(1, int(num_of_blocks / 1000))

-        y_pred = tf.argmax(y_out, axis=1)
-        y_true = tf.argmax(y_test, axis=1)
+        ber = []
+        ser = []

-        self.symbol_error_rate = 1 - accuracy_score(y_true, y_pred)
+        for i in range(num_of_blocks):
+            y_out = self.call(test_data.x)

-        bits_pred = SymbolsToBits(self.cardinality)(tf.one_hot(y_pred, self.cardinality)).numpy().flatten()
-        bits_true = SymbolsToBits(self.cardinality)(y_test).numpy().flatten()
+            y_pred = tf.argmax(y_out, axis=1)
+            y_true = tf.argmax(test_data.y, axis=1)
+            ser.append(1 - accuracy_score(y_true, y_pred))

-        self.bit_error_rate = 1 - accuracy_score(bits_true, bits_pred)
+            bits_pred = SymbolsToBits(self.cardinality)(tf.one_hot(y_pred, self.cardinality)).numpy().flatten()
+            bits_true = SymbolsToBits(self.cardinality)(test_data.y).numpy().flatten()
+            ber.append(1 - accuracy_score(bits_true, bits_pred))
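+            # Draw a fresh set of blocks for the next chunk (assumption: the
+            # generator resamples its data in on_epoch_end)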
+            test_data.on_epoch_end()
+            print(f"\rTested {i + 1} of {num_of_blocks} blocks", end="")
+
+        print(f"\rTested all {num_of_blocks} blocks")
+        self.symbol_error_rate = sum(ser) / len(ser)
+        self.bit_error_rate = sum(ber) / len(ber)

        if length_plot:

@@ -289,10 +318,9 @@ class EndToEndAutoencoder(tf.keras.Model):
        if plt_show:
            plt.show()

-        print("SYMBOL ERROR RATE: {}".format(self.symbol_error_rate))
-        print("BIT ERROR RATE: {}".format(self.bit_error_rate))
-
-        pass
+        print("SYMBOL ERROR RATE: {:e}".format(self.symbol_error_rate))
+        print("BIT ERROR RATE: {:e}".format(self.bit_error_rate))
+        return self.symbol_error_rate, self.bit_error_rate

    def view_encoder(self):
        '''
@@ -435,25 +463,25 @@ def load_model(model_name=None):
    return ae_model, params

-if __name__ == '__main__':
-
-    params = {"fs": 336e9,
-              "cardinality": 32,
-              "samples_per_symbol": 32,
-              "messages_per_block": 9,
-              "dispersion_factor": (-21.7 * 1e-24),
-              "fiber_length": 50,
-              "fiber_length_stddev": 1,
-              "lpf_cutoff": 32e9,
-              "rx_stddev": 0.01,
-              "sig_avg": 0.5,
-              "enob": 8,
-              "custom_loss_fn": True
-              }
-
-    force_training = False
-
-    model_save_name = ""
+def run_tests(distance=50):
+    params = {
+        "fs": 336e9,
+        "cardinality": 64,
+        "samples_per_symbol": 48,
+        "messages_per_block": 9,
+        "dispersion_factor": (-21.7 * 1e-24),
+        "fiber_length": distance,
+        "fiber_length_stddev": 1,
+        "lpf_cutoff": 32e9,
+        "rx_stddev": 0.01,
+        "sig_avg": 0.5,
+        "enob": 6,
+        "custom_loss_fn": True
+    }
+
+    force_training = True
+
+    model_save_name = f'{params["fiber_length"]}km-{params["cardinality"]}'  # e.g. "50km-64"
    param_file_path = os.path.join("exports", model_save_name, "params.json")

    if os.path.isfile(param_file_path) and not force_training:
@@ -461,27 +489,129 @@
        with open(param_file_path, 'r') as file:
            params = json.load(file)

-    optical_channel = OpticalChannel(fs=params["fs"],
-                                     num_of_samples=params["messages_per_block"] * params["samples_per_symbol"],
-                                     dispersion_factor=params["dispersion_factor"],
-                                     fiber_length=params["fiber_length"],
-                                     fiber_length_stddev=params["fiber_length_stddev"],
-                                     lpf_cutoff=params["lpf_cutoff"],
-                                     rx_stddev=params["rx_stddev"],
-                                     sig_avg=params["sig_avg"],
-                                     enob=params["enob"])
-
-    ae_model = EndToEndAutoencoder(cardinality=params["cardinality"],
-                                   samples_per_symbol=params["samples_per_symbol"],
-                                   messages_per_block=params["messages_per_block"],
-                                   channel=optical_channel,
-                                   custom_loss_fn=params["custom_loss_fn"])
+    optical_channel = OpticalChannel(
+        fs=params["fs"],
+        num_of_samples=params["messages_per_block"] * params["samples_per_symbol"],
+        dispersion_factor=params["dispersion_factor"],
+        fiber_length=params["fiber_length"],
+        fiber_length_stddev=params["fiber_length_stddev"],
+        lpf_cutoff=params["lpf_cutoff"],
+        rx_stddev=params["rx_stddev"],
+        sig_avg=params["sig_avg"],
+        enob=params["enob"],
+    )
+
+    ae_model = EndToEndAutoencoder(
+        cardinality=params["cardinality"],
+        samples_per_symbol=params["samples_per_symbol"],
+        messages_per_block=params["messages_per_block"],
+        channel=optical_channel,
+        custom_loss_fn=params["custom_loss_fn"],
+        alpha=5,
+    )
+
+    checkpoint_name = f'/tmp/checkpoint/normal_{params["fiber_length"]}km'
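+    # Checkpoint only the best weights seen so far, ranked by validation accuracy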
+    model_checkpoint_callback0 = tf.keras.callbacks.ModelCheckpoint(
+        filepath=checkpoint_name,
+        save_weights_only=True,
+        monitor='val_accuracy',
+        mode='max',
+        save_best_only=True
+    )
+
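+    # Stop training once val_loss has failed to improve by at least 1e-2 for
+    # 3 consecutive epochs, restoring the best weights seen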
+    early_stop = tf.keras.callbacks.EarlyStopping(
+        monitor='val_loss', min_delta=1e-2, patience=3, verbose=0,
+        mode='auto', baseline=None, restore_best_weights=True
+    )
+
+    # model_checkpoint_callback1 = tf.keras.callbacks.ModelCheckpoint(
+    #     filepath='/tmp/checkpoint/quantised',
+    #     save_weights_only=True,
+    #     monitor='val_accuracy',
+    #     mode='max',
+    #     save_best_only=True
+    # )
+
+    # if os.path.isfile(param_file_path) and not force_training:
+    #     ae_model.encoder = tf.keras.models.load_model(os.path.join("exports", model_save_name, "encoder"))
+    #     ae_model.decoder = tf.keras.models.load_model(os.path.join("exports", model_save_name, "decoder"))
+    #     print("Loaded existing model from " + model_save_name)
+    # else:
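+    # TensorFlow writes weight checkpoints as <name>.index plus data shards, so
+    # the .index file marks that trained weights already exist and training can
+    # be skipped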
+    if not os.path.isfile(checkpoint_name + '.index'):
+        history = ae_model.train(num_of_blocks=1e3, epochs=30, callbacks=[model_checkpoint_callback0, early_stop])
+        graphs.show_train_history(history, f"Autoencoder training at {params['fiber_length']}km")
+        ae_model.save_end_to_end(model_save_name)
+
+    ae_model.load_weights(checkpoint_name)
+    ser, ber = ae_model.test(num_of_blocks=3e6)
+    data = [(params["fiber_length"], ser, ber)]
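+    # Sweep the fibre length 2.5 km either side of the nominal value to check
+    # how robust the learned encoding is to a length mismatch at test time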
+    for length in np.linspace(params["fiber_length"] - 2.5, params["fiber_length"] + 2.5, 6):
+        optical_channel = OpticalChannel(
+            fs=params["fs"],
+            num_of_samples=params["messages_per_block"] * params["samples_per_symbol"],
+            dispersion_factor=params["dispersion_factor"],
+            fiber_length=length,
+            fiber_length_stddev=params["fiber_length_stddev"],
+            lpf_cutoff=params["lpf_cutoff"],
+            rx_stddev=params["rx_stddev"],
+            sig_avg=params["sig_avg"],
+            enob=params["enob"],
+        )
+        ae_model = EndToEndAutoencoder(
+            cardinality=params["cardinality"],
+            samples_per_symbol=params["samples_per_symbol"],
+            messages_per_block=params["messages_per_block"],
+            channel=optical_channel,
+            custom_loss_fn=params["custom_loss_fn"],
+            alpha=5,
+        )
+        ae_model.load_weights(checkpoint_name)
+        print(f"Testing {length}km")
+        ser, ber = ae_model.test(num_of_blocks=3e6)
+        data.append((length, ser, ber))
+    return data

-    if os.path.isfile(param_file_path) and not force_training:
-        ae_model.encoder = tf.keras.models.load_model(os.path.join("exports", model_save_name, "encoder"))
-        ae_model.decoder = tf.keras.models.load_model(os.path.join("exports", model_save_name, "decoder"))
-    else:
-        ae_model.train(num_of_blocks=1e5, epochs=5)
-        ae_model.save_end_to_end()

+if __name__ == '__main__':
+    data0 = run_tests(90)
+    # data1 = run_tests(70)
+    # data2 = run_tests(80)
+    # print('Results 70: ', data1)
+    # print('Results 80: ', data2)
+    print('Results 90: ', data0)
+
+    # ae_model.test(num_of_blocks=3e6)
+    # ae_model.load_weights('/tmp/checkpoint/normal')
+
+    # quantize_model = tfmot.quantization.keras.quantize_model
+    # ae_model.decoder = quantize_model(ae_model.decoder)
+    #
+    # # ae_model.load_weights('/tmp/checkpoint/quantised')
+    #
+    # history = ae_model.train(num_of_blocks=1e3, epochs=20, callbacks=[model_checkpoint_callback1])
+    # graphs.show_train_history(history, f"Autoencoder quantised finetune at {params['fiber_length']}km")

+    # SYMBOL ERROR RATE: 2.039667e-03
+    # 2.358000e-03
+    # BIT ERROR RATE: 4.646000e-04
+    # 6.916000e-04
+
+    # SYMBOL ERROR RATE: 4.146667e-04
+    # BIT ERROR RATE: 1.642667e-04
+    # ae_model.save_end_to_end("50km-q3+")
+    # ae_model.test(num_of_blocks=3e6)
+
+    # Fibre length (km), SER, BER
+    # 50, 2.233333e-05, 5.000000e-06
+    # 60, 6.556667e-04, 1.343333e-04
+    # 75, 1.570333e-03, 3.144667e-04
+    ## 80, 8.061667e-03, 1.612333e-03
+    # 85, 7.811333e-03, 1.601600e-03
+    # 90, 1.121933e-02, 2.255200e-03
+    ## 90, 1.266433e-02, 2.767467e-03
+
+    # 64 cardinality
+    # 50, 5.488000e-03, 1.089000e-03
    pass