@@ -5,6 +5,7 @@ import numpy as np
 import matplotlib.pyplot as plt
 from sklearn.preprocessing import OneHotEncoder
 from tensorflow.keras import layers, losses
+from scipy.signal import bessel, lfilter, filtfilt
 import os
 
 os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
@@ -205,7 +206,7 @@ class ModulationModel(tf.keras.Model):
         out_val = np.reshape(rand_int, (num_of_blocks, self.messages_per_block, 1))
         return out_val, out_arr, out_arr[:, mid_idx, :]
 
-        return t_out_arr, out_arr[:, mid_idx]
+        return t_out_arr, out_arr
 
     def view_encoder(self):
         '''
@@ -297,12 +298,40 @@ class ModulationModel(tf.keras.Model):
         pass
 
     def plot(self, inputs, signal, size):
-        t = np.arange(self.messages_per_block * self.samples_per_symbol * 3)
+        t = np.arange(self.messages_per_block * self.samples_per_symbol)
+        frequency = SAMPLING_FREQUENCY * 1e-9
+        t = np.divide(t, frequency)
         plt.figure()
-        plt.plot(t, signal.flatten()[:self.messages_per_block * self.samples_per_symbol * 3])
-        plt.plot(t, inputs.flatten()[:self.messages_per_block * self.samples_per_symbol * 3])
+        plt.plot(t, signal.flatten()[:self.messages_per_block * self.samples_per_symbol], label='Received Signal')
+        inputs = np.square(inputs)
+        plt.plot(t, inputs.flatten()[:self.messages_per_block * self.samples_per_symbol], label='Transmitted Signal')
+        plt.xlabel('Time (ns)')
+        plt.ylabel('Power')
+        plt.title('Effect of Chromatic Dispersion and Noise on Generated Signal')
+        plt.legend()
         plt.show()
 
+    def demodulate(self, validation, outputs):
+        b, a = bessel(5, 2 / 45, btype='low', analog=False)
+        outputsfilt = filtfilt(b, a, outputs)
+        demodulate = np.sqrt(outputsfilt)
+        modulation_scheme.plot(inputs, demodulate, size)
+        average = np.mean(outputs.reshape(1000, -1, SAMPLES_PER_SYMBOL), axis=2).flatten()
+        validation = validation.flatten()
+        decisions = []
+        for symbol in average:
+            if symbol <= 0.5 or np.isnan(symbol):
+                decisions.append(0)
+            elif symbol > 0.5 and symbol <= 1.5:
+                decisions.append(1)
+            elif symbol > 1.5 and symbol <= 2.5:
+                decisions.append(2)
+            else:
+                decisions.append(3)
+        decisions = np.array(decisions)
+        return decisions
+
+
     def call(self, inputs, training=None, mask=None):
         tx = self.encoder(inputs)
         rx = self.channel(tx)
@@ -313,7 +342,7 @@ class ModulationModel(tf.keras.Model):
 if __name__ == '__main__':
     SAMPLING_FREQUENCY = 336e9
     CARDINALITY = 4
-    SAMPLES_PER_SYMBOL = 24
+    SAMPLES_PER_SYMBOL = 128
     MESSAGES_PER_BLOCK = 9
     DISPERSION_FACTOR = -21.7 * 1e-24
     FIBER_LENGTH = 50
@@ -323,20 +352,16 @@ if __name__ == '__main__':
                                         dispersion_factor=DISPERSION_FACTOR,
                                         fiber_length=FIBER_LENGTH)
 
-    #channel_output = optical_channel(input)
-
     modulation_scheme = ModulationModel(cardinality=CARDINALITY,
                                         samples_per_symbol=SAMPLES_PER_SYMBOL,
                                         messages_per_block=MESSAGES_PER_BLOCK,
                                         channel=optical_channel)
 
-    size = 1000
+    size = 1000
     inputs, validation = modulation_scheme.generate_random_inputs(num_of_blocks=size)
     outputs = optical_channel(inputs).numpy()
-    modulation_scheme.plot(inputs, outputs, size)
+    decisions = modulation_scheme.demodulate(validation, outputs)
+    #modulation_scheme.plot(inputs, outputs, size)
     print("done")
-"""
-    ae_model.view_encoder()
-    ae_model.view_sample_block()
-"""
+
     pass
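
Note on the new demodulate() method: validation is flattened but never compared against the hard decisions, the reshape hard-codes 1000 blocks instead of using size, and the method reads the module-level names modulation_scheme, inputs and size rather than using self and its own arguments. Below is a minimal sketch, not part of the patch, of how the returned decisions could be scored against the transmitted symbols; it assumes validation is the integer symbol array produced by generate_random_inputs() and decisions is the array returned by demodulate(), and the symbol_error_rate helper is hypothetical.

    import numpy as np

    def symbol_error_rate(decisions, validation):
        # Align the reference symbols with the per-symbol hard decisions.
        reference = np.asarray(validation).flatten()
        decisions = np.asarray(decisions).flatten()
        # Fraction of symbols whose decision differs from what was transmitted.
        errors = np.count_nonzero(decisions != reference)
        return errors / reference.size

    # Usage, after decisions = modulation_scheme.demodulate(validation, outputs):
    #     ser = symbol_error_rate(decisions, validation)
    #     print("SER:", ser)

This assumes the two flattened arrays have the same length, one entry per transmitted symbol; the 0.5/1.5/2.5 decision thresholds are taken as-is from demodulate().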