@@ -143,10 +143,10 @@ def view_encoder(encoder, N, samples=1000):
     pass


 def test_batch_sizes(autoencoder, sizes):
-    accuracy = []
+    cost = []  # final training loss for each batch size
     for batch_size in sizes:
-        autoencoder.fit(x_train_ho, x_train_ho,
-                        epochs=1,
+        history = autoencoder.fit(x_train_ho, x_train_ho,
+                        epochs=4,
                         batch_size=batch_size,
                         shuffle=False,
                         validation_data=(x_test_ho, x_test_ho))
@@ -156,19 +156,18 @@ def test_batch_sizes(autoencoder, sizes):
         result = misc.int2bit_array(decoded_data.argmax(axis=1), n)
         print("Accuracy: %.4f" % accuracy_score(x_test_array, result.reshape(-1, )))
-        accuracy.append(accuracy_score(x_test_array, result.reshape(-1, )))
-    plt.plot(sizes,accuracy)
-    plt.xscale('log')
+        cost.append(history.history["loss"][-1])
+    plt.plot(sizes, cost)
     plt.grid()
     plt.xlabel('batch size')
-    plt.ylabel('Accuracy')
+    plt.ylabel('Cost')
     plt.legend()
     plt.show()


 def test_epoch_sizes(autoencoder, sizes):
-    accuracy = []
+    cost = []  # final training loss for each epoch count
     for epoch_size in sizes:
-        autoencoder.fit(x_train_ho, x_train_ho,
+        history = autoencoder.fit(x_train_ho, x_train_ho,
                         epochs=epoch_size,
                         shuffle=False,
                         validation_data=(x_test_ho, x_test_ho))
@@ -178,12 +177,12 @@ def test_epoch_sizes(autoencoder, sizes):

         result = misc.int2bit_array(decoded_data.argmax(axis=1), n)
         print("Accuracy: %.4f" % accuracy_score(x_test_array, result.reshape(-1, )))
-        accuracy.append(accuracy_score(x_test_array, result.reshape(-1, )))
-    plt.plot(sizes,accuracy)
+        cost.append(history.history["loss"][-1])  # .history dict, not the History object
+    plt.plot(sizes, cost)
     plt.xscale('log')
     plt.grid()
     plt.xlabel('epoch size')
-    plt.ylabel('Accuracy')
+    plt.ylabel('Cost')
     plt.legend()
     plt.show()

@@ -331,20 +330,21 @@ if __name__ == '__main__':

     pass

-"""
+
     autoencoder = Autoencoder(n, -8)
     autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())

-    batch_sizes = [10,20,30,40, 100,200,300,400,500, 1000,2000,3000,4000,5000]
+    batch_sizes = list(range(5, 100, 5))
     epoch_sizes = [1, 10, 20, 50, 100, 200, 500]

-    test_epoch_sizes(autoencoder, epoch_sizes)
-
+    test_batch_sizes(autoencoder, batch_sizes)
+"""
     pass

-"""
-    autoencoder.fit(x_train_ho, x_train_ho,
-                    epochs=1,
+    autoencoder = Autoencoder(n, -8)
+    autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
+    history = autoencoder.fit(x_train_ho, x_train_ho,
+                    epochs=100,
                     shuffle=False,
                     validation_data=(x_test_ho, x_test_ho))

@@ -353,6 +353,13 @@ if __name__ == '__main__':

     result = misc.int2bit_array(decoded_data.argmax(axis=1), n)
     print("Accuracy: %.4f" % accuracy_score(x_test_array, result.reshape(-1, )))
-    view_encoder(autoencoder.encoder, n)
-"""
+    # view_encoder(autoencoder.encoder, n)
+    plt.plot(history.history['loss'])
+    plt.plot(history.history['val_loss'])
+    plt.title('Cost vs Number of Epochs Used to Train the Neural Network')
+    plt.ylabel('Cost')
+    plt.xlabel('Number of Epochs')
+    plt.legend(['train', 'test'], loc='upper left')
+    plt.show()
+
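
The switch from `accuracy` to `cost` relies on the standard Keras contract: `Model.fit` returns a `History` object whose `.history` attribute is a plain dict mapping metric names ('loss', 'val_loss', ...) to per-epoch lists. Indexing the `History` object itself, as the old `history["loss"][-1]` did in test_epoch_sizes, raises a TypeError. A minimal self-contained sketch of that contract (the toy model and random data are illustrative placeholders, not part of this patch):

    # Sketch: reading final per-epoch losses from the History returned by fit().
    import numpy as np
    from tensorflow import keras

    x = np.random.rand(256, 8).astype("float32")  # placeholder data

    # Tiny stand-in autoencoder; layer sizes are arbitrary.
    model = keras.Sequential([
        keras.layers.Input(shape=(8,)),
        keras.layers.Dense(4, activation="relu"),
        keras.layers.Dense(8, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="mse")

    history = model.fit(x, x, epochs=4, batch_size=32,
                        validation_split=0.25, verbose=0)

    # history.history is a plain dict of per-epoch metric lists.
    print(history.history["loss"][-1])      # final training loss
    print(history.history["val_loss"][-1])  # final validation loss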