# tutorial.py (2.9 KB)
# Training a feed-forward neural network on the Iris dataset by optimizing
# its weights with particle swarm optimization (PySwarms, Global-Best PSO).
  1. import numpy as np
  2. import matplotlib.pyplot as plt
  3. from sklearn.datasets import load_iris
  4. # Import PySwarms
  5. import pyswarms as ps
  6. data = load_iris()
  7. # Store the features as X and the labels as y
  8. X = data.data
  9. y = data.target
  10. n_inputs = 4
  11. n_hidden = 20
  12. n_classes = 3
  13. num_samples = 150
  14. def logits_function(p):
  15. """ Calculate roll-back the weights and biases
  16. Inputs
  17. ------
  18. p: np.ndarray
  19. The dimensions should include an unrolled version of the
  20. weights and biases.
  21. Returns
  22. -------
  23. numpy.ndarray of logits for layer 2
  24. """
  25. # Roll-back the weights and biases
  26. W1 = p[0:80].reshape((n_inputs,n_hidden))
  27. b1 = p[80:100].reshape((n_hidden,))
  28. W2 = p[100:160].reshape((n_hidden,n_classes))
  29. b2 = p[160:163].reshape((n_classes,))
  30. # Perform forward propagation
  31. z1 = X.dot(W1) + b1 # Pre-activation in Layer 1
  32. a1 = np.tanh(z1) # Activation in Layer 1
  33. logits = a1.dot(W2) + b2 # Pre-activation in Layer 2
  34. return logits # Logits for Layer 2
  35. # Forward propagation
  36. def forward_prop(params):
  37. """Forward propagation as objective function
  38. This computes for the forward propagation of the neural network, as
  39. well as the loss.
  40. Inputs
  41. ------
  42. params: np.ndarray
  43. The dimensions should include an unrolled version of the
  44. weights and biases.
  45. Returns
  46. -------
  47. float
  48. The computed negative log-likelihood loss given the parameters
  49. """
  50. logits = logits_function(params)
  51. # Compute for the softmax of the logits
  52. exp_scores = np.exp(logits)
  53. probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
  54. # Compute for the negative log likelihood
  55. corect_logprobs = -np.log(probs[range(num_samples), y])
  56. loss = np.sum(corect_logprobs) / num_samples
  57. return loss
  58. def f(x):
  59. """Higher-level method to do forward_prop in the
  60. whole swarm.
  61. Inputs
  62. ------
  63. x: numpy.ndarray of shape (n_particles, dimensions)
  64. The swarm that will perform the search
  65. Returns
  66. -------
  67. numpy.ndarray of shape (n_particles, )
  68. The computed loss for each particle
  69. """
  70. n_particles = x.shape[0]
  71. j = [forward_prop(x[i]) for i in range(n_particles)]
  72. return np.array(j)
  73. # Initialize swarm
  74. options = {'c1': 0.5, 'c2': 0.3, 'w':0.9}
  75. # Call instance of PSO
  76. dimensions = (n_inputs * n_hidden) + (n_hidden * n_classes) + n_hidden + n_classes
  77. optimizer = ps.single.GlobalBestPSO(n_particles=100, dimensions=dimensions, options=options)
  78. # Perform optimization
  79. cost, pos = optimizer.optimize(f, iters=1000)
  80. def predict(pos):
  81. """
  82. Use the trained weights to perform class predictions.
  83. Inputs
  84. ------
  85. pos: numpy.ndarray
  86. Position matrix found by the swarm. Will be rolled
  87. into weights and biases.
  88. """
  89. logits = logits_function(pos)
  90. y_pred = np.argmax(logits, axis=1)
  91. return y_pred
  92. (predict(pos) == y).mean()