import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris

# Import PySwarms
import pyswarms as ps

data = load_iris()

# Store the features as X and the labels as y
X = data.data
y = data.target

# Network architecture: 4 input features, one hidden layer of 20 units,
# and 3 output classes (one per Iris species)
n_inputs = 4
n_hidden = 20
n_classes = 3

num_samples = 150  # Number of samples in the Iris dataset


def logits_function(p):
    """Roll back the weights and biases and compute the layer-2 logits.

    Inputs
    ------
    p: np.ndarray
        A flat parameter vector containing an unrolled version of the
        weights and biases.

    Returns
    -------
    numpy.ndarray of logits for layer 2
    """
    # Roll back the weights and biases from the flat vector:
    # 80 = n_inputs * n_hidden, 100 = 80 + n_hidden,
    # 160 = 100 + n_hidden * n_classes, 163 = 160 + n_classes
    W1 = p[0:80].reshape((n_inputs, n_hidden))
    b1 = p[80:100].reshape((n_hidden,))
    W2 = p[100:160].reshape((n_hidden, n_classes))
    b2 = p[160:163].reshape((n_classes,))

    # Perform forward propagation
    z1 = X.dot(W1) + b1       # Pre-activation in Layer 1
    a1 = np.tanh(z1)          # Activation in Layer 1
    logits = a1.dot(W2) + b2  # Pre-activation in Layer 2
    return logits             # Logits for Layer 2


def forward_prop(params):
    """Forward propagation as the objective function.

    This computes the forward propagation of the neural network, as well
    as the loss.

    Inputs
    ------
    params: np.ndarray
        A flat parameter vector containing an unrolled version of the
        weights and biases.

    Returns
    -------
    float
        The computed negative log-likelihood loss given the parameters
    """
    logits = logits_function(params)

    # Compute the softmax of the logits (shifted by the row-wise max
    # for numerical stability)
    exp_scores = np.exp(logits - np.max(logits, axis=1, keepdims=True))
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

    # Compute the negative log-likelihood
    correct_logprobs = -np.log(probs[range(num_samples), y])
    loss = np.sum(correct_logprobs) / num_samples

    return loss


def f(x):
    """Higher-level method to run forward_prop on the whole swarm.

    Inputs
    ------
    x: numpy.ndarray of shape (n_particles, dimensions)
        The swarm that will perform the search

    Returns
    -------
    numpy.ndarray of shape (n_particles, )
        The computed loss for each particle
    """
    n_particles = x.shape[0]
    j = [forward_prop(x[i]) for i in range(n_particles)]
    return np.array(j)


# Initialize swarm hyperparameters: cognitive (c1) and social (c2)
# coefficients, and inertia weight (w)
options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}

# Call an instance of PSO; one dimension per network parameter
dimensions = (n_inputs * n_hidden) + (n_hidden * n_classes) + n_hidden + n_classes
optimizer = ps.single.GlobalBestPSO(n_particles=100, dimensions=dimensions, options=options)

# Perform optimization
cost, pos = optimizer.optimize(f, iters=100)


def predict(pos):
    """Use the trained weights to perform class predictions.

    Inputs
    ------
    pos: numpy.ndarray
        Position vector found by the swarm. Will be rolled back into
        weights and biases.

    Returns
    -------
    numpy.ndarray
        The predicted class label for each sample
    """
    logits = logits_function(pos)
    y_pred = np.argmax(logits, axis=1)
    return y_pred


# Training accuracy: fraction of samples classified correctly on the
# same data the swarm was optimized on (no held-out test set)
print((predict(pos) == y).mean())
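
# --- Optional: visualize the optimization history ---
# A minimal sketch of plotting the best cost per iteration, which is what
# the matplotlib import at the top is for. It assumes the optimizer above
# has already run; `optimizer.cost_history` and `plot_cost_history` are
# part of the PySwarms API.
from pyswarms.utils.plotters import plot_cost_history

plot_cost_history(cost_history=optimizer.cost_history)
plt.show()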