#### Back Propagation Neural Network
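
A self-contained NumPy implementation of a fully connected feed-forward network: each `DenseLayer` owns its weights and bias, the `BPNN` container chains the layers for the forward and backward passes, and training minimizes a summed squared error by plain gradient descent while re-plotting the loss live with matplotlib.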

```python
#!/usr/bin/python

"""

A framework for a Back Propagation Neural Network (BP) model

Easy to use:
    * add as many layers as you want!
    * clearly see how the loss decreases
Easy to expand:
    * more activation functions
    * more loss functions
    * more optimization methods

Author: Stephen Lee
Github : https://github.com/RiptideBo
Date: 2017.11.23

"""

import numpy as np
from matplotlib import pyplot as plt

def sigmoid(x: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-x))

class DenseLayer:
"""
Layers of BP neural network
"""

def __init__(
self, units, activation=None, learning_rate=None, is_input_layer=False
):
"""
common connected layer of bp network
:param units: numbers of neural units
:param activation: activation function
:param learning_rate: learning rate for paras
:param is_input_layer: whether it is input layer or not
"""
self.units = units
self.weight = None
self.bias = None
self.activation = activation
if learning_rate is None:
learning_rate = 0.3
self.learn_rate = learning_rate
self.is_input_layer = is_input_layer

    def initializer(self, back_units):
        rng = np.random.default_rng()
        self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units)))
        self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T
        if self.activation is None:
            self.activation = sigmoid

    def cal_gradient(self):
        # the activation function may be sigmoid or linear
        if self.activation == sigmoid:
            gradient_mat = np.dot(self.output, (1 - self.output).T)
            gradient_activation = np.diag(np.diag(gradient_mat))
        else:
            gradient_activation = 1
        return gradient_activation
    def forward_propagation(self, xdata):
        self.xdata = xdata
        if self.is_input_layer:
            # input layer
            self.wx_plus_b = xdata
            self.output = xdata
            return xdata
        else:
            self.wx_plus_b = np.dot(self.weight, self.xdata) - self.bias
            self.output = self.activation(self.wx_plus_b)
            return self.output

    def back_propagation(self, gradient):
        gradient_activation = self.cal_gradient()  # i * i matrix
        gradient = np.asmatrix(np.dot(gradient.T, gradient_activation))

        self._gradient_weight = np.asmatrix(self.xdata)
        self._gradient_bias = -1
        self._gradient_x = self.weight

        self.gradient_weight = np.dot(gradient.T, self._gradient_weight.T)
        self.gradient_bias = gradient * self._gradient_bias
        self.gradient = np.dot(gradient, self._gradient_x).T
        # upgrade: move along the negative gradient direction
        self.weight = self.weight - self.learn_rate * self.gradient_weight
        self.bias = self.bias - self.learn_rate * self.gradient_bias.T
        # updates the weights and bias according to learning rate (0.3 if undefined)
        return self.gradient

class BPNN:
    """
    Back Propagation Neural Network model
    """

    def __init__(self):
        self.layers = []
        self.train_mse = []
        self.fig_loss = plt.figure()
        self.ax_loss = self.fig_loss.add_subplot(1, 1, 1)

    def add_layer(self, layer):
        self.layers.append(layer)

    def build(self):
        for i, layer in enumerate(self.layers[:]):
            if i < 1:
                layer.is_input_layer = True
            else:
                layer.initializer(self.layers[i - 1].units)

    def summary(self):
        for i, layer in enumerate(self.layers[:]):
            print(f"------- layer {i} -------")
            print("weight.shape ", np.shape(layer.weight))
            print("bias.shape ", np.shape(layer.bias))

    def train(self, xdata, ydata, train_round, accuracy):
        self.train_round = train_round
        self.accuracy = accuracy

        self.ax_loss.hlines(self.accuracy, 0, self.train_round * 1.1)

        x_shape = np.shape(xdata)
        for _ in range(train_round):
            all_loss = 0
            for row in range(x_shape[0]):
                _xdata = np.asmatrix(xdata[row, :]).T
                _ydata = np.asmatrix(ydata[row, :]).T

                # forward propagation
                for layer in self.layers:
                    _xdata = layer.forward_propagation(_xdata)

                loss, gradient = self.cal_loss(_ydata, _xdata)
                all_loss = all_loss + loss

                # back propagation: the input_layer does not upgrade
                for layer in self.layers[:0:-1]:
                    gradient = layer.back_propagation(gradient)

            mse = all_loss / x_shape[0]
            self.train_mse.append(mse)

            self.plot_loss()

            if mse < self.accuracy:
                print("---- reached the target accuracy ----")
                return mse
        return None

    def cal_loss(self, ydata, ydata_):
        self.loss = np.sum(np.power((ydata - ydata_), 2))
        self.loss_gradient = 2 * (ydata_ - ydata)
        # vector (shape is the same as _ydata.shape)
        return self.loss, self.loss_gradient

    def plot_loss(self):
        if self.ax_loss.lines:
            self.ax_loss.lines[0].remove()
        self.ax_loss.plot(self.train_mse, "r-")
        plt.ion()
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.show()
        plt.pause(0.1)

def example():
    rng = np.random.default_rng()
    x = rng.normal(size=(10, 10))
    y = np.asarray(
        [
            [0.8, 0.4],
            [0.4, 0.3],
            [0.34, 0.45],
            [0.67, 0.32],
            [0.88, 0.67],
            [0.78, 0.77],
            [0.55, 0.66],
            [0.55, 0.43],
            [0.54, 0.1],
            [0.1, 0.5],
        ]
    )
    model = BPNN()
    for i in (10, 20, 30, 2):
        model.add_layer(DenseLayer(i))
    model.build()
    model.summary()
    model.train(xdata=x, ydata=y, train_round=100, accuracy=0.01)


if __name__ == "__main__":
    example()
```
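
For reference, here is a sketch of the math the listing implements (matching `cal_loss`, `cal_gradient`, and `back_propagation`; $\eta$ is the per-layer `learn_rate`, 0.3 by default):

$$L = \sum_k (\hat{y}_k - y_k)^2, \qquad \frac{\partial L}{\partial \hat{y}} = 2\,(\hat{y} - y)$$

$$\sigma'(z) = \sigma(z)\bigl(1 - \sigma(z)\bigr), \qquad W \leftarrow W - \eta\,\frac{\partial L}{\partial W}, \qquad b \leftarrow b - \eta\,\frac{\partial L}{\partial b}$$

Running the script opens a live matplotlib window that re-plots the per-epoch MSE (with the target `accuracy` drawn as a horizontal line) until the MSE drops below `accuracy` or `train_round` epochs elapse.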
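
The docstring's "more activation functions" claim is easy to act on. Below is a minimal, hypothetical sketch (the names `tanh` and `tanh_gradient` are illustrative, not part of the file): define the activation, then give `DenseLayer.cal_gradient` a matching branch that builds the diagonal Jacobian, just as the sigmoid branch does.

```python
import numpy as np


def tanh(x: np.ndarray) -> np.ndarray:
    """Hypothetical extra activation; not part of the original file."""
    return np.tanh(x)


def tanh_gradient(output: np.ndarray) -> np.ndarray:
    """Diagonal Jacobian of tanh from the layer's cached output,
    mirroring the sigmoid branch: d/dz tanh(z) = 1 - tanh(z) ** 2."""
    out = np.asarray(output).ravel()
    return np.diag(1 - out**2)


# A matching branch inside DenseLayer.cal_gradient could then read:
#     elif self.activation == tanh:
#         gradient_activation = tanh_gradient(self.output)
# and a hidden layer would opt in via DenseLayer(10, activation=tanh).
```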