
Python: Solving a Nonlinear Regression Problem with a Neural Network (Detailed Example)


This article works through an example of using a neural network in Python to solve a nonlinear regression problem. It is shared for your reference; the details follow.

Problem description

Neural networks are usually used for classification, but sometimes we also use them for regression analysis.
Take the problem in this article:
We know the amount of toxic substance originally present in an organism. The organism is then treated by injecting a substance into it, and after some time the amount of toxic substance in its body is measured again.
The problem therefore has two inputs, both scalars: the dose of the toxic substance and the dose of the injected substance. There is one output: the amount of toxic substance in the organism some time after the treatment.
The data are shown in the figure below:

In the figure, Dose of Mycotoxins is the toxic substance and Dose of QCT is the treatment drug.
The numbers on the blue background are the output values.
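To make the input/output layout concrete, here is a minimal sketch of how the data could be arranged for the model below. The dose values are hypothetical placeholders (the table from the figure is not reproduced here): each row of X holds the two scalar inputs, and y holds the corresponding measured output.

import numpy as np
# each row of X: [dose of mycotoxins, dose of QCT]; placeholder values, not the article's data
X = np.array([[  0.0,   0.0],
              [200.0,  50.0],
              [400.0, 100.0]], dtype=np.float32)
# amount of toxic substance measured after treatment (placeholders as well)
y = np.array([10.0, 80.0, 150.0], dtype=np.float32)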

Notes

Since this is regression analysis, the last layer is not activated; its output is used directly.
The program uses the sigmoid function for activation.
The program is required to be reasonably reusable: the hidden layers can be specified.

In addition, note that:
The data are preprocessed, i.e. the mean is subtracted and the result is divided by the standard deviation; otherwise the sigmoid would cause vanishing gradients (a short sketch follows these notes).
This is because the raw values are fairly large, e.g. 200, and for an input of 200 the gradient of the sigmoid function is essentially zero.
At the same time, BN (batch normalization) is applied before every activation.
Otherwise the vanishing-gradient problem would also appear, for the same reason as with unpreprocessed data.
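Continuing from the data sketch above, here is a minimal illustration of this preprocessing and of why it matters; the saturation example only needs NumPy:

import numpy as np
# standardize each input column: subtract the mean, divide by the standard deviation
X_norm = (X - X.mean(axis=0)) / (X.std(axis=0) + 1e-8)
# without this step, a raw input such as 200 saturates the sigmoid:
s = 1.0 / (1.0 + np.exp(-200.0))
print(s, s * (1.0 - s))   # prints 1.0 0.0 -- the gradient has vanished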

Program

The program consists of two parts: one is the model framework, and the other trains the model.

Part one:

# coding=utf-8
import numpy as np
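# affine (fully connected) layer: flattens x to (N, D) and computes x.dot(w) + b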
def basic_forard(x, w, b):
  x = x.reshape(x.shape[0], -1)
  out = np.dot(x, w) + b
  cache = (x, w, b)
  return out, cache
def basic_backward(dout, cache):
  x, w, b = cache
  dout = np.array(dout)
  dx = np.dot(dout, w.T)
  # dx = np.reshape(dx, x.shape)
  # x = x.reshape(x.shape[0], -1)
  dw = np.dot(x.T, dout)
  db = np.reshape(np.sum(dout, axis=0), b.shape)
  return dx, dw, db
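# batch normalization forward pass; running mean/variance are kept in bn_param for test-time use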
def batchnorm_forward(x, gamma, beta, bn_param):
  mode = bn_param['mode']
  eps = bn_param.get('eps', 1e-5)
  momentum = bn_param.get('momentum', 0.9)
  N, D = x.shape
  running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
  running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
  out, cache = None, None
  if mode == 'train':
    sample_mean = np.mean(x, axis=0)
    sample_var = np.var(x, axis=0)
    x_hat = (x - sample_mean) / (np.sqrt(sample_var + eps))
    out = gamma * x_hat + beta
    cache = (gamma, x, sample_mean, sample_var, eps, x_hat)
    running_mean = momentum * running_mean + (1 - momentum) * sample_mean
    running_var = momentum * running_var + (1 - momentum) * sample_var
  elif mode == 'test':
    scale = gamma / (np.sqrt(running_var + eps))
    out = x * scale + (beta - running_mean * scale)
  else:
    raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
  bn_param['running_mean'] = running_mean
  bn_param['running_var'] = running_var
  return out, cache
def batchnorm_backward(dout, cache):
  gamma, x, u_b, sigma_squared_b, eps, x_hat = cache
  N = x.shape[0]
  dx_1 = gamma * dout
  dx_2_b = np.sum((x - u_b) * dx_1, axis=0)
  dx_2_a = ((sigma_squared_b + eps) ** -0.5) * dx_1
  dx_3_b = (-0.5) * ((sigma_squared_b + eps) ** -1.5) * dx_2_b
  dx_4_b = dx_3_b * 1
  dx_5_b = np.ones_like(x) / N * dx_4_b
  dx_6_b = 2 * (x - u_b) * dx_5_b
  dx_7_a = dx_6_b * 1 + dx_2_a * 1
  dx_7_b = dx_6_b * 1 + dx_2_a * 1
  dx_8_b = -1 * np.sum(dx_7_b, axis=0)
  dx_9_b = np.ones_like(x) / N * dx_8_b
  dx_10 = dx_9_b + dx_7_a
  dgamma = np.sum(x_hat * dout, axis=0)
  dbeta = np.sum(dout, axis=0)
  dx = dx_10
  return dx, dgamma, dbeta
# def relu_forward(x):
#   out = None
#   out = np.maximum(0,x)
#   cache = x
#   return out, cache
#
#
# def relu_backward(dout, cache):
#   dx, x = None, cache
#   dx = (x >= 0) * dout
#   return dx
def sigmoid_forward(x):
  x = x.reshape(x.shape[0], -1)
  out = 1 / (1 + np.exp(-1 * x))
  cache = out
  return out, cache
def sigmoid_backward(dout, cache):
  out = cache
  dx = out * (1 - out)
  dx *= dout
  return dx
def basic_sigmoid_forward(x, w, b):
  basic_out, basic_cache = basic_forard(x, w, b)
  sigmoid_out, sigmoid_cache = sigmoid_forward(basic_out)
  cache = (basic_cache, sigmoid_cache)
  return sigmoid_out, cache
# def basic_relu_forward(x, w, b):
#   basic_out, basic_cache = basic_forard(x, w, b)
#   relu_out, relu_cache = relu_forward(basic_out)
#   cache = (basic_cache, relu_cache)
#
#   return relu_out, cache
def basic_sigmoid_backward(dout, cache):
  basic_cache, sigmoid_cache = cache
  dx_sigmoid = sigmoid_backward(dout, sigmoid_cache)
  dx, dw, db = basic_backward(dx_sigmoid, basic_cache)
  return dx, dw, db
# def basic_relu_backward(dout, cache):
#   basic_cache, relu_cache = cache
#   dx_relu = relu_backward(dout, relu_cache)
#   dx, dw, db = basic_backward(dx_relu, basic_cache)
#
#   return dx, dw, db
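# mean squared error averaged over samples, plus its gradient with respect to the predictions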
def mean_square_error(x, y):
  x = np.ravel(x)
  loss = 0.5 * np.sum(np.square(y - x)) / x.shape[0]
  dx = (x - y).reshape(-1, 1) / x.shape[0]  # gradient of the averaged loss w.r.t. the predictions
  return loss, dx
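# fully connected network: sigmoid-activated hidden layers (optionally batch-normalized), linear output layer for regression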
class muliti_layer_net(object):
  def __init__(self, hidden_dim, input_dim=2, num_classes=2, weight_scale=0.01, dtype=np.float32, seed=None, reg=0.0, use_batchnorm=True):
    self.num_layers = 1 + len(hidden_dim)
    self.dtype = dtype
    self.reg = reg
    self.params = {}
    self.weight_scale = weight_scale
    self.use_batchnorm = use_batchnorm
    # init all parameters
    layers_dims = [input_dim] + hidden_dim + [num_classes]
    for i in range(self.num_layers):
      self.params['W' + str(i + 1)] = np.random.randn(layers_dims[i], layers_dims[i + 1]) * self.weight_scale
      self.params['b' + str(i + 1)] = np.zeros((1, layers_dims[i + 1]))
      if self.use_batchnorm and i < (self.num_layers - 1):
        self.params['gamma' + str(i + 1)] = np.ones((1, layers_dims[i + 1]))
        self.params['beta' + str(i + 1)] = np.zeros((1, layers_dims[i + 1]))
    self.bn_params = [] # list
    if self.use_batchnorm:
      self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
  def loss(self, X, y=None):
    X = X.astype(self.dtype)
    mode = 'test' if y is None else 'train'
    # keep the batchnorm layers in sync with the current mode (train vs. test)
    if self.use_batchnorm:
      for bn_param in self.bn_params:
        bn_param['mode'] = mode
    # compute the forward pass and cache
    basic_sigmoid_cache = {}
    layer_out = {}
    layer_out[0] = X
    out_basic_forward, cache_basic_forward = {}, {}
    out_bn, cache_bn = {}, {}
    out_sigmoid_forward, cache_sigmoid_forward = {}, {}
    for lay in range(self.num_layers - 1):
      # print('lay: %f' % lay)
      W = self.params['W' + str(lay + 1)]
      b = self.params['b' + str(lay + 1)]
      if self.use_batchnorm:
        gamma, beta = self.params['gamma' + str(lay + 1)], self.params['beta' + str(lay + 1)]
        out_basic_forward[lay], cache_basic_forward[lay] = basic_forard(np.array(layer_out[lay]), W, b)
        out_bn[lay], cache_bn[lay] = batchnorm_forward(np.array(out_basic_forward[lay]), gamma, beta, self.bn_params[lay])
        layer_out[lay + 1], cache_sigmoid_forward[lay] = sigmoid_forward(np.array(out_bn[lay]))
         # = out_sigmoid_forward[lay]
      else:
        layer_out[lay+1], basic_sigmoid_cache[lay] = basic_sigmoid_forward(layer_out[lay], W, b)
    score, basic_cache = basic_forard(layer_out[self.num_layers-1], self.params['W' + str(self.num_layers)], self.params['b' + str(self.num_layers)])
    # print('Congratulations: Loss is computed successfully!')
    if mode == 'test':
      return score
    # compute the gradient
    grads = {}
    loss, dscore = mean_square_error(score, y)
    dx, dw, db = basic_backward(dscore, basic_cache)
    grads['W' + str(self.num_layers)] = dw  # the L2 regularization term is added for all layers in the loop below
    grads['b' + str(self.num_layers)] = db
    loss += 0.5 * self.reg * np.sum(self.params['W' + str(self.num_layers)] ** 2)
    dbn, dsigmoid = {}, {}
    for index in range(self.num_layers - 1):
      lay = self.num_layers - 1 - index - 1
      loss += 0.5 * self.reg * np.sum(self.params['W' + str(lay + 1)] ** 2)
      if self.use_batchnorm:
        dsigmoid[lay] = sigmoid_backward(dx, cache_sigmoid_forward[lay])
        dbn[lay], grads['gamma' + str(lay + 1)], grads['beta' + str(lay + 1)] = batchnorm_backward(dsigmoid[lay], cache_bn[lay])
        dx, grads['W' + str(lay + 1)], grads['b' + str(lay + 1)] = basic_backward(dbn[lay], cache_basic_forward[lay])
      else:
        dx, dw, db = basic_sigmoid_backward(dx, basic_sigmoid_cache[lay])
        grads['W' + str(lay + 1)] = dw
        grads['b' + str(lay + 1)] = db
    for lay in range(self.num_layers):
      grads['W' + str(lay + 1)] += self.reg * self.params['W' + str(lay + 1)]
    return loss, grads
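# SGD with momentum parameter update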
def sgd_momentum(w, dw, config=None):
  if config is None: config = {}
  config.setdefault('learning_rate', 1e-2)
  config.setdefault('momentum', 0.9)
  v = config.get('velocity', np.zeros_like(w))
  v = config['momentum'] * v - config['learning_rate'] * dw
  next_w = w + v
  config['velocity'] = v
  return next_w, config
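# training driver: repeatedly runs full-batch gradient steps and keeps the parameters with the lowest training loss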
class Solver(object):
  def __init__(self, model, data, **kwargs):
    self.model = model
    self.X_train = data['X_train']
    self.y_train = data['y_train']
    self.X_val = data['X_val']
    self.y_val = data['y_val']
    self.update_rule = kwargs.pop('update_rule', 'sgd_momentum')
    self.optim_config = kwargs.pop('optim_config', {})
    self.lr_decay = kwargs.pop('lr_decay', 1.0)
    self.batch_size = kwargs.pop('batch_size', 100)
    self.num_epochs = kwargs.pop('num_epochs', 10)
    self.weight_scale = kwargs.pop('weight_scale', 0.01)
    self.print_every = kwargs.pop('print_every', 10)
    self.verbose = kwargs.pop('verbose', True)
    if len(kwargs) > 0:
      extra = ', '.join('"%s"' % k for k in kwargs.keys())
      raise ValueError('Unrecognized arguments %s' % extra)
    self._reset()
  def _reset(self):
    self.epoch = 100
    self.best_val_acc = 0
    self.best_params = {}
    self.loss_history = []
    self.train_acc_history = []
    self.val_acc_history = []
    self.optim_configs = {}
    for p in self.model.params:
      d = {k: v for k, v in self.optim_config.items()}
      self.optim_configs[p] = d
  def _step(self):
    loss, grads = self.model.loss(self.X_train, self.y_train)
    self.loss_history.append(loss)
    for p, w in self.model.params.items():
      dw = grads[p]
      config = self.optim_configs[p]
      next_w, next_config = sgd_momentum(w, dw, config)
      self.model.params[p] = next_w
      self.optim_configs[p] = next_config
    return loss
  def train(self):
    min_loss = 100000000
    num_train = self.X_train.shape[0]
    iterations_per_epoch = max(num_train / self.batch_size, 1)
    num_iterations = self.num_epochs * iterations_per_epoch
    for t in range(int(num_iterations)):
      loss = self._step()
      if self.verbose:
#         print(self.loss_history[-1])
        pass
      if loss < min_loss:
        min_loss = loss
        for k, v in self.model.params.items():
          self.best_params[k] = v.copy()
    self.model.params = self.best_params
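
The training script is the second part of the program and is not reproduced above. Purely as an illustrative sketch (not the author's code), the classes from part one could be wired together roughly as follows, assuming X_norm and y are the standardized inputs and placeholder targets from the earlier data sketches, and with arbitrary choices for the hidden sizes, learning rate and epoch count:

model = muliti_layer_net(hidden_dim=[10, 10], input_dim=2, num_classes=1,
                         weight_scale=0.01, reg=0.0, use_batchnorm=True)
data = {'X_train': X_norm, 'y_train': y, 'X_val': X_norm, 'y_val': y}
solver = Solver(model, data, optim_config={'learning_rate': 1e-2},
                num_epochs=1000, verbose=False)
solver.train()
pred = model.loss(X_norm)   # with y omitted, loss() returns the scores, i.e. the predictions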

