rpropgradient.go 3.7 KB

/*
 * MIT License
 *
 * Copyright (c) 2019 Alexey Edelev <semlanik@gmail.com>
 *
 * This file is part of NeuralNetwork project https://git.semlanik.org/semlanik/NeuralNetwork
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this
 * software and associated documentation files (the "Software"), to deal in the Software
 * without restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software, and
 * to permit persons to whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies
 * or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
 * PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
 * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
package gradients

import (
	"math"

	neuralnetwork "git.semlanik.org/semlanik/NeuralNetwork/neuralnetwork"
	mat "gonum.org/v1/gonum/mat"
)
// rPropGradient implements resilient backpropagation (RProp): every weight
// keeps its own adaptive step size (delta) that grows while the gradient
// sign stays stable and shrinks when it flips.
type rPropGradient struct {
	gradientsPrev *mat.Dense // gradients stored at the previous ApplyDelta call
	gradients     *mat.Dense // gradients accumulated over the current batch
	deltas        *mat.Dense // per-weight step sizes
	batchSize     int
	config        RPropConfig
}

// RPropConfig holds the RProp hyperparameters.
type RPropConfig struct {
	NuPlus   float64 // factor by which a step grows on a stable gradient sign
	NuMinus  float64 // factor by which a step shrinks on a sign change
	DeltaMax float64 // upper bound for a step size
	DeltaMin float64 // lower bound for a step size
}
// NewRPropInitializer returns a GradientDescentInitializer that creates RProp
// state per layer: a column vector for biases and an r x c matrix for weights.
func NewRPropInitializer(config RPropConfig) neuralnetwork.GradientDescentInitializer {
	return func(nn *neuralnetwork.NeuralNetwork, layer, gradientType int) interface{} {
		if gradientType == neuralnetwork.BiasGradient {
			return newRPropGradient(nn.Sizes[layer], 1, config)
		}
		return newRPropGradient(nn.Sizes[layer], nn.Sizes[layer-1], config)
	}
}
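
// Example wiring (a sketch: the constants below are the standard RProp
// defaults from Riedmiller & Braun, not values mandated by this package):
//
//	config := gradients.RPropConfig{
//		NuPlus:   1.2,  // grow step by 20% on a stable sign
//		NuMinus:  0.5,  // halve step on a sign change
//		DeltaMax: 50.0, // cap on the step size
//		DeltaMin: 1e-6, // floor on the step size
//	}
//	initializer := gradients.NewRPropInitializer(config)
//	// initializer can then be passed wherever the library expects a
//	// neuralnetwork.GradientDescentInitializer.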
func newRPropGradient(r, c int, config RPropConfig) (g *rPropGradient) {
	g = &rPropGradient{}

	// 0.1 is the conventional initial RProp step size.
	deltas := make([]float64, r*c)
	for j := range deltas {
		deltas[j] = 0.1
	}
	g.gradients = mat.NewDense(r, c, nil)
	g.gradientsPrev = mat.NewDense(r, c, nil)
	g.deltas = mat.NewDense(r, c, deltas)
	g.config = config
	return
}
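
// ApplyDelta follows the standard RProp update rule. For each weight w[i,j],
// with the batch-averaged gradient g and the previously stored gradient gPrev:
//
//	g*gPrev > 0: delta = min(NuPlus*delta, DeltaMax);  w -= sign(g)*delta
//	g*gPrev < 0: delta = max(NuMinus*delta, DeltaMin); w unchanged, gPrev reset to 0
//	g*gPrev = 0: w -= sign(g)*delta (first step, or the step after a reset)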
func (g *rPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
	nuPlus := g.config.NuPlus
	nuMinus := g.config.NuMinus

	deltaMax := g.config.DeltaMax
	deltaMin := g.config.DeltaMin

	result = &mat.Dense{}

	gradient := g.gradients
	r, c := gradient.Dims()

	// Average the accumulated gradients over the batch.
	gradient.Scale(1.0/float64(g.batchSize), gradient)

	result.Apply(func(i, j int, v float64) (outV float64) {
		gradientSign := g.gradientsPrev.At(i, j) * gradient.At(i, j)
		if gradientSign > 0 {
			// Same sign as the previous step: grow the step and move along the gradient.
			g.deltas.Set(i, j, math.Min(nuPlus*g.deltas.At(i, j), deltaMax))
			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)

			g.gradientsPrev.Set(i, j, gradient.At(i, j))
		} else if gradientSign < 0 {
			// Sign flip: the previous step overshot a minimum. Shrink the step,
			// skip the weight update, and reset the stored gradient so the next
			// call takes the zero-sign branch.
			outV = v
			g.deltas.Set(i, j, math.Max(nuMinus*g.deltas.At(i, j), deltaMin))

			g.gradientsPrev.Set(i, j, 0.0)
		} else {
			// First step, or the step right after a sign flip.
			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)

			g.gradientsPrev.Set(i, j, gradient.At(i, j))
		}
		return
	}, m)

	// Reset the accumulator for the next batch.
	g.batchSize = 0
	g.gradients = mat.NewDense(r, c, nil)
	return result
}
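
// sign is referenced above but not defined in this file; in the repository it
// presumably lives elsewhere in the gradients package. A minimal definition
// consistent with its use here (an assumption, not the project's verbatim
// code) is:
func sign(v float64) float64 {
	if v > 0 {
		return 1.0
	}
	if v < 0 {
		return -1.0
	}
	return 0.0
}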
// AccumGradients adds one batch worth of gradients into the accumulator.
func (g *rPropGradient) AccumGradients(gradient mat.Matrix, batchSize int) {
	g.gradients.Apply(func(i, j int, v float64) float64 {
		v += gradient.At(i, j)
		return v
	}, g.gradients)
	g.batchSize += batchSize
}
// Gradients returns the gradients accumulated so far. The receiver is a
// pointer for consistency with the other methods of rPropGradient.
func (g *rPropGradient) Gradients() *mat.Dense {
	return g.gradients
}