package neuralnetworkbase

import (
    "math"

    "gonum.org/v1/gonum/mat"
)
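
// GradientDescender is shown here for clarity: both descenders below share
// this method set, so the package presumably exposes them behind some such
// interface. The name and definition are an assumption, not part of the
// original listing.
type GradientDescender interface {
    // ApplyDelta returns updated weights for m, given a gradient.
    ApplyDelta(m mat.Matrix, gradient mat.Matrix) *mat.Dense
    // AccumGradients adds a gradient to the running batch total.
    AccumGradients(gradient mat.Matrix)
}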

// RPropGradient implements the resilient backpropagation (RProp) update
// rule: each weight keeps its own step size, which grows while the
// gradient holds its sign and shrinks when the sign flips.
type RPropGradient struct {
    GradientsPrev *mat.Dense // gradients stored from the previous update step
    Gradients     *mat.Dense // gradients accumulated over the current batch
    Deltas        *mat.Dense // per-weight step sizes
    batchSize     int        // number of gradients accumulated so far
}

// NewRPropGradient returns an RProp descender for an r×c weight matrix,
// with every per-weight step size initialized to 0.1.
func NewRPropGradient(r, c int) (g *RPropGradient) {
    g = &RPropGradient{}
    deltas := make([]float64, r*c)
    for j := range deltas {
        deltas[j] = 0.1
    }
    g.Gradients = mat.NewDense(r, c, nil)
    g.GradientsPrev = mat.NewDense(r, c, nil)
    g.Deltas = mat.NewDense(r, c, deltas)
    return
}

// ApplyDelta performs one RProp update step on the weight matrix m using
// the gradients accumulated via AccumGradients; the second argument is
// ignored because RProp works off its own accumulator. It returns the
// updated weights and resets the accumulator for the next batch.
func (g *RPropGradient) ApplyDelta(m mat.Matrix, _ mat.Matrix) (result *mat.Dense) {
    // Standard RProp hyperparameters: growth and shrink factors for the
    // step sizes, and bounds that keep them from exploding or vanishing.
    nuPlus := 1.2
    nuMinus := 0.5
    deltaMax := 50.0
    deltaMin := 0.000001

    result = &mat.Dense{}
    gradient := g.Gradients

    // Average the accumulated gradients over the batch, guarding against
    // division by zero when no gradients have been accumulated.
    if g.batchSize > 0 {
        gradient.Scale(1/float64(g.batchSize), gradient)
    }

    result.Apply(func(i, j int, v float64) (outV float64) {
        gradientSign := g.GradientsPrev.At(i, j) * gradient.At(i, j)
        if gradientSign > 0 {
            // Gradient kept its sign: grow the step size (capped at
            // deltaMax) and step against the gradient.
            g.Deltas.Set(i, j, math.Min(nuPlus*g.Deltas.At(i, j), deltaMax))
            outV = v - sign(gradient.At(i, j))*g.Deltas.At(i, j)
            g.GradientsPrev.Set(i, j, gradient.At(i, j))
        } else if gradientSign < 0 {
            // Sign flipped: the last step overshot a minimum. Revert it,
            // shrink the step size (floored at deltaMin), and zero the
            // stored gradient so the next step is taken unconditionally.
            outV = v + sign(g.GradientsPrev.At(i, j))*g.Deltas.At(i, j)
            g.Deltas.Set(i, j, math.Max(nuMinus*g.Deltas.At(i, j), deltaMin))
            g.GradientsPrev.Set(i, j, 0.0)
        } else {
            // One of the gradients is zero: take a plain step with the
            // current step size.
            outV = v - sign(gradient.At(i, j))*g.Deltas.At(i, j)
            g.GradientsPrev.Set(i, j, gradient.At(i, j))
        }
        return
    }, m)

    // Clear the accumulator so the next batch starts from zero; resetting
    // only batchSize would let stale gradients leak into the next batch.
    g.Gradients.Zero()
    g.batchSize = 0
    return result
}
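
// sign is called by ApplyDelta above but is not part of this listing; it
// is presumably defined elsewhere in the package. A minimal implementation
// consistent with its use here:
func sign(v float64) float64 {
    switch {
    case v > 0:
        return 1
    case v < 0:
        return -1
    default:
        return 0
    }
}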

// AccumGradients adds the given gradient matrix to the running batch total.
func (g *RPropGradient) AccumGradients(gradient mat.Matrix) {
    g.Gradients.Add(g.Gradients, gradient)
    g.batchSize++
}

// BackPropGradient implements plain gradient descent with a fixed
// learning rate alpha.
type BackPropGradient struct {
    alpha float64 // learning rate
}
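
// NewBackPropGradient returns a plain gradient descender with the given
// learning rate. This constructor is a sketch mirroring NewRPropGradient
// above; since alpha is unexported, callers outside the package need some
// such constructor, but the name is an assumption.
func NewBackPropGradient(alpha float64) *BackPropGradient {
    return &BackPropGradient{alpha: alpha}
}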

// ApplyDelta performs a single gradient descent step: scale the gradient
// by the learning rate and subtract it from the weights.
func (g *BackPropGradient) ApplyDelta(m mat.Matrix, gradient mat.Matrix) (result *mat.Dense) {
    scaled := &mat.Dense{}
    result = &mat.Dense{}

    // scaled = alpha * gradient
    scaled.Scale(g.alpha, gradient)

    // result = m - scaled
    result.Sub(m, scaled)
    return result
}

// AccumGradients is a no-op: plain backpropagation applies each gradient
// as it arrives and does not accumulate over a batch.
func (g *BackPropGradient) AccumGradients(gradient mat.Matrix) {
}
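
// exampleRPropUsage is a minimal usage sketch, not part of the original
// listing: accumulate per-sample gradients over a batch, then apply one
// RProp step to the weights. Shapes and values are illustrative only.
func exampleRPropUsage() *mat.Dense {
    weights := mat.NewDense(2, 2, []float64{0.5, -0.3, 0.1, 0.8})
    descender := NewRPropGradient(2, 2)

    // Gradients as they might arrive from two training samples.
    descender.AccumGradients(mat.NewDense(2, 2, []float64{0.2, -0.1, 0.4, 0.0}))
    descender.AccumGradients(mat.NewDense(2, 2, []float64{0.1, -0.3, 0.2, 0.1}))

    // One update step; RProp ignores the second argument.
    return descender.ApplyDelta(weights, nil)
}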