/*
 * MIT License
 *
 * Copyright (c) 2019 Alexey Edelev <semlanik@gmail.com>
 *
 * This file is part of NeuralNetwork project https://git.semlanik.org/semlanik/NeuralNetwork
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this
 * software and associated documentation files (the "Software"), to deal in the Software
 * without restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software, and
 * to permit persons to whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies
 * or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
 * PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
 * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

package gradients

import (
	"math"

	neuralnetwork "git.semlanik.org/semlanik/NeuralNetwork/neuralnetwork"
	mat "gonum.org/v1/gonum/mat"
)

// Resilient backpropagation (RProp): every weight keeps its own adaptive step
// size (delta) that grows while the gradient keeps its sign and shrinks when
// the sign flips.
type rPropGradient struct {
	gradientsPrev *mat.Dense
	gradients     *mat.Dense
	deltas        *mat.Dense
	batchSize     int
	config        RPropConfig
}

// RPropConfig holds the RProp hyperparameters.
type RPropConfig struct {
	NuPlus   float64 // step-size increase factor (eta+)
	NuMinus  float64 // step-size decrease factor (eta-)
	DeltaMax float64 // upper bound for a per-weight step size
	DeltaMin float64 // lower bound for a per-weight step size
}
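
// The helper below is an illustrative addition, not part of the original file:
// it sketches an RPropConfig filled with the parameter values commonly
// recommended in the RProp literature (eta+ = 1.2, eta- = 0.5, step bounds
// 1e-6 .. 50). Only the RPropConfig field names above come from this package;
// the helper name is hypothetical.
func newDefaultRPropConfigSketch() RPropConfig {
	return RPropConfig{
		NuPlus:   1.2,
		NuMinus:  0.5,
		DeltaMax: 50.0,
		DeltaMin: 0.000001,
	}
}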

// NewRPropInitializer returns a GradientDescentInitializer that creates RProp
// state per layer: a column vector for the biases and an r x c matrix for the
// weights between layer-1 and layer.
func NewRPropInitializer(config RPropConfig) neuralnetwork.GradientDescentInitializer {
	return func(nn *neuralnetwork.NeuralNetwork, layer, gradientType int) interface{} {
		if gradientType == neuralnetwork.BiasGradient {
			return newRPropGradient(nn.Sizes[layer], 1, config)
		}
		return newRPropGradient(nn.Sizes[layer], nn.Sizes[layer-1], config)
	}
}
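
// Usage sketch, hedged: the initializer is meant to be handed to the
// neuralnetwork package when a network is constructed, along the lines of
//
//	nn, _ := neuralnetwork.NewNeuralNetwork([]int{784, 30, 10}, NewRPropInitializer(newDefaultRPropConfigSketch()))
//
// The NewNeuralNetwork name and signature are assumptions for illustration and
// should be checked against the neuralnetwork package; only NewRPropInitializer
// and RPropConfig come from this file.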

func newRPropGradient(r, c int, config RPropConfig) (g *rPropGradient) {
	g = &rPropGradient{}

	deltas := make([]float64, r*c)
	// Every per-weight step size starts at 0.1.
	for j := range deltas {
		deltas[j] = 0.1
	}
	g.gradients = mat.NewDense(r, c, nil)
	g.gradientsPrev = mat.NewDense(r, c, nil)
	g.deltas = mat.NewDense(r, c, deltas)
	g.config = config
	return
}

// ApplyDelta produces the updated weights (or biases) for the matrix m using
// the gradients accumulated since the previous call, then resets the
// accumulator for the next batch.
func (g *rPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
	nuPlus := g.config.NuPlus
	nuMinus := g.config.NuMinus

	deltaMax := g.config.DeltaMax
	deltaMin := g.config.DeltaMin

	result = &mat.Dense{}

	gradient := g.gradients
	r, c := gradient.Dims()

	// Average the accumulated gradients over the batch size.
	dividers := make([]float64, r*c)
	for i := range dividers {
		dividers[i] = float64(g.batchSize)
	}
	gradientDivider := mat.NewDense(r, c, dividers)
	gradient.DivElem(gradient, gradientDivider)

	result.Apply(func(i, j int, v float64) (outV float64) {
		gradientSign := g.gradientsPrev.At(i, j) * gradient.At(i, j)
		if gradientSign > 0 {
			// Same sign as the previous gradient: grow the step size (up to
			// deltaMax) and step against the gradient.
			g.deltas.Set(i, j, math.Min(nuPlus*g.deltas.At(i, j), deltaMax))
			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)
			g.gradientsPrev.Set(i, j, gradient.At(i, j))
		} else if gradientSign < 0 {
			// Sign flip: skip this update, shrink the step size (down to
			// deltaMin) and zero the stored gradient so the next update takes
			// the neutral branch.
			outV = v
			g.deltas.Set(i, j, math.Max(nuMinus*g.deltas.At(i, j), deltaMin))
			g.gradientsPrev.Set(i, j, 0.0)
		} else {
			// Neutral case (previous gradient is zero): take a plain step with
			// the current step size.
			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)
			g.gradientsPrev.Set(i, j, gradient.At(i, j))
		}
		return
	}, m)

	g.batchSize = 0
	g.gradients = mat.NewDense(r, c, nil)
	return result
}

// AccumGradients adds gradient element-wise to the accumulator and records how
// many samples contributed to it.
func (g *rPropGradient) AccumGradients(gradient mat.Matrix, batchSize int) {
	g.gradients.Apply(func(i, j int, v float64) float64 {
		v += gradient.At(i, j)
		return v
	}, g.gradients)
	g.batchSize += batchSize
}

// Gradients returns the gradients accumulated since the last ApplyDelta call.
func (g rPropGradient) Gradients() *mat.Dense {
	return g.gradients
}