@@ -45,9 +45,9 @@ type plusRPropGradient struct {
 
 func NewPlusRPropInitializer(config RPropConfig) neuralnetwork.GradientDescentInitializer {
 	return func(nn *neuralnetwork.NeuralNetwork, layer, gradientType int) interface{} {
 		if gradientType == neuralnetwork.BiasGradient {
-			return newRPropGradient(nn.Sizes[layer], 1, config)
+			return newPlusRPropGradient(nn.Sizes[layer], 1, config)
 		}
-		return newRPropGradient(nn.Sizes[layer], nn.Sizes[layer-1], config)
+		return newPlusRPropGradient(nn.Sizes[layer], nn.Sizes[layer-1], config)
 	}
 }
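The two constructor calls in this hunk differ only in the matrix shape they request: bias gradients form a column vector, while weight gradients span the current and previous layer. A standalone sketch of that sizing logic (the `gradientShape` helper and the example layer sizes are illustrative, not part of the package):

```go
package main

import "gonum.org/v1/gonum/mat"

// gradientShape mirrors the sizing logic of the initializer above: bias
// gradients are a column vector, weight gradients connect the current
// layer to the previous one.
func gradientShape(sizes []int, layer int, isBias bool) (rows, cols int) {
	if isBias {
		return sizes[layer], 1
	}
	return sizes[layer], sizes[layer-1]
}

func main() {
	sizes := []int{784, 30, 10} // e.g. a small MNIST-style network
	r, c := gradientShape(sizes, 1, false)
	_ = mat.NewDense(r, c, nil) // 30×784 weight-gradient matrix
	r, c = gradientShape(sizes, 1, true)
	_ = mat.NewDense(r, c, nil) // 30×1 bias-gradient vector
}
```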
@@ -110,12 +110,12 @@ func (g *plusRPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
 	return result
 }
 
-func (g *plusRPropGradient) AccumGradients(gradient mat.Matrix) {
+func (g *plusRPropGradient) AccumGradients(gradient mat.Matrix, batchSize int) {
 	g.gradients.Apply(func(i, j int, v float64) float64 {
 		v += gradient.At(i, j)
 		return v
 	}, g.gradients)
-	g.batchSize++
+	g.batchSize += batchSize
 }
 
 func (g plusRPropGradient) Gradients() *mat.Dense {
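The signature change matters when a single `AccumGradients` call already carries a gradient summed over many samples: the old `g.batchSize++` counted calls, while the new code counts samples, so any later averaging divides by the true batch size. A minimal self-contained sketch of the bookkeeping (the `accum` type is illustrative and mirrors only the two fields this diff touches):

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

// accum stands in for plusRPropGradient; everything beyond the two
// fields visible in the hunk above is an assumption.
type accum struct {
	gradients *mat.Dense
	batchSize int
}

// AccumGradients sums the incoming gradient element-wise and records how
// many samples produced it, as on the + side of the hunk above.
func (a *accum) AccumGradients(gradient mat.Matrix, batchSize int) {
	a.gradients.Apply(func(i, j int, v float64) float64 {
		return v + gradient.At(i, j)
	}, a.gradients)
	a.batchSize += batchSize // previously a.batchSize++
}

func main() {
	a := &accum{gradients: mat.NewDense(2, 2, nil)}
	grad := mat.NewDense(2, 2, []float64{1, 2, 3, 4}) // summed over 32 samples
	a.AccumGradients(grad, 32)
	fmt.Println(a.batchSize) // 32; the old call-counting code would report 1
}
```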