Forráskód Böngészése

Move gradients to a separate package

Alexey Edelev 5 éve
szülő
commit
d44076d372

+ 2 - 1
handwriting/handwriting/handwriting.go

@@ -32,6 +32,7 @@ import (
 	"net"
 
 	neuralnetwork "../../neuralnetwork/neuralnetwork"
+	gradients "../../neuralnetwork/neuralnetwork/gradients"
 	training "../../neuralnetwork/training"
 	"gonum.org/v1/gonum/mat"
 	grpc "google.golang.org/grpc"
@@ -43,7 +44,7 @@ type HandwritingService struct {
 
 func NewHandwritingService() (hws *HandwritingService) {
 	hws = &HandwritingService{}
-	hws.nn, _ = neuralnetwork.NewNeuralNetwork([]int{784, 300, 10}, neuralnetwork.NewRPropInitializer(neuralnetwork.RPropConfig{
+	hws.nn, _ = neuralnetwork.NewNeuralNetwork([]int{784, 300, 10}, gradients.NewRPropInitializer(gradients.RPropConfig{
 		NuPlus:   1.2,
 		NuMinus:  0.5,
 		DeltaMax: 50.0,

+ 2 - 1
neuralnetwork/main.go

@@ -27,13 +27,14 @@ package main
 
 import (
 	"./neuralnetwork"
+	"./neuralnetwork/gradients"
 	"./remotecontrol"
 )
 
 func main() {
 	rc := remotecontrol.NewRemoteControl()
 	sizes := []int{13, 8, 12, 3}
-	nn, _ := neuralnetwork.NewNeuralNetwork(sizes, neuralnetwork.NewRPropInitializer(neuralnetwork.RPropConfig{
+	nn, _ := neuralnetwork.NewNeuralNetwork(sizes, gradients.NewRPropInitializer(gradients.RPropConfig{
 		NuPlus:   1.2,
 		NuMinus:  0.5,
 		DeltaMax: 50.0,

+ 4 - 3
neuralnetwork/neuralnetwork/backpropgradient.go → neuralnetwork/neuralnetwork/gradients/backpropgradient.go

@@ -23,9 +23,10 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-package neuralnetwork
+package gradients
 
 import (
+	neuralnetwork ".."
 	mat "gonum.org/v1/gonum/mat"
 )
 
@@ -34,8 +35,8 @@ type backPropGradient struct {
 	alpha float64
 }
 
-func NewBackPropInitializer(nu float64) GradientDescentInitializer {
-	return func(nn *NeuralNetwork, layer, gradientType int) interface{} {
+func NewBackPropInitializer(nu float64) neuralnetwork.GradientDescentInitializer {
+	return func(nn *neuralnetwork.NeuralNetwork, layer, gradientType int) interface{} {
 		return newBackPropGradient(nu / float64(nn.Sizes[0]))
 	}
 }

+ 38 - 0
neuralnetwork/neuralnetwork/gradients/common.go

@@ -0,0 +1,38 @@
+/*
+ * MIT License
+ *
+ * Copyright (c) 2019 Alexey Edelev <semlanik@gmail.com>
+ *
+ * This file is part of NeuralNetwork project https://git.semlanik.org/semlanik/NeuralNetwork
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this
+ * software and associated documentation files (the "Software"), to deal in the Software
+ * without restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies
+ * or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+ * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+ * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+package gradients
+
+import (
+	"math"
+)
+
+func sign(v float64) float64 {
+	if v == 0 {
+		return 0
+	}
+	// fmt.Printf("%v / math.Abs(%v) = %v\n", v, math.Abs(v), v/math.Abs(v))
+	return v / math.Abs(v)
+}

+ 5 - 4
neuralnetwork/neuralnetwork/plusrpropgradient.go → neuralnetwork/neuralnetwork/gradients/plusrpropgradient.go

@@ -23,11 +23,12 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-package neuralnetwork
+package gradients
 
 import (
 	"math"
 
+	neuralnetwork ".."
 	mat "gonum.org/v1/gonum/mat"
 )
 
@@ -41,9 +42,9 @@ type plusRPropGradient struct {
 	config        RPropConfig
 }
 
-func NewPlusRPropInitializer(config RPropConfig) GradientDescentInitializer {
-	return func(nn *NeuralNetwork, layer, gradientType int) interface{} {
-		if gradientType == BiasGradient {
+func NewPlusRPropInitializer(config RPropConfig) neuralnetwork.GradientDescentInitializer {
+	return func(nn *neuralnetwork.NeuralNetwork, layer, gradientType int) interface{} {
+		if gradientType == neuralnetwork.BiasGradient {
 			return newRPropGradient(nn.Sizes[layer], 1, config)
 		}
 		return newRPropGradient(nn.Sizes[layer], nn.Sizes[layer-1], config)

+ 5 - 4
neuralnetwork/neuralnetwork/rpropgradient.go → neuralnetwork/neuralnetwork/gradients/rpropgradient.go

@@ -23,11 +23,12 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-package neuralnetwork
+package gradients
 
 import (
 	"math"
 
+	neuralnetwork ".."
 	mat "gonum.org/v1/gonum/mat"
 )
 
@@ -48,9 +49,9 @@ type RPropConfig struct {
 	DeltaMin float64
 }
 
-func NewRPropInitializer(config RPropConfig) GradientDescentInitializer {
-	return func(nn *NeuralNetwork, layer, gradientType int) interface{} {
-		if gradientType == BiasGradient {
+func NewRPropInitializer(config RPropConfig) neuralnetwork.GradientDescentInitializer {
+	return func(nn *neuralnetwork.NeuralNetwork, layer, gradientType int) interface{} {
+		if gradientType == neuralnetwork.BiasGradient {
 			return newRPropGradient(nn.Sizes[layer], 1, config)
 		}
 		return newRPropGradient(nn.Sizes[layer], nn.Sizes[layer-1], config)

+ 0 - 8
neuralnetwork/neuralnetwork/mathcommon.go

@@ -61,11 +61,3 @@ func sigmoidPrime(x float64) float64 {
 	sig := sigmoid(x)
 	return sig * (1 - sig)
 }
-
-func sign(v float64) float64 {
-	if v == 0 {
-		return 0
-	}
-	// fmt.Printf("%v / math.Abs(%v) = %v\n", v, math.Abs(v), v/math.Abs(v))
-	return v / math.Abs(v)
-}

+ 7 - 7
neuralnetwork/neuralnetwork/neuralnetwork_test.go

@@ -7,39 +7,39 @@ import (
 )
 
 func TestNewNeuralNetwork(t *testing.T) {
-	nn, err := NewNeuralNetwork([]int{}, NewBackPropInitializer(0.1))
+	nn, err := NewNeuralNetwork([]int{}, nil)
 	if nn != nil || err == nil {
 		t.Error("nn initialized, but shouldn't ", err)
 	}
 
-	nn, err = NewNeuralNetwork([]int{0, 0, 0, 0}, NewBackPropInitializer(0.1))
+	nn, err = NewNeuralNetwork([]int{0, 0, 0, 0}, nil)
 	if nn != nil || err == nil {
 		t.Error("nn initialized, but shouldn't ", err)
 	}
 
-	nn, err = NewNeuralNetwork([]int{1, 1, 1, 1}, NewBackPropInitializer(0.1))
+	nn, err = NewNeuralNetwork([]int{1, 1, 1, 1}, nil)
 	if nn != nil || err == nil {
 		t.Error("nn initialized, but shouldn't ", err)
 	}
 
-	nn, err = NewNeuralNetwork([]int{5, 5}, NewBackPropInitializer(0.1))
+	nn, err = NewNeuralNetwork([]int{5, 5}, nil)
 	if nn != nil || err == nil {
 		t.Error("nn initialized, but shouldn't ", err)
 	}
 
-	nn, err = NewNeuralNetwork([]int{5, 1, 5, 5}, NewBackPropInitializer(0.1))
+	nn, err = NewNeuralNetwork([]int{5, 1, 5, 5}, nil)
 	if nn != nil || err == nil {
 		t.Error("nn initialized, but shouldn't ", err)
 	}
 
-	nn, err = NewNeuralNetwork([]int{5, 4, 4, 5}, NewBackPropInitializer(0.1))
+	nn, err = NewNeuralNetwork([]int{5, 4, 4, 5}, nil)
 	if nn == nil || err != nil {
 		t.Error("nn is not initialized, but should be ", err)
 	}
 }
 
 func TestNeuralNetworkPredict(t *testing.T) {
-	nn, _ := NewNeuralNetwork([]int{3, 4, 4, 2}, NewBackPropInitializer(0.1))
+	nn, _ := NewNeuralNetwork([]int{3, 4, 4, 2}, nil)
 
 	aIn := &mat.Dense{}
 	index, max := nn.Predict(aIn)