
Start multithreaded adaptation

Alexey Edelev 5 years ago
commit d1718415d0

+ 2 - 2
neuralnetwork/main.go

@@ -11,7 +11,7 @@ import (
 
 func main() {
 	sizes := []int{13, 14, 14, 3}
-	nn, _ := neuralnetwork.NewNeuralNetwork(sizes, 200, neuralnetwork.NewPlusRPropInitializer(neuralnetwork.RPropConfig{
+	nn, _ := neuralnetwork.NewNeuralNetwork(sizes, 130, neuralnetwork.NewRPropInitializer(neuralnetwork.RPropConfig{
 		NuPlus:   1.2,
 		NuMinus:  0.8,
 		DeltaMax: 50.0,
@@ -29,7 +29,7 @@ func main() {
 	// 	fmt.Printf("A before:\n%v\n\n", mat.Formatted(nn.A[i], mat.Prefix(""), mat.Excerpt(0)))
 	// }
 
-	teacher := teach.NewTextDataReader("./wine.data", 5)
+	teacher := teach.NewTextDataReader("./wine.data", 1)
 	nn.Teach(teacher)
 
 	// for i := 0; i < nn.Count; i++ {

+ 67 - 0
neuralnetwork/neuralnetworkbase/batchworker.go

@@ -0,0 +1,67 @@
+/*
+ * MIT License
+ *
+ * Copyright (c) 2019 Alexey Edelev <semlanik@gmail.com>
+ *
+ * This file is part of NeuralNetwork project https://git.semlanik.org/semlanik/NeuralNetwork
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this
+ * software and associated documentation files (the "Software"), to deal in the Software
+ * without restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies
+ * or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+ * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+ * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+package neuralnetworkbase
+
+import (
+	teach "../teach"
+	mat "gonum.org/v1/gonum/mat"
+)
+
+type batchWorker struct {
+	network   *NeuralNetwork
+	BGradient []BatchGradientDescent
+	WGradient []BatchGradientDescent
+	batchSize int
+}
+
+func newBatchWorker(nn *NeuralNetwork) (bw *batchWorker) {
+	bw = &batchWorker{
+		network:   nn,
+		BGradient: make([]BatchGradientDescent, nn.LayerCount),
+		WGradient: make([]BatchGradientDescent, nn.LayerCount),
+	}
+
+	for l := 1; l < nn.LayerCount; l++ {
+		bw.BGradient[l] = nn.gradientDescentInitializer(nn, l, BiasGradient).(BatchGradientDescent)
+		bw.WGradient[l] = nn.gradientDescentInitializer(nn, l, WeightGradient).(BatchGradientDescent)
+	}
+	return
+}
+
+func (bw *batchWorker) Run(teacher teach.Teacher) {
+	for teacher.NextData() {
+		dB, dW := bw.network.backward(teacher.GetData())
+		for l := 1; l < bw.network.LayerCount; l++ {
+			bw.BGradient[l].AccumGradients(dB[l])
+			bw.WGradient[l].AccumGradients(dW[l])
+		}
+	}
+	teacher.Reset()
+}
+
+func (bw *batchWorker) Result(layer int) (dB, dW *mat.Dense) {
+	return bw.BGradient[layer].Gradients(), bw.WGradient[layer].Gradients()
+}
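
For context, the worker is meant to be fanned out from TeachBatch later in this commit. A minimal sketch of that pattern, assuming a hypothetical teachBatchParallel helper, a teachers slice, and "sync" imported (TeachBatch itself still hardcodes one TextDataReader per worker):

// Sketch only: one batchWorker per teacher, each on its own goroutine,
// then the accumulated gradients are folded back into the master network.
func teachBatchParallel(nn *NeuralNetwork, teachers []teach.Teacher) {
	workers := make([]*batchWorker, len(teachers))
	wg := sync.WaitGroup{}
	for i := range workers {
		workers[i] = newBatchWorker(nn)
		wg.Add(1)
		go func(bw *batchWorker, t teach.Teacher) {
			defer wg.Done()
			bw.Run(t)
		}(workers[i], teachers[i])
	}
	wg.Wait()
	for _, bw := range workers {
		for l := 1; l < nn.LayerCount; l++ {
			dB, dW := bw.Result(l)
			nn.BGradient[l].(BatchGradientDescent).AccumGradients(dB)
			nn.WGradient[l].(BatchGradientDescent).AccumGradients(dW)
		}
	}
}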

+ 1 - 0
neuralnetwork/neuralnetworkbase/interface.go

@@ -43,4 +43,5 @@ type OnlineGradientDescent interface {
 type BatchGradientDescent interface {
 	ApplyDelta(m mat.Matrix) *mat.Dense
 	AccumGradients(gradient mat.Matrix)
+	Gradients() *mat.Dense
 }
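
The new Gradients() method is what lets a batchWorker hand its accumulated gradients back to the network that owns the "real" descent objects. A minimal sketch of a type satisfying the extended interface; plain averaged gradient descent with a fixed learning rate is an illustrative assumption (the implementations in this commit are the RProp variants):

// Sketch only: smallest BatchGradientDescent satisfying the interface above.
type naiveBatchGradient struct {
	sum       *mat.Dense
	batchSize int
	rate      float64
}

func newNaiveBatchGradient(r, c int, rate float64) *naiveBatchGradient {
	return &naiveBatchGradient{sum: mat.NewDense(r, c, nil), rate: rate}
}

func (g *naiveBatchGradient) AccumGradients(gradient mat.Matrix) {
	g.sum.Add(g.sum, gradient)
	g.batchSize++
}

func (g *naiveBatchGradient) Gradients() *mat.Dense {
	return g.sum
}

func (g *naiveBatchGradient) ApplyDelta(m mat.Matrix) *mat.Dense {
	if g.batchSize == 0 {
		return mat.DenseCopyOf(m)
	}
	result := &mat.Dense{}
	result.Apply(func(i, j int, v float64) float64 {
		return v - g.rate*g.sum.At(i, j)/float64(g.batchSize)
	}, m)
	g.sum.Zero()
	g.batchSize = 0
	return result
}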

+ 51 - 58
neuralnetwork/neuralnetworkbase/neuralnetwork.go

@@ -30,6 +30,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"sync"
 
 	teach "../teach"
 	mat "gonum.org/v1/gonum/mat"
@@ -96,15 +97,14 @@ import (
 //       L = len(Sizes) - Number of neural network layers
 
 type NeuralNetwork struct {
-	LayerCount int
-	Sizes      []int
-	Biases     []*mat.Dense
-	Weights    []*mat.Dense
-	A          []*mat.Dense
-	Z          []*mat.Dense
-	BGradient  []interface{}
-	WGradient  []interface{}
-	epocs      int
+	LayerCount                 int
+	Sizes                      []int
+	Biases                     []*mat.Dense
+	Weights                    []*mat.Dense
+	BGradient                  []interface{}
+	WGradient                  []interface{}
+	epocs                      int
+	gradientDescentInitializer GradientDescentInitializer
 }
 
 func NewNeuralNetwork(sizes []int, epocs int, gradientDescentInitializer GradientDescentInitializer) (nn *NeuralNetwork, err error) {
@@ -138,32 +138,14 @@ func NewNeuralNetwork(sizes []int, epocs int, gradientDescentInitializer Gradien
 	nn.BGradient = make([]interface{}, nn.LayerCount)
 	nn.WGradient = make([]interface{}, nn.LayerCount)
 
-	nn.A = make([]*mat.Dense, nn.LayerCount)
-	nn.Z = make([]*mat.Dense, nn.LayerCount)
 	nn.epocs = epocs
+	nn.gradientDescentInitializer = gradientDescentInitializer
 
 	for l := 1; l < nn.LayerCount; l++ {
 		nn.Biases[l] = generateRandomDense(nn.Sizes[l], 1)
 		nn.Weights[l] = generateRandomDense(nn.Sizes[l], nn.Sizes[l-1])
-		nn.BGradient[l] = gradientDescentInitializer(nn, l, BiasGradient)
-		nn.WGradient[l] = gradientDescentInitializer(nn, l, WeightGradient)
-	}
-	return
-}
-
-func (nn *NeuralNetwork) Copy() (out *NeuralNetwork) {
-	out = &NeuralNetwork{}
-	out.Sizes = nn.Sizes
-	out.LayerCount = nn.LayerCount
-	out.Weights = make([]*mat.Dense, nn.LayerCount)
-	out.Biases = make([]*mat.Dense, nn.LayerCount)
-	out.A = make([]*mat.Dense, nn.LayerCount)
-	out.Z = make([]*mat.Dense, nn.LayerCount)
-	out.epocs = nn.epocs
-
-	for l := 1; l < out.LayerCount; l++ {
-		out.Weights[l] = mat.DenseCopyOf(nn.Weights[l])
-		out.Biases[l] = mat.DenseCopyOf(nn.Biases[l])
+		nn.BGradient[l] = nn.gradientDescentInitializer(nn, l, BiasGradient)
+		nn.WGradient[l] = nn.gradientDescentInitializer(nn, l, WeightGradient)
 	}
 	return
 }
@@ -175,8 +157,8 @@ func (nn *NeuralNetwork) Predict(aIn mat.Matrix) (maxIndex int, max float64) {
 		return -1, 0.0
 	}
 
-	nn.forward(aIn)
-	result := nn.result()
+	A, _ := nn.forward(aIn)
+	result := A[nn.LayerCount-1]
 	r, _ = result.Dims()
 	max = 0.0
 	maxIndex = 0
@@ -220,11 +202,25 @@ func (nn *NeuralNetwork) TeachOnline(teacher teach.Teacher) {
 	}
 }
 
-func (nn *NeuralNetwork) TeachBatch(teacher teach.Teacher) {
+func (nn *NeuralNetwork) TeachBatch(_ teach.Teacher) {
+	wg := sync.WaitGroup{}
 	for t := 0; t < nn.epocs; t++ {
-		for teacher.NextData() {
-			dB, dW := nn.backward(teacher.GetData())
+		batchWorkers := []*batchWorker{newBatchWorker(nn), newBatchWorker(nn), newBatchWorker(nn)} //, newBatchWorker(nn), newBatchWorker(nn), newBatchWorker(nn)}
+		for i := range batchWorkers {
+			wg.Add(1)
+			// Pass i into the goroutine so every worker runs with its own
+			// index instead of all sharing the loop variable's final value.
+			go func(i int) {
+				teacher := teach.NewTextDataReader("./wine.data", 5)
+				batchWorkers[i].Run(teacher)
+				wg.Done()
+			}(i)
+		}
+		wg.Wait()
+
+		// teacher.Reset()
+
+		for _, bw := range batchWorkers {
 			for l := 1; l < nn.LayerCount; l++ {
+				dB, dW := bw.Result(l)
 				bGradient, ok := nn.BGradient[l].(BatchGradientDescent)
 				if !ok {
 					panic("bGradient is not a BatchGradientDescent")
@@ -233,11 +229,11 @@ func (nn *NeuralNetwork) TeachBatch(teacher teach.Teacher) {
 				if !ok {
 					panic("wGradient is not a BatchGradientDescent")
 				}
-				bGradient.AccumGradients(dB[l])
-				wGradient.AccumGradients(dW[l])
+				bGradient.AccumGradients(dB)
+				wGradient.AccumGradients(dW)
 			}
 		}
-		teacher.Reset()
+
 		for l := 1; l < nn.LayerCount; l++ {
 			bGradient := nn.BGradient[l].(BatchGradientDescent)
 			wGradient := nn.WGradient[l].(BatchGradientDescent)
@@ -309,19 +305,19 @@ func (nn *NeuralNetwork) LoadState(reader io.Reader) {
 		nn.Weights[i] = readDense(reader, nn.Weights[i])
 	}
 
-	nn.A = make([]*mat.Dense, nn.LayerCount)
-	nn.Z = make([]*mat.Dense, nn.LayerCount)
-
 	// fmt.Printf("\nLoadState end\n")
 }
 
-func (nn *NeuralNetwork) forward(aIn mat.Matrix) {
-	nn.A[0] = mat.DenseCopyOf(aIn)
+func (nn NeuralNetwork) forward(aIn mat.Matrix) (A, Z []*mat.Dense) {
+	A = make([]*mat.Dense, nn.LayerCount)
+	Z = make([]*mat.Dense, nn.LayerCount)
+
+	A[0] = mat.DenseCopyOf(aIn)
 
 	for l := 1; l < nn.LayerCount; l++ {
-		nn.A[l] = mat.NewDense(nn.Sizes[l], 1, nil)
-		aSrc := nn.A[l-1]
-		aDst := nn.A[l]
+		A[l] = mat.NewDense(nn.Sizes[l], 1, nil)
+		aSrc := A[l-1]
+		aDst := A[l]
 
 		// Each iteration implements the formula below for neuron activation values
 		// A[l]=σ(W[l]*A[l−1]+B[l])
@@ -333,17 +329,18 @@ func (nn *NeuralNetwork) forward(aIn mat.Matrix) {
 		aDst.Add(aDst, nn.Biases[l])
 
 		// Save raw activation value for back propagation
-		nn.Z[l] = mat.DenseCopyOf(aDst)
+		Z[l] = mat.DenseCopyOf(aDst)
 
 		// σ(W[l]*A[l−1]+B[l])
 		aDst.Apply(applySigmoid, aDst)
 	}
+	return
 }
 
 // Function returns calculated bias and weight derivatives for each
 // layer around the aIn/aOut dataset
-func (nn *NeuralNetwork) backward(aIn, aOut mat.Matrix) (dB, dW []*mat.Dense) {
-	nn.forward(aIn)
+func (nn NeuralNetwork) backward(aIn, aOut mat.Matrix) (dB, dW []*mat.Dense) {
+	A, Z := nn.forward(aIn)
 
 	lastLayerNum := nn.LayerCount - 1
 	dB = make([]*mat.Dense, nn.LayerCount)
@@ -361,11 +358,11 @@ func (nn *NeuralNetwork) backward(aIn, aOut mat.Matrix) (dB, dW []*mat.Dense) {
 	// error = A[L]-y
 	// Where y is expected activations set
 	err := &mat.Dense{}
-	err.Sub(nn.result(), aOut)
+	err.Sub(A[nn.LayerCount-1], aOut)
 
 	// Calculate sigmoid primes σ'(Z[L]) for the last layer L
 	sigmoidsPrime := &mat.Dense{}
-	sigmoidsPrime.Apply(applySigmoidPrime, nn.Z[lastLayerNum])
+	sigmoidsPrime.Apply(applySigmoidPrime, Z[lastLayerNum])
 
 	// (A[L]−y)⊙σ'(Z[L])
 	delta := &mat.Dense{}
@@ -376,7 +373,7 @@ func (nn *NeuralNetwork) backward(aIn, aOut mat.Matrix) (dB, dW []*mat.Dense) {
 
 	// ∂E/∂W[L] = A[L−1]*δ[L]
 	weights := &mat.Dense{}
-	weights.Mul(delta, nn.A[lastLayerNum-1].T())
+	weights.Mul(delta, A[lastLayerNum-1].T())
 
 	// Initialize new weights and biases values with last layer values
 	dB[lastLayerNum] = biases
@@ -393,7 +390,7 @@ func (nn *NeuralNetwork) backward(aIn, aOut mat.Matrix) (dB, dW []*mat.Dense) {
 	for l := nn.LayerCount - 2; l > 0; l-- {
 		// Calculate sigmoid primes σ'(Z[l]) for layer l
 		sigmoidsPrime := &mat.Dense{}
-		sigmoidsPrime.Apply(applySigmoidPrime, nn.Z[l])
+		sigmoidsPrime.Apply(applySigmoidPrime, Z[l])
 
 		// (Wt[l+1])*δ[l+1]
 		// err below is the delta from the previous step (l+1)
@@ -439,14 +436,10 @@ func (nn *NeuralNetwork) backward(aIn, aOut mat.Matrix) (dB, dW []*mat.Dense) {
 		//         ⎢ ... ⎥
 		//         ⎣δ[s] ⎦
 		weights := &mat.Dense{}
-		weights.Mul(delta, nn.A[l-1].T())
+		weights.Mul(delta, A[l-1].T())
 
 		dB[l] = biases
 		dW[l] = weights
 	}
 	return
 }
-
-func (nn *NeuralNetwork) result() *mat.Dense {
-	return nn.A[nn.LayerCount-1]
-}
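
Dropping the shared nn.A/nn.Z fields is what makes the worker goroutines above safe: forward and backward now build their activation and Z slices locally and only read the weights and biases. A minimal sketch, assuming nn is a *NeuralNetwork built as in main.go with sizes {13, 14, 14, 3} and zero-valued placeholder inputs:

// Sketch only: two concurrent backward passes on the same network.
in1, out1 := mat.NewDense(13, 1, nil), mat.NewDense(3, 1, nil)
in2, out2 := mat.NewDense(13, 1, nil), mat.NewDense(3, 1, nil)
wg := sync.WaitGroup{}
wg.Add(2)
go func() { defer wg.Done(); nn.backward(in1, out1) }() // no shared A/Z to race on
go func() { defer wg.Done(); nn.backward(in2, out2) }()
wg.Wait()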

+ 23 - 18
neuralnetwork/neuralnetworkbase/plusrpropgradient.go

@@ -34,9 +34,9 @@ import (
 // Plus Resilient backpropagation
 
 type plusRPropGradient struct {
-	GradientsPrev *mat.Dense
-	Gradients     *mat.Dense
-	Deltas        *mat.Dense
+	gradientsPrev *mat.Dense
+	gradients     *mat.Dense
+	deltas        *mat.Dense
 	batchSize     int
 	config        RPropConfig
 }
@@ -59,9 +59,9 @@ func newPlusRPropGradient(r, c int, config RPropConfig) (g *plusRPropGradient) {
 		deltas[j] = 0.1
 	}
 
-	g.Gradients = mat.NewDense(r, c, nil)
-	g.GradientsPrev = mat.NewDense(r, c, nil)
-	g.Deltas = mat.NewDense(r, c, deltas)
+	g.gradients = mat.NewDense(r, c, nil)
+	g.gradientsPrev = mat.NewDense(r, c, nil)
+	g.deltas = mat.NewDense(r, c, deltas)
 	g.config = config
 	return
 }
@@ -75,7 +75,7 @@ func (g *plusRPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
 
 	result = &mat.Dense{}
 
-	gradient := g.Gradients
+	gradient := g.gradients
 	r, c := gradient.Dims()
 	dividers := make([]float64, r*c)
 	for i := range dividers {
@@ -85,33 +85,38 @@ func (g *plusRPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
 	gradient.DivElem(gradient, gradientDivider)
 
 	result.Apply(func(i, j int, v float64) (outV float64) {
-		gradientSign := g.GradientsPrev.At(i, j) * gradient.At(i, j)
+		gradientSign := g.gradientsPrev.At(i, j) * gradient.At(i, j)
 		if gradientSign > 0 {
-			g.Deltas.Set(i, j, math.Min(nuPlus*g.Deltas.At(i, j), deltaMax))
-			outV = v - sign(gradient.At(i, j))*g.Deltas.At(i, j)
+			g.deltas.Set(i, j, math.Min(nuPlus*g.deltas.At(i, j), deltaMax))
+			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)
 
-			g.GradientsPrev.Set(i, j, gradient.At(i, j))
+			g.gradientsPrev.Set(i, j, gradient.At(i, j))
 		} else if gradientSign < 0 {
-			outV = v + sign(g.GradientsPrev.At(i, j))*g.Deltas.At(i, j)
-			g.Deltas.Set(i, j, math.Max(nuMinus*g.Deltas.At(i, j), deltaMin))
+			outV = v + sign(g.gradientsPrev.At(i, j))*g.deltas.At(i, j)
+			g.deltas.Set(i, j, math.Max(nuMinus*g.deltas.At(i, j), deltaMin))
 
-			g.GradientsPrev.Set(i, j, 0.0)
+			g.gradientsPrev.Set(i, j, 0.0)
 		} else {
-			outV = v - sign(gradient.At(i, j))*g.Deltas.At(i, j)
+			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)
 
-			g.GradientsPrev.Set(i, j, gradient.At(i, j))
+			g.gradientsPrev.Set(i, j, gradient.At(i, j))
 		}
 		return
 	}, m)
 
 	g.batchSize = 0
+	g.gradients = mat.NewDense(r, c, nil)
 	return result
 }
 
 func (g *plusRPropGradient) AccumGradients(gradient mat.Matrix) {
-	g.Gradients.Apply(func(i, j int, v float64) float64 {
+	g.gradients.Apply(func(i, j int, v float64) float64 {
 		v += gradient.At(i, j)
 		return v
-	}, g.Gradients)
+	}, g.gradients)
 	g.batchSize++
 }
+
+func (g *plusRPropGradient) Gradients() *mat.Dense {
+	return g.gradients
+}

+ 22 - 17
neuralnetwork/neuralnetworkbase/rpropgradient.go

@@ -34,9 +34,9 @@ import (
 // Resilient backpropagation
 
 type rPropGradient struct {
-	GradientsPrev *mat.Dense
-	Gradients     *mat.Dense
-	Deltas        *mat.Dense
+	gradientsPrev *mat.Dense
+	gradients     *mat.Dense
+	deltas        *mat.Dense
 	batchSize     int
 	config        RPropConfig
 }
@@ -66,9 +66,9 @@ func newRPropGradient(r, c int, config RPropConfig) (g *rPropGradient) {
 		deltas[j] = 0.1
 	}
 
-	g.Gradients = mat.NewDense(r, c, nil)
-	g.GradientsPrev = mat.NewDense(r, c, nil)
-	g.Deltas = mat.NewDense(r, c, deltas)
+	g.gradients = mat.NewDense(r, c, nil)
+	g.gradientsPrev = mat.NewDense(r, c, nil)
+	g.deltas = mat.NewDense(r, c, deltas)
 	g.config = config
 	return
 }
@@ -82,7 +82,7 @@ func (g *rPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
 
 	result = &mat.Dense{}
 
-	gradient := g.Gradients
+	gradient := g.gradients
 	r, c := gradient.Dims()
 	dividers := make([]float64, r*c)
 	for i := range dividers {
@@ -92,33 +92,38 @@ func (g *rPropGradient) ApplyDelta(m mat.Matrix) (result *mat.Dense) {
 	gradient.DivElem(gradient, gradientDivider)
 
 	result.Apply(func(i, j int, v float64) (outV float64) {
-		gradientSign := g.GradientsPrev.At(i, j) * gradient.At(i, j)
+		gradientSign := g.gradientsPrev.At(i, j) * gradient.At(i, j)
 		if gradientSign > 0 {
-			g.Deltas.Set(i, j, math.Min(nuPlus*g.Deltas.At(i, j), deltaMax))
-			outV = v - sign(gradient.At(i, j))*g.Deltas.At(i, j)
+			g.deltas.Set(i, j, math.Min(nuPlus*g.deltas.At(i, j), deltaMax))
+			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)
 
-			g.GradientsPrev.Set(i, j, gradient.At(i, j))
+			g.gradientsPrev.Set(i, j, gradient.At(i, j))
 		} else if gradientSign < 0 {
 			outV = v
-			g.Deltas.Set(i, j, math.Max(nuMinus*g.Deltas.At(i, j), deltaMin))
+			g.deltas.Set(i, j, math.Max(nuMinus*g.deltas.At(i, j), deltaMin))
 
-			g.GradientsPrev.Set(i, j, 0.0)
+			g.gradientsPrev.Set(i, j, 0.0)
 		} else {
-			outV = v - sign(gradient.At(i, j))*g.Deltas.At(i, j)
+			outV = v - sign(gradient.At(i, j))*g.deltas.At(i, j)
 
-			g.GradientsPrev.Set(i, j, gradient.At(i, j))
+			g.gradientsPrev.Set(i, j, gradient.At(i, j))
 		}
 		return
 	}, m)
 
 	g.batchSize = 0
+	g.gradients = mat.NewDense(r, c, nil)
 	return result
 }
 
 func (g *rPropGradient) AccumGradients(gradient mat.Matrix) {
-	g.Gradients.Apply(func(i, j int, v float64) float64 {
+	g.gradients.Apply(func(i, j int, v float64) float64 {
 		v += gradient.At(i, j)
 		return v
-	}, g.Gradients)
+	}, g.gradients)
 	g.batchSize++
 }
+
+func (g *rPropGradient) Gradients() *mat.Dense {
+	return g.gradients
+}

+ 17 - 7
neuralnetwork/teach/mnistreader.go

@@ -42,10 +42,19 @@ type MNISTReader struct {
 	imageSize       int
 	buffered        *mat.Dense
 	resultsBuffered *mat.Dense
+	window          MNISTBatchWindow
+	currentIndex    int64
 }
 
-func NewMNISTReader(dataFilename string, resultsFilename string) (r *MNISTReader) {
-	r = &MNISTReader{}
+type MNISTBatchWindow struct {
+	from int64
+	to   int64
+}
+
+func NewMNISTReader(dataFilename string, resultsFilename string, window MNISTBatchWindow) (r *MNISTReader) {
+	r = &MNISTReader{
+		window: window,
+	}
 
 	var err error
 	r.file, err = os.Open(dataFilename)
@@ -78,6 +87,7 @@ func NewMNISTReader(dataFilename string, resultsFilename string) (r *MNISTReader
 		return nil
 	}
 
+	r.Reset()
 	return
 }
 
@@ -93,9 +103,8 @@ func (r *MNISTReader) Next() bool {
 	buffer := make([]byte, r.imageSize)
 	_, err := r.file.Read(buffer)
 
-	if err == io.EOF {
-		r.file.Seek(16, 0)
-		r.resultsFile.Seek(8, 0)
+	if err == io.EOF || r.currentIndex >= r.window.to {
+		r.Reset()
 		return false
 	} else if err != nil {
 		log.Fatal("File read error\n")
@@ -132,11 +141,12 @@ func (r *MNISTReader) Next() bool {
 	r.resultsBuffered.Set(num, 0, 1.0)
 
 	// fmt.Printf("r.resultsBuffered:\n%v\n\n", mat.Formatted(r.resultsBuffered, mat.Prefix(""), mat.Excerpt(0)))
+	r.currentIndex++
 
 	return true
 }
 
 func (r *MNISTReader) Reset() {
-	r.file.Seek(16, 0)
-	r.resultsFile.Seek(8, 0)
+	r.file.Seek(16+r.window.from*int64(r.imageSize), 0)
+	// Labels are one byte each, so the results file offset is not scaled by imageSize
+	r.resultsFile.Seek(8+r.window.from, 0)
+	r.currentIndex = r.window.from
 }
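
The window gives each batch worker a disjoint slice of the MNIST files. A minimal sketch of carving a 60000-image training set into three windows; the file names and counts are illustrative assumptions, and since from/to are unexported this would live inside the teach package:

// Sketch only: one reader per worker, each covering a third of the data.
const total, workers = 60000, 3
per := int64(total / workers)
readers := make([]*MNISTReader, workers)
for w := int64(0); w < workers; w++ {
	window := MNISTBatchWindow{from: w * per, to: (w + 1) * per}
	readers[w] = NewMNISTReader("./train-images-idx3-ubyte", "./train-labels-idx1-ubyte", window)
}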

+ 7 - 0
neuralnetwork/teach/textdatareader.go

@@ -33,6 +33,7 @@ import (
 	"os"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	mat "gonum.org/v1/gonum/mat"
@@ -44,12 +45,14 @@ type TextDataReader struct {
 	index           int
 	validationIndex int
 	validationCount int
+	mutex           *sync.Mutex
 }
 
 func NewTextDataReader(filename string, validationPart int) *TextDataReader {
 	r := &TextDataReader{
 		index:           0,
 		validationIndex: 0,
+		mutex:           &sync.Mutex{},
 	}
 	r.readData(filename)
 	r.validationCount = len(r.dataSet) / validationPart
@@ -144,10 +147,14 @@ func (r *TextDataReader) readData(filename string) {
 }
 
 func (r *TextDataReader) GetData() (*mat.Dense, *mat.Dense) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
 	return r.dataSet[r.index], r.result[r.index]
 }
 
 func (r *TextDataReader) NextData() bool {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
 	if (r.index + 1) >= len(r.result)-r.validationCount {
 		r.index = 0
 		return false