Add Reset method to NeuralNetwork to reinitialize it to a random state
with a specified layer configuration
Use sync locker for most methods of neural network
Minor code cleanup
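
A minimal usage sketch of the new API (not part of the commit). The import path is hypothetical, and NewNeuralNetwork is assumed to return (*NeuralNetwork, error), since its full signature is truncated in the hunk header below:

package main

import (
	"log"

	neuralnetwork "example.invalid/neuralnetwork" // hypothetical import path
)

func main() {
	// A nil gradient descent initializer is assumed acceptable here;
	// Reset checks gradientDescentInitializer against nil before use.
	net, err := neuralnetwork.NewNeuralNetwork([]int{4, 16, 3}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Reset discards the learned state and reinitializes biases and
	// weights at random. It rejects configurations with fewer than
	// 3 layers or any layer narrower than 2 neurons.
	if err := net.Reset([]int{8, 32, 32, 10}); err != nil {
		log.Fatal(err)
	}

	// Reset, Validate, SaveState and LoadState now all take nn.syncMutex,
	// so a snapshot written from another goroutine does not race the reset
	// above (assuming SaveStateToFile delegates to SaveState).
	done := make(chan struct{})
	go func() {
		defer close(done)
		net.SaveStateToFile("snapshot.nn")
	}()
	<-done
}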

Alexey Edelev 5 years ago
parent commit a6374f1562
1 changed file with 46 additions and 4 deletions

+ 46 - 4
neuralnetwork/neuralnetwork/neuralnetwork.go

@@ -149,6 +149,8 @@ func NewNeuralNetwork(sizes []int, gradientDescentInitializer GradientDescentIni
 }
 
 func (nn *NeuralNetwork) Copy() (outNN *NeuralNetwork) {
+	nn.syncMutex.Lock()
+	defer nn.syncMutex.Unlock()
 	outNN = &NeuralNetwork{
 		Sizes:                      nn.Sizes,
 		LayerCount:                 len(nn.Sizes),
@@ -171,6 +173,42 @@ func (nn *NeuralNetwork) Copy() (outNN *NeuralNetwork) {
 	return
 }
 
+func (nn *NeuralNetwork) Reset(sizes []int) (err error) {
+	nn.syncMutex.Lock()
+	defer nn.syncMutex.Unlock()
+	err = nil
+	if len(sizes) < 3 {
+		fmt.Printf("Invalid network configuration: %v\n", sizes)
+		return fmt.Errorf("invalid network configuration: %v", sizes)
+	}
+
+	for i := 0; i < len(sizes); i++ {
+		if sizes[i] < 2 {
+			fmt.Printf("Invalid network configuration: %v\n", sizes)
+			return fmt.Errorf("invalid network configuration: %v", sizes)
+		}
+	}
+
+	lenSizes := len(sizes)
+	nn.Sizes = sizes
+	nn.LayerCount = len(sizes)
+	nn.Biases = make([]*mat.Dense, lenSizes)
+	nn.Weights = make([]*mat.Dense, lenSizes)
+	nn.BGradient = make([]interface{}, lenSizes)
+	nn.WGradient = make([]interface{}, lenSizes)
+
+	for l := 1; l < nn.LayerCount; l++ {
+		nn.Biases[l] = generateRandomDense(nn.Sizes[l], 1)
+		nn.Weights[l] = generateRandomDense(nn.Sizes[l], nn.Sizes[l-1])
+		if nn.gradientDescentInitializer != nil {
+			nn.BGradient[l] = nn.gradientDescentInitializer(nn, l, BiasGradient)
+			nn.WGradient[l] = nn.gradientDescentInitializer(nn, l, WeightGradient)
+		}
+	}
+
+	return
+}
+
 func (nn *NeuralNetwork) SetStateWatcher(watcher StateWatcher) {
 	nn.watcher = watcher
 	if watcher != nil {
@@ -207,6 +245,8 @@ func (nn *NeuralNetwork) Predict(aIn mat.Matrix) (maxIndex int, max float64) {
 }
 
 func (nn *NeuralNetwork) Validate(trainer training.Trainer) (failCount, total int) {
+	nn.syncMutex.Lock()
+	defer nn.syncMutex.Unlock()
 	failCount = 0
 	total = 0
 	trainer.Reset()
@@ -313,13 +353,15 @@ func (nn *NeuralNetwork) runBatchWorkers(threadCount int, trainer training.Train
 }
 
 func (nn *NeuralNetwork) SaveState(writer io.Writer) {
+	nn.syncMutex.Lock()
+	defer nn.syncMutex.Unlock()
 	//save layer count
 	bufferSize := make([]byte, 4)
 	binary.LittleEndian.PutUint32(bufferSize[0:], uint32(nn.LayerCount))
 	_, err := writer.Write(bufferSize)
 
 	check(err)
-	fmt.Printf("wrote value %d\n", uint32(nn.LayerCount))
+	//fmt.Printf("wrote value %d\n", uint32(nn.LayerCount))
 
 	// save an input array
 	buffer := make([]byte, nn.LayerCount*4)
@@ -332,13 +374,11 @@ func (nn *NeuralNetwork) SaveState(writer io.Writer) {
 	// fmt.Printf("wrote buffer %d bytes\n", n2)
 
 	//save biases
-	////////////////////////
 	for i := 1; i < nn.LayerCount; i++ {
 		saveDense(writer, nn.Biases[i])
 	}
 
 	//save weights
-	////////////////////////
 	for i := 1; i < nn.LayerCount; i++ {
 		saveDense(writer, nn.Weights[i])
 	}
@@ -352,6 +392,8 @@ func (nn *NeuralNetwork) SaveStateToFile(filePath string) {
 }
 
 func (nn *NeuralNetwork) LoadState(reader io.Reader) {
+	nn.syncMutex.Lock()
+	defer nn.syncMutex.Unlock()
 	// Read layer count
 	nn.LayerCount = readInt(reader)
 
@@ -374,9 +416,9 @@ func (nn *NeuralNetwork) LoadState(reader io.Reader) {
 		nn.Biases[l] = readDense(reader, nn.Biases[l])
 	}
 
+	// read Weights and initialize gradient descents
 	nn.BGradient = make([]interface{}, nn.LayerCount)
 	nn.WGradient = make([]interface{}, nn.LayerCount)
-	// read Weights and initialize gradient descents
 	nn.Weights[0] = &mat.Dense{}
 	for l := 1; l < nn.LayerCount; l++ {
 		nn.Weights = append(nn.Weights, &mat.Dense{})