Exemplo n.º 1
0
/** Reads a NumericalArray<double> previously serialised in the
	"NumericalArray<double>:v1" text format (header line, "size:N" line,
	then one "[i]=value" line per element).
	@param array	Destination array; must not be null. Assigned only on success.
	@return			true if a complete, well-formed array was read; false on EOF,
					a missing/garbled header, a malformed size or element line,
					or an element index that does not match its position. */
bool TextFileReaderInternal::readArray(NumericalArray<double> *array) throw()
{
	ugen_assert(array != 0);
	
	if(isEof()) return false;
	
	Text text = readLine();
	
	if(text != "NumericalArray<double>:v1") return false; // should reset the file position before read
	
	text = readLine(); // should be size info;
	int size = 0;
	// Check the sscanf result: previously a malformed size line left `size`
	// uninitialized (undefined behaviour in the loop bound below). Also reject
	// a negative size, which would be meaningless here.
	if(sscanf(text.getArray(), "size:%d", &size) != 1 || size < 0) return false;
	
	NumericalArray<double> newArray = NumericalArray<double>::withSize(size);
	double *newArrayPtr = newArray.getArray();
	
	for(int i = 0; i < size; i++)
	{
		if(isEof()) return false; // file ended before the declared element count
		
		text = readLine();
		int index;
		double value;
		// Both fields must parse, otherwise index/value would be uninitialized.
		if(sscanf(text.getArray(), "[%d]=%lg", &index, &value) != 2) return false;
		
		if(index != i) return false;
		
		newArrayPtr[i] = value;
	}
	
	*array = newArray;
	return true;	
}
Exemplo n.º 2
0
/** Serialises this network in the "NeuralNetworkSimple:v1" text format:
	a header line, the learn-rate/activation-offset line, one line per layer
	giving its node count, then one line per node threshold (weight column -1)
	followed by one line per weight. */
void NeuralNetworkSimpleInternal::write(TextFileWriter const& _file) const throw()
{
	const int fieldWidth = 4;	// column width for the %*d integer fields
	const int lineSize = 128;
	char line[lineSize];
	
	TextFileWriter writer = _file;	// local copy, as in the original by-value use
	
	writer.write("NeuralNetworkSimple:v1\n");
	
	snprintf(line, lineSize, "learnRate: %f actFuncOffset: %f\n", getLearnRate(), getActFuncOffset());
	writer.write(line);
	
	// Topology: total layer count, then the size of each layer. Layer 0 is
	// the input layer; subsequent entries come from getNumNodesOnLayer().
	snprintf(line, lineSize, "Layers:%*d\n", fieldWidth, getNumLayersIncludingInput());
	writer.write(line);
	
	snprintf(line, lineSize, "Layer %*d:%*d\n", fieldWidth, 0, 
										   fieldWidth, numInputs);
	writer.write(line);
	
	const int numHiddenAndOutputLayers = getNumLayersExcludingInput();
	
	for(int layerIndex = 0; layerIndex < numHiddenAndOutputLayers; ++layerIndex)
	{
		snprintf(line, lineSize, "Layer %*d:%*d\n", fieldWidth, layerIndex + 1, 
											   fieldWidth, getNumNodesOnLayer(layerIndex));
		writer.write(line);
	}
	
	// Per-node data: the threshold is emitted with weight index -1, then each
	// weight is emitted with its own index.
	for(int layerIndex = 0; layerIndex < numHiddenAndOutputLayers; ++layerIndex)
	{
		const int nodesInLayer = getNumNodesOnLayer(layerIndex);
		
		for(int nodeIndex = 0; nodeIndex < nodesInLayer; ++nodeIndex)
		{
			NumericalArray<float> nodeWeights;
			float nodeThreshold;
			get(layerIndex, nodeIndex, &nodeWeights, nodeThreshold);
			
			snprintf(line, lineSize, "%*d %*d %*d   %.16f\n", fieldWidth, layerIndex, 
														 fieldWidth, nodeIndex, 
														 fieldWidth, -1, 
														 nodeThreshold);
			writer.write(line);
			
			const int weightCount = nodeWeights.size();
			
			for(int weightIndex = 0; weightIndex < weightCount; ++weightIndex)
			{
				snprintf(line, lineSize, "%*d %*d %*d   %.16f\n", fieldWidth, layerIndex, 
															 fieldWidth, nodeIndex, 
															 fieldWidth, weightIndex, 
															 nodeWeights[weightIndex]);
				writer.write(line);
			}
		}
	}
}
Exemplo n.º 3
0
BEGIN_UGEN_NAMESPACE

#include "ugen_NeuralPattern.h"


/** Constructs a training pattern from an input/output vector pair.
	Both vectors are deep-copied (via copy()) so the pattern owns its own
	data and later mutation of the caller's arrays cannot affect it.
	@param _inputVector		The input values for this pattern.
	@param _outputVector	The expected output values for this pattern. */
NeuralPatternSimpleInternal::NeuralPatternSimpleInternal(NumericalArray<float> const& _inputVector,
														 NumericalArray<float> const& _outputVector) throw()
:	inputVector(_inputVector.copy()), 
	outputVector(_outputVector.copy())
{
}
Exemplo n.º 4
0
/** Replaces this node's weight vector and threshold.
	The assignment is silently ignored if the new weight vector's size does
	not match the current one (the node's topology cannot change here). */
void NeuralNodeSimpleInternal::set(NumericalArray<float> const& newWeightVector, const float newThreshold) throw()
{	
	// Guard clause: reject a size mismatch up front.
	if(newWeightVector.size() != weightVector.size()) return;
	
	weightVector = newWeightVector;
	threshold = newThreshold;
}
Exemplo n.º 5
0
/** One back-propagation step for this node.
	Updates the node's weights and threshold from the given error, and
	accumulates into adjustVector the error contribution to pass down to the
	previous layer.
	@param inputVector		The inputs this node saw on the forward pass.
	@param error			The error signal assigned to this node.
	@param actFuncOffset	Flat offset added to the sigmoid derivative term.
	@param learnRate		Learning-rate multiplier for the weight updates.
	@param adjustVector		Accumulator for the previous layer's error (in/out). */
void NeuralNodeSimpleInternal::backProp(NumericalArray<float> const& inputVector, 
										const float error, const float actFuncOffset, const float learnRate, 
										NumericalArray<float>& adjustVector) throw()
{
	const float lastOutput = this->output;
	// delta = error * sigmoid'(output), with a constant offset on the derivative.
	const float delta = error * (actFuncOffset + (lastOutput * (one - lastOutput)));
	const float scaledDelta = delta * learnRate;
	
	float* weights = weightVector.getArray();
	float* adjustments = adjustVector.getArray();
	const float* inputs = inputVector.getArray();
	
	const int numWeights = weightVector.size();
	
	for(int w = 0; w < numWeights; ++w)
	{
		// NB statement order matters: the accumulated adjustment deliberately
		// uses the freshly updated weight, matching the original behaviour.
		weights[w] += inputs[w] * scaledDelta;
		adjustments[w] += weights[w] * delta;
	}
	
	threshold += scaledDelta;
}
Exemplo n.º 6
0
/** One full training step: forward-propagates inputVector, computes the
	output-layer error against targetVector, then back-propagates that error
	through all layers (updating their weights as it goes). */
void NeuralNetworkSimpleInternal::backProp(NumericalArray<float> const& inputVector, NumericalArray<float> const& targetVector) throw()
{
	// Forward pass first so every node's cached output is current.
	NumericalArray<float> outputVector = propogate(inputVector);	
	
	// Output-layer error: target minus actual, element-wise.
	float* errors = errorVector.getArray();
	const float* targets = targetVector.getArray();
	const float* outputs = outputVector.getArray();
	
	const int numErrors = errorVector.size();
	
	for(int i = 0; i < numErrors; ++i)
		errors[i] = targets[i] - outputs[i];
	
	// Walk the layers from output back to the first hidden layer; each layer's
	// backProp returns the error vector for the layer below it.
	NumericalArray<float> layerError = errorVector;
	NeuralLayer *layerArray = layers.getArray();
	
	for(int layerIndex = getNumLayersExcludingInput() - 1; layerIndex >= 0; --layerIndex)
		layerError = layerArray[layerIndex].backProp(layerError, actFuncOffset, learnRate);
}
Exemplo n.º 7
0
/** Forward-propagates inputVector through this node: computes the weighted
	sum of the inputs plus the threshold, applies the logistic activation,
	caches the result in `output`, and returns it. */
float NeuralNodeSimpleInternal::propogate(NumericalArray<float> const& inputVector) throw()
{
	const float* inputs = inputVector.getArray();
	const float* weights = weightVector.getArray();
	const int numWeights = weightVector.size();
	
	// Dot product of inputs and weights.
	float weightedSum = zero;
	
	for(int i = 0; i < numWeights; ++i)
		weightedSum += inputs[i] * weights[i];
	
	// Logistic activation 1 / (1 + e^-act); `ne1` presumably holds e — TODO confirm.
	const float act = weightedSum + threshold;
	output = one / (one + (float)ugen::pow(ne1, -act));
	
	return output;
}