Example #1
void TetrisCup::putFigure(const Figure &_figure)
{
    m_clearedLines = 0;
    Matrix figMatrix = _figure.getContent();
    for (int i = 0; i < figMatrix.getWidth(); i++)
        for (int j = 0; j < figMatrix.getHeight(); j++)
            if (figMatrix.getValue(i, j) > 0)
                m_content.setValue(_figure.getX() + i, _figure.getY() + j, figMatrix.getValue(i, j));

    int i = 0;
    while (i < m_content.getHeight()) {
        bool filled = true;
        for (int j = 0; j < m_content.getWidth(); j++)
            if (m_content.getValue(j, i) == 0) {
                filled = false;
                break;
            }
        if (filled) {
            m_content.delRow(i);
            m_clearedLines++;
        }
        else
            i++;
    }
}
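The row-clearing loop above only advances the index when nothing was deleted, so runs of consecutive filled rows are handled correctly. Below is a minimal, self-contained sketch of the same scan on a plain std::vector grid; it is illustrative only and does not use the TetrisCup or Matrix API from the example.

#include <algorithm>
#include <cstddef>
#include <vector>

// Rows are vectors of cells; 0 means empty. Returns how many rows were removed.
int clearFilledRows(std::vector<std::vector<int> > &grid)
{
    int cleared = 0;
    std::size_t i = 0;
    while (i < grid.size()) {
        bool filled = std::all_of(grid[i].begin(), grid[i].end(),
                                  [](int cell) { return cell != 0; });
        if (filled) {
            // do not advance: the next row drops into slot i
            grid.erase(grid.begin() + static_cast<std::ptrdiff_t>(i));
            ++cleared;
        } else {
            ++i;
        }
    }
    return cleared;
}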
Example #2
// This function simply adds the specified number of neurons to the 
// hidden layer.
void BPN::growHiddenLayer(int extraNodes)
{
	// must have a hidden layer and a positive number of nodes to add
	if(weights.size()<2 || extraNodes<1) return;

	// weights between input layer and hidden layer
	// increase the width by extraNodes
	int oldWidth = weights[0].getWidth();
	Matrix<float> temp = weights[0];
	weights[0].resize(weights[0].getWidth()+extraNodes, weights[0].getHeight());
	// and set random values to new weights
	for(int i=0;i<weights[0].getHeight();i++)
	{
		for(int j=0;j<weights[0].getWidth();j++)
		{
			if(j<oldWidth)
				weights[0].setValue(j, i, temp.getValue(j, i));
			else
				weights[0].setValue(j, i, getRandom(WEIGHTMIN, WEIGHTMAX));
		}
	}

	// do bias weights too
	oldWidth = biasWeights[0].getWidth();
	temp = biasWeights[0];
	biasWeights[0].resize(biasWeights[0].getWidth()+extraNodes, biasWeights[0].getHeight());
	// and set random values to new weights
	for(int i=0;i<biasWeights[0].getHeight();i++)
	{
		for(int j=0;j<biasWeights[0].getWidth();j++)
		{
			if(j<oldWidth)
				biasWeights[0].setValue(j, i, temp.getValue(j, i));
			else
				biasWeights[0].setValue(j, i, getRandom(WEIGHTMIN, WEIGHTMAX));
		}
	}

	// weights between 1st hidden layer and next layer
	// increase the height by extraNodes
	int oldHeight = weights[1].getHeight();
	temp = weights[1];
	weights[1].resize(weights[1].getWidth(), weights[1].getHeight()+extraNodes);
	// and set random values to new weights
	for(int i=0;i<weights[1].getHeight();i++)
	{
		for(int j=0;j<weights[1].getWidth();j++)
		{
			if(i<oldHeight)
				weights[1].setValue(j, i, temp.getValue(j, i));
			else
				weights[1].setValue(j, i, getRandom(WEIGHTMIN, WEIGHTMAX));
		}
	}

	// 1st hidden layer doesn't connect to second bias neuron
	// so skip that one now
}
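Each block in growHiddenLayer repeats the same copy-old/randomise-new idiom. Here is a self-contained sketch of that idiom on a plain 2-D std::vector; randomWeight is a stand-in for getRandom(WEIGHTMIN, WEIGHTMAX), not the BPN helper itself.

#include <cstddef>
#include <cstdlib>
#include <vector>

// Stand-in for getRandom: uniform value in [lo, hi].
static float randomWeight(float lo, float hi)
{
    return lo + (hi - lo) * (std::rand() / static_cast<float>(RAND_MAX));
}

// Widen every row by extraNodes columns, keeping existing values and
// randomising only the newly added ones.
void growColumns(std::vector<std::vector<float> > &m, int extraNodes, float lo, float hi)
{
    if (extraNodes < 1)
        return;
    for (std::size_t i = 0; i < m.size(); ++i) {
        std::size_t oldWidth = m[i].size();
        m[i].resize(oldWidth + static_cast<std::size_t>(extraNodes));
        for (std::size_t j = oldWidth; j < m[i].size(); ++j)
            m[i][j] = randomWeight(lo, hi);
    }
}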
Example #3
	/**
	 * @brief Sets the components of this vector based on a 3x1 matrix
	 * @param mat A 3x1 matrix of vector components in order x,y,z
	 * @todo Flip matrix to 1x3 (XxY)
	 */
	void Vector3D::setMatrix(Matrix mat)
	{
		if (mat.getXSize() != 3 || mat.getYSize() != 1)
			return;

		xComp = mat.getValue(0, 0);
		yComp = mat.getValue(1, 0);
		zComp = mat.getValue(2, 0);
	}
Example #4
 bool inverse( Matrix &_matrix ) {
   typedef typename scalar_type< typename scalar_type< Matrix >::type >::type MatrixElement;
   int max_row_index = 0;
   int max_col_index = 0;
   int element_count;
   {
     typename indexed_iterator< Matrix >::type row_iter( _matrix.begin() );
     std::advance( row_iter, _matrix.size() - 1 );
     element_count = row_iter.getIndex() + 1;
   }
   {
     typename indexed_iterator< Matrix >::type row_iter( _matrix.begin() );
     typename indexed_iterator< Matrix >::type row_end( _matrix.end() );
     for( ; row_iter != row_end; ++row_iter ) {
       typename indexed_iterator< typename scalar_type< Matrix >::type >::type col_iter( row_iter->begin() );
       std::advance( col_iter, row_iter->size() - 1 );
       element_count = std::max( element_count, col_iter.getIndex() + 1 );
     }
   }
   Vector< std::vector< int > > log( element_count );
   {
     indexed_iterator< std::vector< int > >::type log_iter( log.begin() );
     indexed_iterator< std::vector< int > >::type log_end( log.end() );
     for( ; log_iter != log_end; ++log_iter )
       *log_iter = log_iter.getIndex();
   }
   Matrix lu_matrix = _matrix;
   if( !lu( lu_matrix, log ) )
     return false;
   
   for( int index = 0; index != element_count; ++index ) {
     {
       for( int row = 0; row != element_count; ++row ) {
         int pivot = log.getConstValue( row );
         MatrixElement sum = ( ( pivot == index ) ? 1 : 0 );
         for( int col = 0; col != row; ++col )
           sum -= lu_matrix.getConstValue( row ).getConstValue( col ) * _matrix.getConstValue( col ).getConstValue( index );
        _matrix.getValue( row ).getValue( index ) = sum;
       }
     }
     {
       for( int row = element_count - 1; row != -1; row-- ) {
         MatrixElement sum = _matrix.getConstValue( row ).getConstValue( index );
         for( int col = row + 1; col != element_count; col++ )
           sum -= lu_matrix.getConstValue( row ).getConstValue( col ) * _matrix.getConstValue( col ).getConstValue( index );
         _matrix.getValue( row ).getValue( index ) = sum / lu_matrix.getConstValue( row ).getConstValue( row );
       }
     }
   }
   return true;
 }
Example #5
void Matrix<elType>::multiply(const Matrix &m)
{
	if(m.getHeight()!=width) {
		throw std::invalid_argument("Matrix dimension must agree for multiplication");
	}

	// perform multiply and store in temporary memory
	elType *temp_pData;
	temp_pData = new elType[height*m.getWidth()];

	elType value;
	int row,col,m_row,m_col;
	for(row=0; row<height; row++) {
		for(m_col=0; m_col<m.getWidth(); m_col++) {
			value = 0;
			for(m_row=0,col=0; col<width; m_row++,col++) {
				value += m.getValue(m_row,m_col)*getValue(row,col);
			}
			temp_pData[m_col*height+row] = value;
		}
	}

	// copy over result matrix to this
	// TODO: The height and width seem inverted... (LP)
	create(height,m.getWidth());
	memcpy(pData, temp_pData, height*width*sizeof(elType));

	// de-allocate memory
	delete[] temp_pData;
	temp_pData = 0;
}
Example #6
bool BPN::DActivationFN(Matrix<float>& in, Matrix<float>& out, int layer) const
{
#ifdef _DEBUG
	if(in.width!=out.width || in.height!=out.height) return false;
#endif
	for(int y=0;y<in.height;y++)
	{
		for(int x=0;x<in.width;x++)
			out.setValue(x, y, DActivationFN(in.getValue(x, y), layer));
	}
	return true;
}
Example #7
void Matrix<elType>::multiply(const Matrix &m1,const Matrix &m2)
{
	if(m2.getHeight()!=m1.getWidth()) {
		throw std::invalid_argument("Matrix dimension must agree for multiplication");
	}
	create(m2.getWidth(),m1.getHeight());
	elType *temp_pData = pData;

	elType value;
	int row,col,m_row,m_col;
	for(row=0; row<m1.getHeight(); row++) {
		for(m_col=0; m_col<m2.getWidth(); m_col++) {
			value = 0;
			for(m_row=0,col=0; col<m1.getWidth(); m_row++,col++) {
				value += m2.getValue(m_row,m_col)*m1.getValue(row,col);
			}
			temp_pData[m_col*height+row] = value;
		}
	}

}
Example #8
    Matrix Matrix::operator-(const Matrix &matr)
    {
        Matrix result;

        if (xSize != matr.xSize || ySize != matr.ySize) //Matrix sizes MUST match.
            return result;

        result.setSize(xSize, ySize);

        for (int x = 0; x < xSize; ++x)
            for (int y = 0; y < ySize; ++y)
                {
                    double sub = (mat[x])[y] - matr.getValue(x, y);
                    result.setValue(sub, x, y);
                }
        return result;
    }
Example #9
    Matrix Matrix::operator*(const Matrix& matr)
    {
         Matrix result;

        //Perform rows vs column check
        if (ySize != matr.getXSize())
            return result;

        result.setSize(xSize, matr.getYSize());

        for (int y = 0; y != result.getXSize(); ++y)
        {
            for (int x = 0; x != result.getYSize(); x++)
            {
                double sum = 0;
                for (int i = 0; i != getYSize(); ++ i)
                    sum += getValue(i, y) * matr.getValue(x, i);
                result.setValue(sum, x, y);
            }
        }
        return result;
    }
Example #10
    /**
     * @brief Standard matrix multiplication.
     * @param matr Second matrix with same Y size as this matrix's X size.
     * @return Resultant matrix with X of the second matrix and Y of the first matrix.
     */
    Matrix Matrix::operator*(const Matrix& matr)
    {
        //Note: this algorithm assumes that this matrix is the matrix on the LEFT.
        Matrix result;

        //The columns of the first MUST match the rows of the second.
        if (getXSize() != matr.getYSize())
            return result;

        result.setSize(matr.getXSize(), getYSize()); //Size is equal to columns of the second by the rows of the first

        for (int iY = 0; iY < getYSize(); iY++) //Rows of the first
        {
            for (int iX = 0; iX < matr.getXSize(); iX++)
            {
                double sum = 0;
                for (int i = 0; i < getXSize(); i++)
                    sum += getValue(i, iY) * matr.getValue(iX, i);
                result.setValue(sum, iX, iY);
            }
        }

        return result;
    }
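Examples #9 and #10 both treat (x, y) as (column, row), so each result cell is the dot product of a row of the left matrix with a column of the right one. The following self-contained check of that arithmetic uses plain arrays rather than the Matrix class from the examples; the values are illustrative.

#include <cstdio>

int main()
{
    // A is 2 rows x 3 columns, B is 3 rows x 2 columns, so C = A*B is 2x2.
    const double A[2][3] = { {1, 2, 3}, {4, 5, 6} };
    const double B[3][2] = { {7, 8}, {9, 10}, {11, 12} };
    double C[2][2] = { {0, 0}, {0, 0} };

    for (int row = 0; row < 2; ++row)        // rows of A
        for (int col = 0; col < 2; ++col) {  // columns of B
            double sum = 0;
            for (int i = 0; i < 3; ++i)      // shared dimension
                sum += A[row][i] * B[i][col];
            C[row][col] = sum;
        }

    // Prints:
    // 58 64
    // 139 154
    std::printf("%g %g\n%g %g\n", C[0][0], C[0][1], C[1][0], C[1][1]);
    return 0;
}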
Example #11
/** Extract all training pairs from an SGF file using the given
SGFReader object and add them to the given NNDatabase.
@param sgf An SGFReader object to use to read the file it
represents.
@param database The NNDatabase to store the training pairs in. */
void Urgency3BPNGoTrainer::extractTrainingPairs(SGFReader* sgf, NNDatabase* database,
			int movesFrom /*=0*/, int movesTo /*=0*/, int lookahead /*=0*/, bool quiet /*=false*/) const
{
	// at each step in sgf file, score current move with current urgency net
	// then score next move of same colour
	// if first move doesn't score at least 0.2 higher than second
	// add as training pairs:
	// 1. First move output should be second move score +0.2 (0.9 max)
	// 2. Second move output should be first move score -0.2 (0.1 min)
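	// Worked example with illustrative numbers (not taken from the SGF data):
	// currentMoveScore = 0.45, nextMoveScore = 0.50
	// 0.45 <= 0.50 + 0.2, so two pairs are added:
	//   currentInput -> min(0.50 + 0.2, 0.9) = 0.7
	//   nextInput    -> max(0.45 - 0.2, 0.1) = 0.25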

	LogWriter log;
	string message;
	//char buffer[50];

	if(movesFrom > movesTo)
	{
		log.println("Start move is greater than end move.");
		return;
	}

	vector<Move> moves;
	sgf->getTree().getAllPrimaryMoves(moves);
	if(moves.size()==0)
		return;

	// if file already in the database skip it
	if(!database->addSignature(sgf->getSignature()))
	{
		log.println("File already in database.");
		return;
	}

	int size = 19;
	string v;
	if(sgf->getBoardSize(v))
		size = atoi(v.c_str());
	if(size>19 || size<5) 
	{
		message = "Boardsize too small or large to handle: ";
		message+=v;
		log.println(message);
		return;
	}
	BoardStruct board(size);
	
	vector<Move> futureMoves;
	int moveNumber=0;
	
	string c;
	Matrix<float> currentInput(1, goAdapter->getBPN().getWeights()[0].getHeight());
	Matrix<float> nextInput(1, goAdapter->getBPN().getWeights()[0].getHeight());
	Matrix<float> output(1, 1);
	Matrix<float>* answers;
//	vector<Matrix<float> > temp(goAdapter->getBPN().getNumberOfLayers());
	vector<vector<float> > tv;
	tv.resize(1);
	tv[0].resize(1);
	// colour of new move
	int colour;
	
	//sgf->initBoard(board);
	board.clear();
	// add setup points
	setupBoardFromSGF(*sgf, board);
/*	vector<Move> props;
	if(sgf->getRootNode()->getEmptySetup(props))
	{
		for(int i=0;i<props.size();i++)
			board.setPoint(props[i].getX(), props[i].getY(), EMPTY, false);
	}
	if(sgf->getRootNode()->getBlackSetup(props))
	{
		for(int i=0;i<props.size();i++)
			board.setPoint(props[i].getX(), props[i].getY(), BLACK, false);
	}
	if(sgf->getRootNode()->getWhiteSetup(props))
	{
		for(int i=0;i<props.size();i++)
			board.setPoint(props[i].getX(), props[i].getY(), WHITE, false);
	} */

	SGFNode* nextNode = &(sgf->getRootNode());
	bool useThisMove = true;
	
	bool currentIsMoveA = true;
	bool getInputSuccess = false;

	// continue until a null node forces the loop to break
	while (true) {
		// check bounds if necessary
		if(movesTo!=0) {
			// use ply instead of individual moves
			if(moveNumber/2 >= movesTo) break;
			else if(moveNumber/2 < movesFrom) useThisMove = false;
			else useThisMove = true;
		}

		if(movesTo==0 || useThisMove) {
			nextNode = nextNode->getChild();
			if(nextNode==NULL)
				break;
			
			// determine colour of move	
			vector<string> vs;
			if (nextNode->getProperty(SGFProperty::blackMoveTag, vs)) {
				colour = BLACK;
				c = vs[0];
			}
			else if (nextNode->getProperty(SGFProperty::whiteMoveTag, vs)) {
				colour = WHITE;
				c = vs[0];
			}
			else break;
		
			// find our next move to compare with
			// check 
			futureMoves.clear();
			// NOTE: The first move returned by getLookaheadMoves() is
			// always the current one, so we need 2 lookahead moves
			nextNode->getLookaheadMoves(2, colour, futureMoves);
			if(futureMoves.size()==2 && Move::SGFToX(c)!=-1 && Move::SGFToY(c)!=-1
				&& futureMoves[1].getX()!=-1 && futureMoves[1].getY()!=-1) {
				// At each step in sgf file, score current move with current urgency net
				// then score next move of same colour
				// if first move doesn't score at least 0.2 higher than second
				// add as training pairs:
				// 1. First move output should be second move score +0.2 (0.9 max)
				// 2. Second move output should be first move score -0.2 (0.1 min)

				// get current move score
				goAdapter->getInput(Move::SGFToX(c), Move::SGFToY(c), board, currentInput, colour);
				goAdapter->getBPN().getAnswer(currentInput);
				answers = &(goAdapter->getBPN().getOutputs()[goAdapter->getBPN().getNumberOfLayers()-1]);
				float currentMoveScore = answers->getValue(0, 0);

				// get next move score
				goAdapter->getInput(futureMoves[1].getX(), futureMoves[1].getY(), board, nextInput, colour);
				goAdapter->getBPN().getAnswer(nextInput);
				answers = &(goAdapter->getBPN().getOutputs()[goAdapter->getBPN().getNumberOfLayers()-1]);
				float nextMoveScore = answers->getValue(0, 0);

				// if current scores higher than next move by 0.2
				// don't train otherwise do train
				if(currentMoveScore<=(nextMoveScore+0.2)) {
					if((nextMoveScore+0.2)>0.9)
						tv[0][0] = 0.9f;
					else
						tv[0][0] = nextMoveScore+0.2;
					output.setValues(tv);
					database->addTrainingPair(&currentInput, &output);

					if((currentMoveScore-0.2)<0.1)
						tv[0][0] = 0.1f;
					else
						tv[0][0] = currentMoveScore-0.2;
					output.setValues(tv);
					database->addTrainingPair(&nextInput, &output);
				}
			}
		} // end if(movesTo==0 || useThisMove)
		
		board.setPoint(Move::SGFToX(c), Move::SGFToY(c), colour);
		if(!quiet)
			log.print(".");
		moveNumber++;
		// save after every board position has been
		// analysed and moves extracted
		// database.save();
	} // end while(true)
	if(!quiet)
		log.print("\n");
	//database.save();
} // end extractTrainingPairs
Example #12
/** Load a BPN network from the specified file.
Save version differences:
Version 0: Includes saveVersion, type, learningRate, momentum, epochsCompleted and weights.
Version 1: Adds lastPatternTest, patternsCompleted.
Version 2: Adds dynamicLearningRate, dynamicMomentum.
Version 3: Adds inputFieldShape.
@param f The filename to read this BPN network from. 
@param quiet An optional parameter to select level of text output, default is false.
@see BPN::save() */
bool BPN::load(string f, bool quiet /*=false*/)
{
	LogWriter log;
	string message = "Loading ";
	message+=f;
	if(!quiet)
		log.println(message);

	ifstream in(f.c_str(), ios::binary);

	if(!in)
	{
		message = "BPN: Could not load file: ";
		message+=f;
		LogWriter::printerr(message+"\n");
		return false;
	}

	this->filename = f;

	// read save version
	in.read(reinterpret_cast<char*>(&saveVersion), sizeof(int));
	if(in.fail()) 
	{
		LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
		return false;
	}

	// read type
	in.read(reinterpret_cast<char*>(&id), sizeof(int));
	if(in.fail()) 
	{
		LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
		return false;
	}


	// load learning rate
	in.read(reinterpret_cast<char*>(&learningRate), sizeof(float));
	if(in.fail()) 
	{
		LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
		return false;
	}

	// load momentum
	in.read(reinterpret_cast<char*>(&momentum), sizeof(float));
	if(in.fail()) 
	{
		LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
		return false;
	}

	// load the number of epochs this net has completed
	in.read(reinterpret_cast<char*>(&epochsCompleted), sizeof(int));
	if(in.fail()) 
	{
		LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
		return false;
	}

	// load number of layers

	// load weights as matrices including top row of each for bias weights
	int it = 0;
	in.read(reinterpret_cast<char*>(&it), sizeof(int));
	if(in.fail()) 
	{
		LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
		return false;
	}

	activationFunction.clear();
	weights.resize(it);
	biasWeights.resize(it);
	errors.resize(it);
	outputs.resize(it+1);
//	net.resize(it);
	// load layer sizes - width, height
	Matrix<float>* temp;
	for(int l=0;l<it;l++)
	{
		temp = Matrix<float>::load(in);
		if(temp==NULL)
		{
			LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
			return false;
		}

		//weights[l] = *temp;
		biasWeights[l].resize(temp->getWidth(), 1);
		weights[l].resize(temp->getWidth(), temp->getHeight()-1);
		for(int y=0;y<temp->getHeight();y++)
		{
			for(int x=0;x<temp->getWidth();x++)
			{
				// if this is the bias weight row
				if(y==0)
					biasWeights[l].setValue(x, 0, temp->getValue(x, 0));
				else
					weights[l].setValue(x, y-1, temp->getValue(x, y));
			}
		}
		delete temp;
		temp = NULL;
		activationFunction.push_back(SIGMOID);
	}

	weightsSize = weights.size();

	// load lastPatternTest and patternsCompleted
	if(saveVersion>0)
	{
		in.read(reinterpret_cast<char*>(&lastPatternTest), sizeof(double));
		if(in.fail()) 
		{
			LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
			return false;
		}
		in.read(reinterpret_cast<char*>(&patternsCompleted), sizeof(double));
		if(in.fail()) 
		{
			LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
			return false;
		}
		dynamicLearningRate = false;
		dynamicMomentum = false;
	}

	if(saveVersion>1)
	{
		in.read(reinterpret_cast<char*>(&dynamicLearningRate), sizeof(bool));
		if(in.fail()) 
		{
			LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
			return false;
		}
		in.read(reinterpret_cast<char*>(&dynamicMomentum), sizeof(bool));
		if(in.fail()) 
		{
			LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
			return false;
		}
	}

	if(saveVersion>2)
	{
		in.read(reinterpret_cast<char*>(&inputFieldShape), sizeof(int));
		if(in.fail()) 
		{
			LogWriter::printerr("Corrupt or incorrect version of .bpn file, aborting...\n", typeToString(id));
			return false;
		}
	}

	in.close();

	resetWeightChanges();
	
	return true;
}
Example #13
void Gauss::resolveSytem( bool usePivot ){
    int numberOfLines;
    double multiplier;
    double pivo;
    long double executionTimeInSec = 0;

    clock_t executionTime = 0;
    clock_t start;
    clock_t end;
    std::ostringstream description;

    beforeSolve();

    Matrix* coefficientsMatrix = getCoefficientMatrix();
    Matrix* independentTermsMatrix = getIndependentTerms();
    numberOfLines = independentTermsMatrix->getHeight();

    saveOnList("Sistema Inicial: \n");

    try{
        for(int k = 0; k<=numberOfLines-2; k++){
            for(int i = k +1; i<=numberOfLines-1;i++){
                start = clock();

                if(usePivot == true){ pivoting( coefficientsMatrix, independentTermsMatrix, numberOfLines, k );}

                pivo = coefficientsMatrix->getValue(k,k);

                if(pivo == 0){
                    end = clock();
                    executionTime = executionTime + (end - start);
                    throw 0;
                }

                multiplier = coefficientsMatrix->getValue(i,k)/pivo;
                addRowByOtherRowMultipliedByScalar(coefficientsMatrix,independentTermsMatrix,i,k,multiplier);

                end = clock();
                executionTime = executionTime + (end - start);
                description<<"Operação realizada: L"<< i <<" <- L"<< i <<" - ("<< multiplier <<") * L"<< k<<"\n";
                saveOnList(description.str());
                description.str("");

            }

            if(coefficientsMatrix->getValue(numberOfLines-1,numberOfLines-1) == 0){
                end = clock();
                executionTime = executionTime + (end - start);
                throw 1;
            }

        }

        start = clock();
        retroSubstitutions();
        end = clock();
        executionTime = executionTime + (end - start);

    }

    catch(int e){
        if( e == 0 ){
            saveOnList("Nao foi possivel continuar pois o pivô atual é igual a zero.\n");
        }
        if( e == 1 ){
            saveOnList("Não é possivel realizar a retro-substituição\n");
        }
        setSolvable(false);
    }

    executionTimeInSec = executionTime/(long double) CLOCKS_PER_SEC;
    setExecutionTime(executionTimeInSec);

    afterSolve();

}
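The elimination step above relies on addRowByOtherRowMultipliedByScalar to apply the elementary row operation L_i <- L_i - (m) * L_k to the coefficient matrix and the independent terms together. A self-contained sketch of that operation on plain vectors, illustrative only and independent of the Gauss/Matrix classes used above:

#include <cstddef>
#include <vector>

// Augmented-system row operation: row i becomes L_i - m * L_k,
// which zeroes the entry below the pivot in column k.
void eliminateRow(std::vector<std::vector<double> > &a, std::vector<double> &b,
                  std::size_t i, std::size_t k, double m)
{
    for (std::size_t col = 0; col < a[i].size(); ++col)
        a[i][col] -= m * a[k][col];
    b[i] -= m * b[k];
}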
Example #14
/** Train the neural network against a single input/output
training pair through one epoch.
@param input The input matrix.
@param output The required output matrix.
@param doMomentum If true, take the momentum value into
consideration when recalculating weight values; some amount of the
previous epoch's weight change will be added to each weight. */
bool RBFBPNTrainer::trainPattern(const Matrix<float>& input, const Matrix<float>& output, const vector<int>& freezeLayers,
					   bool doMomentum /*=true*/, bool batchUpdate /*=false*/)
{
	// First check which phase we should be in
	int patternsCompleted = internalBPN->getPatternsCompleted();
	if(patternsCompleted>=(phase1+phase2))
	{
		// phase 3 combined hidden and output training
/* J is hidden node learning strength, K is output node learning strength
procedure TrainCombined(float J, float K)
  float E	{output error value}
  int   j	{hidden node index}
  int   k	{output node index}

  for 1 ≤ j ≤ NJ	{for each hidden node}
    sj = 0	{set hidden node's learning strength to zero}
  end for
  CalcActivations	{precalculate all hidden activations}
  for 1 ≤ k ≤ NK	{for each output node}
    E = ActualOutput(k) - DesiredOutput(k) {determine output error}
    for 1 ≤ j ≤ NJ	{for each hidden node}
      sj -= JEajWkj	{modify hidden node's learning strength according to hidden learning
      Wkj -= KEaj	  strength, output error, hidden activation, and output weight}
    end for	{modify output weight according to output learning strength, output error,
  end for	  and hidden activation}
  for 1 ≤ j ≤ NJ	{for each hidden node}
    TrainHiddenNode(j, sj) {train hidden node using hidden node's learning strength}
  end for
end procedure*/
		Matrix<float>& errors0 = internalBPN->errors[0];
		Matrix<float>& weights0 = internalBPN->weights[0];
		Matrix<float>& outputWeights = internalBPN->weights[1];
		int inputNodes = weights0.getHeight();
		int hiddenNodes = weights0.getWidth();
		int outputNodes = outputWeights.getWidth();
		float lr = internalBPN->learningRate;
		float hiddenActivationConstant = internalBPN->getHiddenActivationConstant();
// can't use a linearly degrading learning rate in phase 3 because we have an indefinite number 
// of patterns to train from...
//#ifdef LINEARLY_DEGRADING_LEARNING_RATE
//		lr = internalBPN->learningRate*(((phase1+phase2+1)-patternsCompleted)/(float)(phase1+phase2+1));
//#endif

// ADRIAN COOK'S VERSION OF PHASE3
		vector<float> hiddenErrors(hiddenNodes, 0.0f);

		// input to an RBF hidden unit is the Euclidean norm:
		//    sqrt[sum((Xi-Wij)^2)]
		// where x is each input value, and w is each weight into 
		// the hidden unit

		// calculate hidden errors
		float error, diff;
		float totalErrors = 0.0f;
		int i, j;
		if(errors0.getWidth()!=1 || errors0.getHeight()!=hiddenNodes)
			errors0.resize(1, hiddenNodes);
		for(i=0;i<hiddenNodes;i++)
		{
			error = 0.0f;
			for(j=0;j<inputNodes;j++)
			{
				diff = input.getValue(0, j)-weights0.getValue(i, j);
				error+=diff*diff;
			}
			errors0.setValue(0, i, error);
		} // end for i
		
		// NOTE: We should sqrt the error for each hidden unit
		// but our activation function is exp(-net^2) (Gaussian form)
		// so we square it straight away anyway...

		// convert hidden errors to hidden activations
		for(i=0;i<hiddenNodes;i++)
			errors0.setValue(0, i, exp(-errors0.getValue(0, i)));

		// determine the output errors and train output node weights
		float outputError, actualOutput, outputChange, hiddenActivation, outputWeight;
		for(i=0;i<outputNodes;i++)
		{
			actualOutput = 0.0f;
			for(j=0;j<hiddenNodes;j++)
				actualOutput+=errors0.getValue(0, j)*outputWeights.getValue(i, j);
#ifdef USE_CN_OUTPUT_SIGMOID_ACTIVATION_FUNCTION
			actualOutput = 1/(1+exp(-actualOutput));
#endif
			outputError = actualOutput - output.getValue(i, 0);
#ifdef USE_CN_OUTPUT_SIGMOID_ACTIVATION_FUNCTION
			// multiply the output error by the output activation function derivative
			// which for sigmoid is luckily actValue(1-actValue)
//			outputError*=output.getValue(i, 0)*(1-output.getValue(i, 0));
			// Work on the network output not the target value!!!
			outputError*=actualOutput*(1-actualOutput);
#endif
			for(j=0;j<hiddenNodes;j++)
			{
				// calculate the hidden node error
				hiddenActivation = errors0.getValue(0, j);
				outputWeight = outputWeights.getValue(i, j);
#ifdef RBF_ERRORS
#error how do you calculate the hidden node error for RBF nets?
#endif
				hiddenErrors[j]-=lr*outputError*hiddenActivation*outputWeight;
				// correct the hidden to output weights
				outputChange = outputWeight-(lr*outputError*hiddenActivation);
				outputWeights.setValue(i, j, outputChange);
			}
		} // end for i

		// train hidden node weights
		float w, hiddenLr;
		for(i=0;i<hiddenNodes;i++)
		{
			hiddenLr = hiddenErrors[i];
			for(int j=0;j<inputNodes;j++)
			{
				w = weights0.getValue(i, j);
				weights0.setValue(i, j, w+(hiddenLr*(input.getValue(0, j)-w)));
			}
		}
// END ADRIAN COOK'S VERSION OF PHASE3
	}
	// run phase 2 output training
	else if(patternsCompleted>phase1)
	{
#ifdef RBF_ERRORS
#error do this
#endif
		Matrix<float>& outputWeights = internalBPN->weights[1];
		int hiddenNodes = internalBPN->weights[0].getWidth();
//		int outputNodes = outputWeights.getWidth();
		float lr = internalBPN->learningRate;
#ifdef LINEARLY_DEGRADING_LEARNING_RATE
		lr = internalBPN->learningRate*(((phase1+phase2+1)-patternsCompleted)/(float)(phase1+phase2+1));
#endif

/*procedure CalcActivations
  float s	{average hidden error value}
  int   j	{hidden node index}

  s = AverageHiddenError
  for 1 ≤ j ≤ NJ	{for each hidden node}
    aj = e^(-(ej)/s)	{convert hidden error to hidden activation}
  end for
end procedure

float AverageHiddenError
  float e	{cumulative hidden error value}
  int   j	{hidden node index}

  CalcHiddenErrors	{precalculate all hidden errors}
  e = 0
  for 1 ≤ j ≤ NJ	{for each hidden node}
    e += ej	{accumulate hidden error}
  end for
return e/NJ	{return average hidden error}

float ActualOutput(int k)
  float A	{cumulative actual output value}
  int   j	{hidden node index}

  A = 0
  for 1 ≤ j ≤ NJ	{for each hidden node}
    A += ajWkj	{accumulate actual output using hidden activation and output weight}
  end for
	  return A/NJ	{return actual output} */

/*procedure TrainOutputs(float L)
  float E	{output error value}
  int   j	{hidden node index}
  int   k	{output node index}

  CalcActivations	{precalculate all hidden activations}
  for 1 ≤ k ≤ NK	{for each output node}
    E = ActualOutput(k) - DesiredOutput(k) {determine output error}
    for 1 ≤ j ≤ NJ	{for each hidden node}
      Wkj -= LEaj	{modify output weight according to learning strength, output error, and
    end for	  hidden activation}
  end for
end procedure*/

		// calculate hidden errors
/*		float error, diff;
		float totalErrors = 0.0f;
		errors.resize(1, hiddenNodes);
		for(int i=0;i<hiddenNodes;i++)
		{
			error = 0.0f;
			for(int j=0;j<inputNodes;j++)
			{
				diff = input.getValue(0, j)-weights0.getValue(i, j);
				error+=diff*diff;
//				error+=pow((input.getValue(0, j)-weights0.getValue(i, j)), 2);
			}
			error = error/(float)inputNodes;
			errors.setValue(0, i, error);
			totalErrors+=error;
		} // end for i

		// find the average hidden error
		float averageHiddenError = totalErrors/hiddenNodes;

		// convert hidden errors to hidden activations
		for(i=0;i<hiddenNodes;i++)
			errors.setValue(0, i, exp(-errors.getValue(0, i)/averageHiddenError));

		// determine the output errors and adapt weights
		float outputError, actualOutput, outputChange;
		for(i=0;i<outputNodes;i++)
		{
			actualOutput = 0.0f;
			for(int j=0;j<hiddenNodes;j++)
				actualOutput+=errors.getValue(0, j)*outputWeights.getValue(i, j);
			actualOutput = actualOutput/(float)hiddenNodes;
		}*/


/*		for(i=0;i<outputNodes;i++)
		{
			outputError = actualOutput - output.getValue(i, 0);
			for(j=0;j<hiddenNodes;j++)
			{
				outputChange = outputWeights.getValue(i, j)-(lr*outputError*errors.getValue(0, j));
				outputWeights.setValue(i, j, outputChange);
			}
		} // end for i*/

		if(!internalBPN->getAnswer(input)) return false;
		
		int outputLayer = internalBPN->getNumberOfLayers()-1;
		Matrix<float>& outputLayerMatrix = internalBPN->outputs[outputLayer];
		Matrix<float>& hiddenOutputs = internalBPN->outputs[1];
		int numberOfOutputs = outputLayerMatrix.getWidth();
		float outputError, outputChange;
		for(int i=0;i<numberOfOutputs;i++)
		{
			outputError = outputLayerMatrix.getValue(i, 0) - output.getValue(i, 0);
#ifdef USE_CN_OUTPUT_SIGMOID_ACTIVATION_FUNCTION
			// multiply the output error by the output activation function derivative
			// which for sigmoid is luckily actValue(1-actValue)
//			outputError*=output.getValue(i, 0)*(1-output.getValue(i, 0));
			// work on network output not target value!!!
			outputError*=outputLayerMatrix.getValue(i, 0)*(1-outputLayerMatrix.getValue(i, 0));
#endif
			for(int j=0;j<hiddenNodes;j++)
			{
				outputChange = outputWeights.getValue(i, j)-(lr*outputError*hiddenOutputs.getValue(0, j));
				outputWeights.setValue(i, j, outputChange);
			}
		}
	}
	// run phase 1 unsupervised hidden node training
	else
	{
#ifdef RBF_ERRORS
#error do this
#endif
		// check this pattern is not one of the bad examples
		// skip it if it is
		if(output.getValue(0, 0)<0.9f)
			return true;

		// current hidden layer width and height is sqrt of hidden node number
		Matrix<float>& errors0 = internalBPN->errors[0];
		Matrix<float>& weights0 = internalBPN->weights[0];
		int inputNodes = weights0.getHeight();
		int hiddenNodes = weights0.getWidth();
		int hiddenSize = sqrt(hiddenNodes);
		float neighbourhoodSize = hiddenSize;
		float lr = internalBPN->learningRate;
#ifdef LINEARLY_DEGRADING_NEIGHBOURHOOD_SIZE
		// set neighbourhood size
		neighbourhoodSize = hiddenSize*(((phase1+1)-patternsCompleted)/(float)(phase1+1));
#endif
#ifdef NO_NEIGHBOURHOOD
		neighbourhoodSize = 1;
#endif
#ifdef LINEARLY_DEGRADING_LEARNING_RATE
		lr = internalBPN->learningRate*(((phase1+1)-patternsCompleted)/(float)(phase1+1));
#endif
/* int HiddenWinner
float m	{minimum hidden error value}
int   j	{hidden node index}
int   w	{winning hidden node index}

CalcHiddenErrors	{precalculate all hidden errors}
m = MAXFLOAT
for 1 ≤ j ≤ NJ	{for each hidden node}
	if ej < m	{if hidden error is less than current minimum}
		m = ej	{record hidden error as current minimum}
		w = j	{record hidden node as current winner}
	end if
end for
return w	{return winning hidden node} */

/* procedure CalcHiddenErrors
  int   j	{hidden node index}

  for 1 ≤ j ≤ NJ	{for each hidden node}
    ej = HiddenError(j)	{get hidden error}
  end for
end procedure

float HiddenError(int j)
  float e	{cumulative hidden error value}
  int   i	{input node index}

  e = 0
  for 1 ≤ i ≤ NI	{for each input node}
    e += (Si - wji)²	{accumulate hidden error between input node and hidden weight}
  end for
return e/NI	{return hidden error} */

		// calculate hidden errors
		float error, diff;
		errors0.resize(1, hiddenNodes);
		for(int i=0;i<hiddenNodes;i++)
		{
			error = 0.0f;
			for(int j=0;j<inputNodes;j++)
			{
				diff = input.getValue(0, j) - weights0.getValue(i, j);
				error+=diff*diff;
//				error+=pow((input.getValue(0, j)-weights0.getValue(i, j)), 2);
			}
			errors0.setValue(0, i, error/(float)inputNodes);
		} // end for i

		// find the best hidden node
		int bestHiddenNode = 0;
		float bestHiddenError = errors0.getValue(0, 0);
		for(int i=1;i<hiddenNodes;i++)
		{
			if(errors0.getValue(0, i)<bestHiddenError)
			{
				bestHiddenError = errors0.getValue(0, i);
				bestHiddenNode = i;
			}
		}

/* w = hidden winner, L = learning rate, a and b = width
   and height of neighbourhood
procedure TrainNeighbourhood(int w, float L, float a, float b)
  int   c	{horizontal position of winning hidden node in 2D hidden layer}
  int   d	{vertical position of winning hidden node in 2D hidden layer}
  int   x	{horizontal distance from winning hidden node}
  int   y	{vertical distance from winning hidden node}
  float g	{Gaussian-derived learning strength for each hidden node}
  int   j	{hidden node index}

  c = w mod WJ	{determine horizontal position of winning hidden node}
  d = w/WJ	{determine vertical position of winning hidden node}
  for 1 ≤ j ≤ NJ	{for each hidden node}
    x = |j mod WJ - c|	{determine horizontal distance from winning hidden node}
    x = min(x, WJ - x)	{wrap around to opposite side of hidden layer if distance is less}
    y = |j/WJ - d|	{determine vertical distance from winning hidden node}
    y = min(y, HJ - y)	{wrap around to opposite side of hidden layer if distance is less}
    g = e^(-10(x²/a² + y²/b²))	{determine learning strength depending on position in neighbourhood}
    TrainHiddenNode(j, Lg) {train hidden node using learning strength}
  end for
end procedure

procedure TrainHiddenNode(int j, float L)
  int   i	{input node index}

  for 1 ≤ i ≤ NI	{for each input node}
    wji += L(Si - wji)	{make hidden weight more like input node according to learning strength}
  end for
end procedure*/
	
		// train other hidden nodes to be more like the best one

		// determine horizontal and vertical position of winning hidden node
		int hPos = bestHiddenNode%hiddenSize;
		int vPos = bestHiddenNode/hiddenSize;
		int hDist, vDist;
		float g;
		float w;
		for(int i=0;i<hiddenNodes;i++)
		{
			// determine horizontal distance of this hidden node from the winner
			hDist = abs((i%hiddenSize)-hPos);
			// wrap to opposite side of hidden layer if distance is less
			if((hiddenSize-hDist)<hDist) hDist = hiddenSize-hDist;
			// determine vertical distance of this hidden node from the winner
			vDist = abs((i/hiddenSize)-vPos);
			// wrap to opposite side of hidden layer if distance is less
			if((hiddenSize-vDist)<vDist) vDist = hiddenSize-vDist;

			// calculate gaussian learning strength
			g = exp(-10*((hDist*hDist)/(float)(neighbourhoodSize*neighbourhoodSize) + (vDist*vDist)/(float)(neighbourhoodSize*neighbourhoodSize)));
			// multiply by the actual learning rate
			g*=lr;
			for(int j=0;j<inputNodes;j++)
			{
				w = weights0.getValue(i, j);
				weights0.setValue(i, j, w+(g*(input.getValue(0, j)-w)));
			}
		} // end for hiddenNodes
	} // end if phase 1

	internalBPN->patternsCompleted++;
	return true;
} // end trainPattern(Matrix input, Matrix output)
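In phase 1 above, every hidden node is pulled toward the input with a Gaussian strength g = e^(-10(x²/a² + y²/b²)) that falls off with wrapped grid distance from the winning node. A minimal sketch of just those two helper calculations, illustrative and independent of the BPN classes:

#include <cmath>
#include <cstdlib>

// Wrap-around distance between grid positions i and j on a ring of gridSize nodes.
int wrappedDistance(int i, int j, int gridSize)
{
    int d = std::abs(i - j);
    return (gridSize - d < d) ? gridSize - d : d;
}

// Gaussian-derived learning strength for a node at (hDist, vDist) from the winner,
// assuming a square neighbourhood a = b = size.
float neighbourhoodStrength(int hDist, int vDist, float size)
{
    return std::exp(-10.0f * (hDist * hDist + vDist * vDist) / (size * size));
}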
Example #15
File: lu.hpp Project: Fadis/miyabi
 bool lu( Matrix &_matrix, Log &_log ) {
   typedef typename scalar_type< typename scalar_type< Matrix >::type >::type MatrixElement;
   Vector< std::map< int, float > > weight;
   int row_index;
   typename indexed_iterator< Matrix >::type row_iter( _matrix.begin() );
   typename indexed_iterator< Matrix >::type row_end( _matrix.end() );
   for( ; row_iter != row_end; ++row_iter ) {
     MatrixElement max_in_row = getDefault< typename scalar_type< Matrix >::type >();
     typename indexed_iterator< typename scalar_type< Matrix >::type >::type col_iter( row_iter->begin() );
     typename indexed_iterator< typename scalar_type< Matrix >::type >::type col_end( row_iter->end() );
     for( ; col_iter != col_end; ++col_iter ) {
       MatrixElement current_value = *col_iter;
       if( current_value < getDefault< typename scalar_type< Matrix >::type >() )
         current_value = -current_value;
       if( current_value > max_in_row )
         max_in_row = current_value;
     }
     if( max_in_row == getDefault< typename scalar_type< Matrix >::type >() )
       return false;
     weight.getValue( row_iter.getIndex() ) = static_cast< MatrixElement >( 1 ) / max_in_row;
   }
   int element_count;
   {
     typename indexed_iterator< Log >::type log_iter( _log.begin() );
     std::advance( log_iter, _log.size() - 1 );
     element_count = log_iter.getIndex() + 1;
   }
   typename Matrix::const_iterator col_iter;
   int col_index;
   for( col_index = 0; col_index != element_count; ++col_index ) {
     {
       typename indexed_iterator< Matrix >::type row_iter( _matrix.begin() );
       typename indexed_iterator< Matrix >::type row_end( _matrix.begin() );
       typename indexed_iterator< Matrix >::type container_end( _matrix.end() );
       for( ; row_end != container_end && row_end.getIndex() < col_index; ++row_end );
       for( ; row_iter != row_end; ++row_iter ) {
         MatrixElement sum = row_iter->getConstValue( col_index );
         typename indexed_iterator< Matrix >::type row2_iter( _matrix.begin() );
         typename indexed_iterator< Matrix >::type row2_end = row_iter;
         for( ; row2_iter != row2_end; ++row2_iter )
           sum -= row_iter->getConstValue( row2_iter.getIndex() ) * row2_iter->getConstValue( col_index );
         row_iter->getValue( col_index ) = sum;
       }
     }
     int max_rated_row = 0;
     {
       typename indexed_iterator< Matrix >::type row_iter( _matrix.begin() );
       typename indexed_iterator< Matrix >::type row_end( _matrix.end() );
       for( ; row_iter != row_end && row_iter.getIndex() < col_index; ++row_iter );
       MatrixElement max_rate = getDefault< typename scalar_type< Matrix >::type >();
       for( ; row_iter != row_end; ++row_iter ) {
         MatrixElement sum = row_iter->getConstValue( col_index );
         typename indexed_iterator< Matrix >::type row2_iter( _matrix.begin() );
         typename indexed_iterator< Matrix >::type row2_end( _matrix.begin() );
         typename indexed_iterator< Matrix >::type container_end( _matrix.end() );
         for( ; row2_end != container_end && row2_end.getIndex() < col_index; ++row2_end );
         for( ; row2_iter != row2_end; ++row2_iter )
           sum -= row_iter->getConstValue( row2_iter.getIndex() ) * row2_iter->getConstValue( col_index );
         row_iter->getValue( col_index ) = sum;
         if( sum < getDefault< typename scalar_type< Matrix >::type >() )
           sum = -sum;
         MatrixElement rate = weight.getConstValue( row_iter.getIndex() ) * sum;
         if( rate >= max_rate ){
           max_rate = rate;
           max_rated_row = row_iter.getIndex();
         }
       }
     }
     if( col_index != max_rated_row ) {
       boost::swap( _matrix.getValue( max_rated_row ), _matrix.getValue( col_index ) );
       boost::swap( _log.getValue( max_rated_row ), _log.getValue( col_index ) );
       boost::swap( weight.getValue( max_rated_row ), weight.getValue( col_index ) );
     }
     if( _matrix.getConstValue( col_index ).getConstValue( col_index ) == 0.0 )
       return false;
     if( col_index != element_count - 1 ) {
       MatrixElement temp = static_cast< MatrixElement >( 1 ) / _matrix.getConstValue( col_index ).getConstValue( col_index );
       typename indexed_iterator< Matrix >::type row_iter( _matrix.begin() );
       typename indexed_iterator< Matrix >::type row_end( _matrix.end() );
       for( ; row_iter != row_end && row_iter.getIndex() < col_index + 1; ++row_iter );
       for( ; row_iter != row_end; ++row_iter )
         row_iter->getValue( col_index ) *= temp;
     }
   }
   return true;
 }
Example #16
void SimpleHwShadowTest::paintGL()
{
	float shadowBiasScale = -10000;

	float yCoeff = ((float) yRot) / 5760;
	float shadowBias = yCoeff;
	//SPAM( shadowBias );

	Imath::V3f lightDir;
	m_LightFrame.checkDirty();
	m_LightFrame.getForwardVector(lightDir);
	m_MoveLight = m_GlSettings->getClearInBlackConfig();

	m_ShadowViewFrame.setPosition(lightDir * 250);
	m_ShadowViewFrame.setLookAtDirection(-lightDir);

	// Model matrix
	m_ModelMatrix.makeIdentity();

	Matrix cubeMtx = m_ModelMatrix;
	cubeMtx.setScale(50.f);
	setTranslation(cubeMtx, m_ModelMatrix.translation() - Imath::V3f(0, 125, 0));

	m_ModelMatrix.setTranslation((Imath::V3f(xRot - 2880, 5520 - 2880, zRot - 2880)
			/ 2880.0f - Imath::V3f(1, 1, 1)) * Imath::V3f(10, 10, 10));

	// Update the shadow map first..
	m_ShadowRenderTarget->use();

	m_Renderer.setClearColour(Colour(255, 0, 0, 0)); // just clear in red
	m_Renderer.clear((UInt) (Renderer::eColor | Renderer::eDepth));

	if (!m_GlSettings->getWireframeConfig())
	{
		m_Renderer.enableCullFace(Renderer::eFront); // cull the front faces to prevent shadow acne artefacts

		//glEnable( GL_POLYGON_OFFSET_FILL );
		//glPolygonOffset( shadowBias * shadowBiasScale, shadowBias * shadowBiasScale );
	}

	// render the penguin from the light point of view
	m_ShadowUpdateProgram->use();
	m_ShadowUpdateProgram->setUniformMatrix("ModelMatrix", m_ModelMatrix.getValue(), false);
	m_ShadowUpdateProgram->setUniformMatrix("ViewMatrix", m_ShadowViewFrame.getMatrix().getValue(), false);
	m_ShadowUpdateProgram->setUniformMatrix("ProjMatrix", m_ShadowProjMatrix.getValue(), false);

	m_Texture->use(0, m_ShadowUpdateProgram);
	m_Mesh->use(m_ShadowUpdateProgram);
	m_Mesh->draw();

	m_Texture2->use(0, m_ShadowUpdateProgram);
	m_Cube->use(m_ShadowUpdateProgram);
	m_ShadowUpdateProgram->setUniformMatrix("ModelMatrix", cubeMtx.getValue(), false);
	m_Cube->draw();

	// go back on default FB
	m_DefaultRenderTarget->use();
	resetViewport();
	m_Renderer.disableCullFace();

	m_Renderer.setClearColour(clearColor);
	m_Renderer.clear((UInt) (Renderer::eColor | Renderer::eDepth));
	//m_Renderer.EnableWireFrame( m_GlSettings->getWireframeConfig() );

	m_Program->use();
	m_Program->setUniformMatrix("ModelMatrix", m_ModelMatrix.getValue(), false);
	m_Program->setUniformMatrix("ViewMatrix", m_CameraFrame.getMatrix().getValue(), false);
	m_Program->setUniformMatrix("ProjMatrix", m_ProjMatrix.getValue(), false);

	// for shadows
	m_Program->setUniformMatrix("ShadowViewMatrix", m_ShadowViewFrame.getMatrix().getValue(), false);
	m_Program->setUniformMatrix("ShadowProjMatrix", m_ShadowProjMatrix.getValue(), false);
	//m_Program->setUniform("ShadowBias", shadowBias * shadowBiasScale );
	m_Program->setUniform("ShadowBlurScale", shadowBias * 0.1f);

	Matrix currentViewMatrix = m_CameraFrame.getMatrix();
	Imath::V3f viewSpaceLightDir;
	currentViewMatrix.multDirMatrix(lightDir, viewSpaceLightDir);

	m_Program->setUniformVec3("LightDir", viewSpaceLightDir);

	m_Texture->use(0, m_Program);
	//m_ShadowRenderTarget->getTexture(0)->use( 0, m_Program );
	m_ShadowRenderTarget->getDepthTexture()->use(1, m_Program); // set shadow map
	m_Mesh->use(m_Program);
	m_Mesh->draw();

	m_Texture2->use(0, m_Program);
	m_Cube->use(m_Program);
	m_Program->setUniformMatrix("ModelMatrix", cubeMtx.getValue(), false);
	m_Cube->draw();

	// just for debugging..
	// display our quad
	m_FSProgram->use();
	m_FSQuad->use(m_FSProgram);
	m_ShadowRenderTarget->getTexture(0)->use(0, m_FSProgram);
	m_FSQuad->draw();
}