예제 #1
0
파일: see.cpp 프로젝트: enlighter/giraffe
bool RunSeeTest(std::string fen, std::string move, Score expectedScore)
{
	std::cout << "Checking SEE for " << fen << ", <= " << move << std::endl;

	Board b(fen);
	Move mv = b.ParseMove(move);

	if (mv == 0)
	{
		std::cerr << "Failed to parse move " << move << std::endl;
		return false;
	}

	Score see = StaticExchangeEvaluation(b, mv);

	if (see != expectedScore)
	{
		std::cerr << "Expected: " << expectedScore << " Got: " << see << std::endl;
		return false;
	}

	b.CheckBoardConsistency();

	std::cout << "Passed" << std::endl;

	return true;
}
예제 #2
0
파일: see.cpp 프로젝트: enlighter/giraffe
// Recursively evaluates the best capture sequence from the current position
// (negamax over violent moves only, with [lowerBound, upperBound] as an
// alpha-beta window), returning a fail-soft score from the side-to-move's
// point of view. `pv` must be empty on entry and receives the capture
// sequence realizing the returned score. `currentEval` is the running
// material evaluation for the side to move.
Score GlobalExchangeEvaluation(Board &board, std::vector<Move> &pv, Score currentEval, Score lowerBound, Score upperBound)
{
	assert(pv.empty());

	// try standpat
	// The side to move may decline all captures, so currentEval is a floor
	// on the result; fail-soft cutoff if it already meets the upper bound.
	if (currentEval >= upperBound)
	{
		return currentEval;
	}
	else if (currentEval > lowerBound)
	{
		lowerBound = currentEval;
	}

	MoveList captures;
	board.GenerateAllLegalMoves<Board::VIOLENT>(captures);

	std::vector<Move> subPv;

	for (size_t i = 0; i < captures.GetSize(); ++i)
	{
		Score see = StaticExchangeEvaluation(board, captures[i]);

		// we only want to search positive SEEs (not even neutral ones), and only if it can possibly improve lowerBound
		if ((see < 0) || ((currentEval + see) <= lowerBound))
		{
			continue;
		}

		subPv.clear();

		// Captured piece type must be read before the move is applied.
		PieceType capturedPt = board.GetCapturedPieceType(captures[i]);

		board.ApplyMove(captures[i]);

		// Negamax recursion: credit the capture's material to the eval, then
		// negate eval and swap/negate the bounds for the opponent's view.
		Score score = -GlobalExchangeEvaluation(board, subPv, -(currentEval + SEE_MAT[capturedPt]), -upperBound, -lowerBound);

		board.UndoMove();

		// beta cutoff (fail-soft)
		if (score >= upperBound)
		{
			return score;
		}

		if (score > lowerBound)
		{
			lowerBound = score;

			// New best line: this capture followed by the recursive sub-line.
			pv.clear();
			pv.push_back(captures[i]);
			pv.insert(pv.end(), subPv.begin(), subPv.end());
		}
	}

	return lowerBound;
}
예제 #3
0
// Appends one SEE-based sort score per move in `moves` to `scores`,
// in the same order as `moves`:
//  - normal captures: SEE with the actual captured piece,
//  - en-passant (empty target square but special == PIECE_PAWN): SEE as if
//    capturing a pawn,
//  - quiet moves: 0.
void Engine::generateCaptureScores(vector<Move>& moves, vector<int>& scores)
{
	// One entry will be appended per move; reserve up front to avoid
	// repeated reallocation.
	scores.reserve(scores.size() + moves.size());

	// const ref: avoids copying each Move; range-for avoids the
	// signed/unsigned comparison of `int i < moves.size()`.
	for (const Move& m : moves)
	{
		int cap = m.getCapturedPiece();
		if (cap != SQUARE_EMPTY)
		{
			scores.push_back(StaticExchangeEvaluation(m.getTo(), m.getFrom(), m.getMovingPiece(), cap));
		}
		else if (m.getSpecial() == PIECE_PAWN)
		{
			// En passant: the destination square is empty, so score it as a
			// pawn capture.
			scores.push_back(StaticExchangeEvaluation(m.getTo(), m.getFrom(), m.getMovingPiece(), SQUARE_WHITEPAWN));
		}
		else
		{
			scores.push_back(0);
		}
	}
}
예제 #4
0
long long Engine::getMoveScore(const Move& m)
{
	int from = m.getFrom();
	int to = m.getTo();
	int capturedpiece = m.getCapturedPiece();
	int special = m.getSpecial();
	int movingpiece = m.getMovingPiece();
	long long score = 1000000;
	if (ply < PrincipalVariation.size())
	{
		if (m == PrincipalVariation.at(PrincipalVariation.size() - 1 - ply))
		{
			score += 6000000;
			SortPhase = SORTPHASE_PV;
			//cout << "info string pv hit " << ply << " " << m.toString() << " " << (PrincipalVariation.size() - 1 - ply) << endl;
			return score;
		}
	}
	if (m == Table.getBestMove(pos.TTKey)) //history best move is always first, give it a big advantage of 400000
	{
		score += 4000000;
		SortPhase = SORTPHASE_HASH;
		//tthitcount++;
		return score;
	}
	/*if (!pos.makeMove(m))
		return -10000;
	bool incheck = pos.underCheck(pos.turn);
	pos.unmakeMove(m);
	if (incheck)
	{
		score += 3500000;
		return score;
	}*/
	/*if (incheck[ply] && movingpiece == PIECE_KING)
	{
		SortPhase = SORTPHASE_GOODCAP;
		score += 3200000;
		return score;
	}*/
	if (m.getSpecial() == PIECE_QUEEN) //queen promotion
	{
		SortPhase = SORTPHASE_GOODCAP;
		score += 3100000;
		return score;
	}
	if (capturedpiece != SQUARE_EMPTY) //a capture
	{
		//if (Threats[ply].getFrom() == to && Threats[ply]!=CONS_NULLMOVE) //capturing a threatening piece
		//{
		//	score += 200000; 
		//}
		//int cap = getSquare2Piece(capturedpiece);
		int x = StaticExchangeEvaluation(to, from, movingpiece, capturedpiece);
		//int x = capturedpiece * 8 + movingpiece;
		//int x = movescore;
		if (x >= 0) //if it is a good capture
		{
			SortPhase = SORTPHASE_GOODCAP;
			score += 3000000 + x;
			return score;
		}
		else //bad capture
		{
			SortPhase = SORTPHASE_BADCAP;
			score += -500000 + x;
			return score;
		}
	}
	else if (special == PIECE_PAWN) //enpassant are also captures
	{
		int x = StaticExchangeEvaluation(to, from, movingpiece, capturedpiece);
		//int x = 0;
		if (x >= 0)
		{
			SortPhase = SORTPHASE_GOODCAP;
			score += 3000000 + x;
			return score;
		}
		else
		{
			SortPhase = SORTPHASE_BADCAP;
			score += -500000 + x;
			return score;
		}
	}
	else
	{
		SortPhase = SORTPHASE_KILLER;
		if (from == KillerMoves[0][ply].getFrom() && to == KillerMoves[0][ply].getTo()) //if its a killer move
		{
			score += 2500000;
			return score;
		}
		else if (from == KillerMoves[1][ply].getFrom() && to == KillerMoves[1][ply].getTo())
		{
			score += 2000000;
			return score;
		}
		/*else if (from == KillerMoves[2][ply].getFrom() && to == KillerMoves[2][ply].getTo())
		{
			score += 1500000;
			return score;
		}*/
		if (ply > 1)
		{
			Move prev = currentVariation[ply - 1];
			//if (from == CounterMoves[prev.getMovingPiece()][prev.getTo()][0].getFrom() && to == CounterMoves[prev.getMovingPiece()][prev.getTo()][0].getTo())
			if(m==CounterMoves[prev.getMovingPiece()][prev.getTo()][0])
			{
				score += 1900000;
				return score;
			}
			//else if (from == CounterMoves[prev.getMovingPiece()][prev.getTo()][1].getFrom() && to == CounterMoves[prev.getMovingPiece()][prev.getTo()][1].getTo())
			else if(m == CounterMoves[prev.getMovingPiece()][prev.getTo()][1])
			{
				score += 1800000;
				return score;
			}
		}
		
		if (ply > 2)
		{
			Move prev = currentVariation[ply - 2];
			if (from == FollowupMoves[prev.getMovingPiece()][prev.getTo()][0].getFrom() && to == FollowupMoves[prev.getMovingPiece()][prev.getTo()][0].getTo())
			//if (m == FollowupMoves[prev.getMovingPiece()][prev.getTo()][0])
			{
				score += 1700000;
				return score;
			}
			else if (from == FollowupMoves[prev.getMovingPiece()][prev.getTo()][1].getFrom() && to == FollowupMoves[prev.getMovingPiece()][prev.getTo()][1].getTo())
			//else if (m == FollowupMoves[prev.getMovingPiece()][prev.getTo()][1])
			{
				score += 1600000;
				return score;
			}
		}
		
		{
			SortPhase = SORTPHASE_HISTORY;
			//if (pos.underCheck(pos.turn) == false) //move a threatened piece
			//{
			//	Move null = createNullMove(pos.epsquare);
			//	pos.makeMove(null);
			//	Move m2 = pos.getSmallestAttacker(getOpponent(pos.turn), m.getFrom());
			//	pos.unmakeMove(null);

			//	int x = StaticExchangeEvaluation(m.getFrom(), m2.getFrom(), m2.getMovingPiece(), m2.getCapturedPiece());

			//	if (x > 0)
			//	{
			//		score += 10000;
			//	}
			//}

			//if (Threats[ply].getTo() == from && Threats[ply]!=CONS_NULLMOVE) //moving a threatened piece
			//{
			//	score += 1000000;
			//}

			//if (getPawnAttacks(pos.turn, from)&pos.Pieces[getOpponent(pos.turn)][PIECE_PAWN]) //moving a threatened piece
			//{
			//	score += 1000000;
			//}

			score += HistoryScores[movingpiece][to]; //sort the rest by history
													 /*int p2sq = getPiece2Square(movingpiece, pos.turn);
													 if (pos.turn == COLOR_BLACK)
													 p2sq = -p2sq;
													 score += PieceSq[p2sq][to] - PieceSq[p2sq][from];*/
			return max(score,(long long)0);
		}
	}
	cout << "info string Move sort error" << endl;
	return score;
}
예제 #5
0
// Quiescence search: resolves captures (and promotions) until the position
// is quiet, so the static evaluation is never taken in the middle of an
// exchange. Fail-soft alpha-beta over violent moves only; returns a score
// from the side-to-move's point of view.
int Engine::QuiescenceSearch(int alpha,int beta)
{
	//quisctime.Start();

	if (isDraw()) return 0;

	nodes++;
	// Periodically poll for time/stop conditions.
	if(nodes%CheckupNodeCount == 0)
	{
		checkup();
		//nodes = 0;
	}
	/*if(pos.getGameStatus()!=STATUS_NOTOVER)
	{
		int val = LeafEval(alpha,beta);
		if(val >= beta)
			return beta;
		else if(val < alpha)
			return alpha;
		return val;
	}*/
	int stand_pat = 0;
	//ProbeStruct probe = Table.Probe(pos.TTKey, -1, alpha, beta);
	//if (probe.found && probe.entry->bound == TT_EXACT)
	//{
	//	return probe.score;
	//	stand_pat = probe.score; //use TT probe as leafeval
	//}
	//else
	//{
		stand_pat = LeafEval<false>();
	//} 
	// The side to move may decline all captures, so the static eval is a
	// floor on the score (fail-soft cutoff).
	if(stand_pat >= beta) //standpat
	{
		return stand_pat;
	}
	// Even winning a full queen couldn't raise the eval to alpha, so no
	// capture here can matter.
	if(stand_pat <= alpha-PieceMaterial[PIECE_QUEEN]) //big delta pruning
	{
		return stand_pat;
	}
	if(alpha < stand_pat)
	{
		alpha = stand_pat;
	}
	Move m;
	int score = 0;

    vector<Move> vec; //generate moves
	vec.reserve(128);
	//movegentime.Start();
	pos.generateCaptures(vec);
	//pos.generatePawnPushes(vec);
	//movegentime.Stop();

	//vector<int> scores; //generate move scores
	//scores.reserve(128);
	//generateCaptureScores(vec, scores);

	int material = getBoardMaterial<COLOR_WHITE>()+getBoardMaterial<COLOR_BLACK>();
	// bestmove/bound are only consumed by the commented-out TT save below.
	Move bestmove = CONS_NULLMOVE;
	int bound = TT_ALPHA;
	for(int i = 0;i<vec.size();i++)
	{
		//m = getHighestScoringMove(vec,i);
		m = vec[i];
		int special = m.getSpecial();
		int captured = m.getCapturedPiece();
		// Delta pruning: skip if even capturing this piece plus a 200 margin
		// cannot raise the eval to alpha — unless the move is a promotion,
		// which can gain more material than the captured piece alone.
		if ((stand_pat + PieceMaterial[getSquare2Piece(captured)] + 200 < alpha) //delta pruning
			&& (special != PIECE_QUEEN && special != PIECE_KNIGHT  && special != PIECE_ROOK && special != PIECE_BISHOP) //not a promotion
			//&& (material > EndgameMaterial)
			)
		{
            continue;
		}
		// Skip losing captures (negative static exchange evaluation).
		if (StaticExchangeEvaluation(m.getTo(), m.getFrom(), m.getMovingPiece(), captured) < 0)
			continue;
			//break; //since moves are sorted by SEE, we know remaining moves will also have SEE<0
		//if (getSquare2Piece(m.getCapturedPiece()) == PIECE_KING) //captured opponent king
		//	return CONS_INF;
		// makeMove returns false for illegal moves (e.g. leaves king in check).
		if(!pos.makeMove(m))
		{
			continue;
		}
		//pos.forceMove(m);
		ply++;
		if (ply > SelectiveDepth)
		{
			SelectiveDepth = ply;
		}
		// Negamax recursion with swapped, negated bounds.
		score = -QuiescenceSearch(-beta,-alpha);
		pos.unmakeMove(m);
		ply--;
		// Fail-soft beta cutoff.
		if (score >= beta)
		{
			//Table.Save(pos.TTKey, -1, score, TT_BETA, m);
			return score;
		}
			
		if (alpha < score)
		{
			alpha = score;
			bestmove = m;
			bound = TT_EXACT;
		}
			
	}
	//Table.Save(pos.TTKey, -1, alpha, TT_ALPHA, bestmove);
	//quisctime.Stop();
	return alpha;
}