Example #1
/* alphaBetaSearch ************************************************************************************************************
 *
 * @params: node   - GSTNode pointer to the current node the minimax algorithm is applied to
 * 			alpha  - best score the maximizing (AI) player is already guaranteed elsewhere in the tree
 * 			beta   - best score the minimizing (human) player is already guaranteed elsewhere in the tree
 * 			player - char value representing the player to move; @pre: player == ai_player || player == human_player
 *
 * @modifies: updates node's action score to the min/max of its children's scores, depending on the player to move
 *
 * @returns: the node's action score
 **********************************************************************************************************************/
int AIAgent::alphaBetaSearch(GSTNode* node, int alpha, int beta, char player) {
	// node is a leaf of the generated game tree (no children to expand)
	if (node->childPtr == nullptr) {
		// return heuristic value of node
		return node->action.score;
	}
	if (player == ai_player) {	// maximizing player's turn
		GSTNode* pTemp = node->childPtr;
		while (pTemp != nullptr) {
			int temp = alphaBetaSearch(pTemp, alpha, beta, human_player);
			alpha = (temp > alpha) ? temp : alpha;
			if (beta <= alpha)
				break;
			pTemp = pTemp->siblingPtr;
		}
		node->action.score = alpha;
		return alpha;
	}
	else {	// minimizing player's turn
		GSTNode* pTemp = node->childPtr;
		while (pTemp != nullptr) {
			int temp = alphaBetaSearch(pTemp, alpha, beta, ai_player);
			beta = (temp < beta) ? temp : beta;
			if (beta <= alpha)
				break;
			pTemp = pTemp->siblingPtr;
		}
		node->action.score = beta;
		return beta;
	}
}
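A quick way to see the pattern from Example #1 in isolation: the sketch below is not part of the snippet above; ToyNode and toyAlphaBeta are hypothetical stand-ins for GSTNode and AIAgent::alphaBetaSearch, using a std::vector of children instead of child/sibling pointers. It runs the same max/min alternation and cutoff on a hard-coded two-ply tree whose minimax value is 3, and the cutoff prunes the last leaf.

#include <cstdio>
#include <vector>

struct ToyNode {
    int score;                          // heuristic value; only read at leaves
    std::vector<ToyNode*> children;     // empty => terminal node
};

int toyAlphaBeta(ToyNode* node, int alpha, int beta, bool maximizing) {
    if (node->children.empty())
        return node->score;                             // leaf: return heuristic value
    for (ToyNode* child : node->children) {
        int v = toyAlphaBeta(child, alpha, beta, !maximizing);
        if (maximizing && v > alpha) alpha = v;         // raise the lower bound
        if (!maximizing && v < beta) beta = v;          // lower the upper bound
        if (beta <= alpha) break;                       // cutoff: remaining siblings cannot matter
    }
    return maximizing ? alpha : beta;
}

int main() {
    ToyNode l1{3, {}}, l2{5, {}}, l3{2, {}}, l4{9, {}};
    ToyNode a{0, {&l1, &l2}}, b{0, {&l3, &l4}};
    ToyNode root{0, {&a, &b}};
    std::printf("%d\n", toyAlphaBeta(&root, -1000, 1000, true));   // prints 3; l4 is never visited
    return 0;
}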
Example #2
Action AIAgent::iterativeDeepeningSearch() {
	// Iterative deepening: rebuild the game tree one ply deeper on each pass and
	// re-run the full-window alpha-beta search from the root.
	int depth = 1;
	depthLimitedSearch(depth);
	int score = alphaBetaSearch(gameTree->getRoot(), -1000000000, 1000000000, ai_player);
	Action _action = findMove(score);
	for (depth = 2; depth < 6; depth++) {
		depthLimitedSearch(depth);
		score = alphaBetaSearch(gameTree->getRoot(), -1000000000, 1000000000, ai_player);
		_action = findMove(score);
		if (score == 10000000) {
			// a winning line was found; searching deeper is unnecessary
			return _action;
		}
	}
	return _action;
}
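Examples #2 and #5 both follow the standard iterative-deepening shape: one full-window search per depth, keeping the result of the deepest completed pass and stopping early on a decisive score. A generic, self-contained restatement of that loop is sketched below; searchRoot is only a stub and WIN_SCORE a made-up threshold, not part of the AIAgent API.

#include <cstdio>

const int WIN_SCORE = 1000000;   // hypothetical forced-win threshold (assumption, not from the snippet)
const int MAX_DEPTH = 6;

// Stub standing in for a full-window root search such as
// alphaBetaSearch(gameTree->getRoot(), -INF, +INF, ai_player); it returns a
// made-up score so the driver below is runnable on its own.
int searchRoot(int depth, int alpha, int beta)
{
    (void)alpha; (void)beta;
    return depth * 10;
}

int iterativeDeepening()
{
    int best = 0;
    for (int depth = 1; depth <= MAX_DEPTH; ++depth) {
        // One full, symmetric-window search per depth; keep the deepest completed result.
        best = searchRoot(depth, -WIN_SCORE, WIN_SCORE);
        if (best >= WIN_SCORE || best <= -WIN_SCORE)
            break;               // decisive score: no point searching deeper
    }
    return best;
}

int main()
{
    std::printf("best score: %d\n", iterativeDeepening());   // 60 with the stub above
    return 0;
}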
Example #3
void GraphSearchEngine::makeBestMove(Chessboard *chessboard, void *move)
{
    beginTime = clock();
    bestMove.clear();
    GraphSearchEngine::pChessboard = chessboard;
    rootDepth = limitDepth;
    // Full-window search from the root; the root iteration fills in GraphSearchEngine::bestMove.
    int vl = alphaBetaSearch(-Chessboard::MATE_VALUE, Chessboard::MATE_VALUE, limitDepth);
    (*((Move *)move)).forceCopyFrom(bestMove);
}
Example #4
// Decide the computer's move. This is the AI part of the Gomoku (five-in-a-row) program.
int decideComputerMove( int toMove ) 
{
	// toMove: the color of the stones of the side whose move we want to decide
	int score;

	rootToMove = toMove;

	// The search records the chosen move in nextMoveX / nextMoveY (see Example #10).
	score = alphaBetaSearch( 0, rootToMove, -INFINITYVAL, INFINITYVAL );
	printf( "Best move evaluation: %d\n", score );

	return 1;
}
Example #5
void ChessBoard::alphaBetaSearchMain()
{
    m_searchCallTimes = 0;
    m_distance = 0;
    m_mvComputer = 0;
    // Clear the history table before starting a new search.
    memset(m_historyTable, 0, 65536 * sizeof(int));

    // Iterative deepening from depth 1 to depth 6.
    for (int i = 1; i <= 6; i ++)
    {
        int vl = alphaBetaSearch(i, -MATE_VALUE, MATE_VALUE);
        if (vl > WIN_VALUE || vl < -WIN_VALUE)
        {
            // A decisive (winning or losing) score was found; deeper search is unnecessary.
            break;
        }
    }
}
Example #6
int ZComputer::alphaBetaSearch(int depth, int alpha, int beta) {
    if (depth <= 0) {
        return calculateAdvantage();
    }
    POSITION movables[MAX_MOVABLE];
    for (int index = 0; index < MAN_COUNT; index ++)
    {
        int genIndex = MOVE_GENERATE_ORDER[index];
        CHESS* pChessman = chessModel->chessAt(role * MAN_COUNT + genIndex);
        if (CHESS_DEAD(*pChessman))
            continue;

        chessModel->getPossibleMoves(pChessman, movables);
        for (int s = 0; s < MAX_MOVABLE; s++)
        {
            // No more possible steps.
            if (movables[s] == 0x8080) break;
            int x = POSITION_X(movables[s]);
            int y = POSITION_Y((movables[s]));

//            searchStack[currentDepth] = COMBINE_POSITION(CHESS_INDEX(*pChessman), x, y, CHESS_INDEX(* chessModel->chessAt(x, y)));
            POSITION currentPos = COMBINE_POSITION(CHESS_INDEX(*pChessman), x, y, CHESS_INDEX(* chessModel->chessAt(x, y)));

            if (chessModel->move(pChessman, x, y))
            {
                // Negamax recursion: search the negated, swapped window and negate the returned value.
                int childValue = -alphaBetaSearch(depth - 1, -beta, -alpha);
                chessModel->backStep();
                if (childValue >= beta)
                {
                    // Beta cutoff: remember the move that refuted this line.
                    searchStack[depth - 1] = currentPos;
                    return childValue;
                }
                if (childValue > alpha)
                {
                    // New best move at this depth: remember it and raise alpha.
                    searchStack[depth - 1] = currentPos;
                    alpha = childValue;
                }
            }
        }
    }
    return alpha;
}
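Example #6 (and Example #8 below) uses the negamax formulation of alpha-beta: a single code path serves both sides, because each recursive call searches the negated, swapped window and negates the returned value. The self-contained sketch below (hypothetical NegaNode/negamax names) shows the identity on a two-ply tree with minimax value 3; the convention it relies on is that every score, including the leaf scores, is given from the point of view of the side to move at that node.

#include <algorithm>
#include <cstdio>
#include <vector>

struct NegaNode {
    int score;                           // leaf score, from the point of view of the side to move at the leaf
    std::vector<NegaNode*> children;     // empty => leaf
};

int negamax(const NegaNode* node, int alpha, int beta) {
    if (node->children.empty())
        return node->score;
    for (const NegaNode* child : node->children) {
        // One code path for both sides: negate the child's value, search the negated, swapped window.
        int v = -negamax(child, -beta, -alpha);
        alpha = std::max(alpha, v);
        if (alpha >= beta)
            break;                       // cutoff, equivalent to the beta cutoff of the min/max form
    }
    return alpha;
}

int main() {
    // Two plies below the root, so at the leaves it is again the root side to move
    // and the leaf scores 3, 5, 2, 9 need no sign adjustment.
    NegaNode l1{3, {}}, l2{5, {}}, l3{2, {}}, l4{9, {}};
    NegaNode a{0, {&l1, &l2}}, b{0, {&l3, &l4}};
    NegaNode root{0, {&a, &b}};
    std::printf("%d\n", negamax(&root, -1000, 1000));   // prints 3; the last leaf is pruned
    return 0;
}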
Example #7
void ZComputer::play()
{
    if (chessModel->currentPlayer() != role)
        return;

    bestValue = -100000;
    bestMove = INVALID_POSITION;
    currentDepth = 0;
//    searchNext();
    int value = alphaBetaSearch(5, -100000, 100000);
    qDebug() << "alphaBetaSearch: value = " << value;
    bestMove = searchStack[4];   // root move recorded by the depth-5 search (searchStack[depth - 1])
    if (bestMove  != INVALID_POSITION)
    {
        int chessmanIndex = POSITION_INDEX(bestMove);
        int x = POSITION_X(bestMove);
        int y = POSITION_Y(bestMove);
        // played
        qDebug() << "Computer: moving chessman (index= " << chessmanIndex << ") to (" << x << y << ")";
        emit moveCalculated(chessmanIndex, x, y);
    }
}
Example #8
int ChessBoard::alphaBetaSearch(int depth, int alpha, int beta)
{    
    // 1. Reached the horizon (depth limit): return the static evaluation
    if (depth==0)
    {
        return evaluate();
    }
    m_searchCallTimes++;

    // 2. Initialize the best value and the best move
    int vlBest = -MATE_VALUE;
    int mvBest = 0;

    // 3. Generate the moves and sort them according to the history table
    Moves mvs;
    generateMoves(mvs);
    //PrintMoves(mvs);
    qSort(mvs.begin(), mvs.end(), compareLessTan);
    //PrintMoves(mvs);
    //qDebug()<<"----------------------";

    // 4. Make each of these moves in turn and recurse
    for(int i=0; i<mvs.count(); i++)
    {
        // try this move
        int pcCaptured;
        if (makeMove(mvs[i], pcCaptured))
        {
            int vl = -alphaBetaSearch(depth - 1, -beta, -alpha);
            undoMakeMove(mvs[i], pcCaptured);
            //qDebug()<<mvString(mvs[i])<<" vl="<<vl;

            // Perform the alpha-beta comparison and cutoff
            if (vl > vlBest)
            {
                // Found a new best value (cannot yet tell whether it is an alpha, PV, or beta move)
                vlBest = vl;        // "vlBest" is the best value to return so far; it may lie outside the alpha-beta window
                if (vl >= beta)
                {
                    // Found a beta move
                    mvBest = mvs[i];  // beta moves are saved to the history table
                    break;            // beta cutoff
                }
                if (vl > alpha)
                {
                    // Found a PV move
                    mvBest = mvs[i];  // PV moves are saved to the history table
                    alpha = vl;     // narrow the alpha-beta window
                }
            }
        }
    }

    // 5. All moves searched: save the best move (unless it is an alpha move) to the history table and return the best value
    if (vlBest == -MATE_VALUE)
    {
      // Mated: give a score based on the distance to mate
      return m_distance - MATE_VALUE;
    }
    //qDebug()<<"mvBest="<<mvBest<<" distance="<<m_distance;
    if (mvBest != 0)
    {
      // Not an alpha move: save the best move to the history table
      m_historyTable[mvBest] += depth * depth;
      if (m_distance==0)
      {
        // At the root node there is always a best move (a full-window search cannot fall outside the window), so record it
        m_mvComputer = mvBest;
      }
    }
    return vlBest;
}
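In Example #8, move ordering is driven by m_historyTable, which alphaBetaSearch increments by depth * depth for moves that caused a cutoff or improved alpha, but the comparator compareLessTan passed to qSort is not shown in the snippet. A plausible standalone sketch of such a history-heuristic comparator follows; the names are hypothetical, and it assumes the table is visible to the comparator, for example as a file-scope array.

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical file-scope history table standing in for m_historyTable.
static int s_historyTable[65536];

// Moves with a larger history score should be tried first, so "less than" for the
// sort means "has the larger history score".
static bool compareByHistory(int mv1, int mv2)
{
    return s_historyTable[mv1] > s_historyTable[mv2];
}

int main()
{
    s_historyTable[100] = 4;      // pretend move 100 improved alpha at depth 2 (2 * 2)
    s_historyTable[200] = 9;      // pretend move 200 caused a cutoff at depth 3 (3 * 3)
    std::vector<int> mvs = {100, 300, 200};
    std::sort(mvs.begin(), mvs.end(), compareByHistory);
    std::printf("%d %d %d\n", mvs[0], mvs[1], mvs[2]);   // prints: 200 100 300
    return 0;
}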
Example #9
    //************************************
    // Method:    alphaBetaSearch
    // FullName:  CIG::GraphSearchEngine::alphaBetaSearch
    // Access:    private static
    // Returns:   int
    // Qualifier:
    // Parameter: int alpha
    // Parameter: int beta
    // Parameter: int depth
    // The multi-player version of this function should work as follows:
    // the idea is that the current player looks a limited number of moves ahead and obtains the best result it can
    // get, assuming every other player also chooses the move most favorable to itself.
    // The key difference from plain minimax is that the best score the evaluated player gets after making a move is
    // not obtained by simply negating the previous player's value; instead, the lower-level search should directly
    // return the evaluation that this level can expect.
    //************************************
    int GraphSearchEngine::alphaBetaSearch(int alpha, int beta, int depth)
    {
        int vl, vlBest;
        Move nowBestMove;
        Chessboard &nowBoard = *pChessboard;
        // A full alpha-beta search goes through the following stages.
        // 1. Reached the horizon (or ran out of time): return the static evaluation of the position.
        unsigned tmp = clock();

        if(depth == 0 || beginTime + timeout <= tmp)
        {
            return nowBoard.getEvaluation((CIGRuleConfig::PLAYER_NAMES)(nowBoard.nowTurn - 1 + ((-!(long long)nowBoard.nowTurn)&CIGRuleConfig::PLAYER_NUM)));
        }

        // 2. Initialize the best value and the best move
        vlBest = -Chessboard::MATE_VALUE; // lets us detect whether no move was made at all (mate)
        //bestMove.clear();           // would let us detect whether a beta or PV move was found, for the history table
        // 3. Generate all moves and sort them by the history table. If mated, there are no moves to make.
        MotionGenerator mg(nowBoard);
        mg.generateMoves();
        MoveStack &runningMoveStack = mg.moveStack;

        //qsort(mvs, nGenMoves, sizeof(int), CompareHistory);       // per the definition of CompareHistory, this sorts from largest to smallest

        // 4. Make each of these moves in turn and recurse
        for(int i = runningMoveStack.size - 1; i >= 0 ; --i)
        {
            Move &nowMove = runningMoveStack[i];
            nowBoard.onWholeMoveIntent(nowMove, true);
            vl = alphaBetaSearch(-beta, -alpha, depth - 1);
            nowBoard.undoWholeMove(nowMove, true);

            // 5. Perform the alpha-beta comparison and cutoff
            if(vl > vlBest)       // found a new best value (cannot yet tell whether it is an alpha, PV, or beta move)
            {
                vlBest = vl;        // "vlBest" is the best value to return so far; it may lie outside the alpha-beta window
                nowBestMove.forceCopyFrom(runningMoveStack[i]);

                if(CIGRuleConfig::PLAYER_NUM == 2)
                {
                    if(vl >= beta)    // found a beta move
                    {
                        break;            // beta cutoff
                    }
                    else if(vl > alpha)    // found a PV move
                    {
                        alpha = vl;     // narrow the alpha-beta window
                    }
                }
            }
        }

        // 5. All moves searched: save the best move (unless it is an alpha move) to the history table and return the best value
        if(vlBest <= -Chessboard::WIN_VALUE)
        {
            // Mated: give a score based on the distance to mate
            return nowBoard.getEvaluation((CIGRuleConfig::PLAYER_NAMES)(nowBoard.nowTurn - 1 + ((-!(long long)nowBoard.nowTurn)&CIGRuleConfig::PLAYER_NUM))) - nowBoard.nowRound;
        }

        if(nowBestMove.size != 0)
        {
            //// If it is not an alpha move, save the best move to the history table
            //Search.nHistoryTable[mvBest] += nDepth * nDepth;
            if(depth == rootDepth)
            {
                // At the root node there is always a best move (a full-window search cannot fall outside the window), so record it
                GraphSearchEngine::bestMove.forceCopyFrom(nowBestMove);
            }
        }

        // Multi-player variant (see the header comment): return the evaluation of the position reached by the
        // best move rather than vlBest.
        nowBoard.onMoveIntent(nowBestMove, true);
        int preBest = nowBoard.getEvaluation((CIGRuleConfig::PLAYER_NAMES)(nowBoard.nowTurn - 1 + ((-!(long long)nowBoard.nowTurn)&CIGRuleConfig::PLAYER_NUM)));
        nowBoard.undoMove(nowBestMove, true);
        return preBest;
    }
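One standard way to realize the multi-player idea described in the comment block above is the max^n scheme: each node returns a vector with one score per player, and the player to move picks the child whose vector maximizes its own component, rather than negating the next player's value. The sketch below is a generic, self-contained illustration with a hypothetical MultiNode type; it is not the GraphSearchEngine API, which instead returns the evaluation of the position reached by the best move.

#include <cstdio>
#include <vector>

struct MultiNode {
    std::vector<int> scores;             // leaf payoff, one entry per player
    std::vector<MultiNode*> children;    // empty => leaf
};

std::vector<int> maxNSearch(const MultiNode* node, int playerToMove, int numPlayers)
{
    if (node->children.empty())
        return node->scores;

    std::vector<int> best;
    for (const MultiNode* child : node->children) {
        std::vector<int> v = maxNSearch(child, (playerToMove + 1) % numPlayers, numPlayers);
        // Keep the child whose score vector is best for the side to move at this node.
        if (best.empty() || v[playerToMove] > best[playerToMove])
            best = v;
    }
    return best;
}

int main()
{
    // Three players; player 0 moves at the root, player 1 at the middle level, leaves hold one payoff per player.
    MultiNode l1{{5, 1, 0}, {}}, l2{{2, 9, 3}, {}}, l3{{4, 4, 4}, {}}, l4{{8, 0, 7}, {}};
    MultiNode a{{}, {&l1, &l2}}, b{{}, {&l3, &l4}};
    MultiNode root{{}, {&a, &b}};

    std::vector<int> v = maxNSearch(&root, 0, 3);
    std::printf("root value for player 0: %d\n", v[0]);   // player 1 prefers l2 and l3, so player 0 ends up with 4
    return 0;
}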
Example #10
int alphaBetaSearch( int depth, int toMove, int alpha, int beta ) 
{
	int score, eval;
	int x, y;

	// Evaluate the position once the maximum depth is reached
	if( depth == MAXDEPTH )
		return evaluate( rootToMove, toMove, depth );

	// Initialize the best score for the Max player and the Min player
	if( toMove == rootToMove )
		score = -INFINITYVAL;
	else
		score = INFINITYVAL;

	// Generate all the moves by putting stones of the right color on the empty squares
	for( x = 1; x <= BOARDSIZE; x++ ) 
	{
		for( y = 1; y <= BOARDSIZE; y++ ) 
		{
			if( gomokuBoard[ x ][ y ] == EMPTY_SQUARE ) 
			{
				gomokuBoard[ x ][ y ] = toMove;
				// Check whether this move makes five in a row
				if( fiveInRowCheck( x, y, toMove ) ) 
				{
					if( rootToMove == toMove ) 
					{	
						// Five in a row for the Max player
						// Undo the move
						gomokuBoard[ x ][ y ] = EMPTY_SQUARE;
						// The winning move was made at the root position of the search, so record it
						if( depth == 0 ) 
						{
							nextMoveX = x;
							nextMoveY = y;
						}
						return WINNING - depth;			// a win found at a shallower depth is better than one found deeper
					}
					else 
					{		
						// Five in a row for the Min player
						// Undo the move
						gomokuBoard[ x ][ y ] = EMPTY_SQUARE;
						return -( WINNING - depth );	// a win found at a shallower depth is better than one found deeper
					}
				}
				else 
				{
					// Recurse into the alpha-beta search
					eval = alphaBetaSearch( depth + 1, flip( toMove ), alpha, beta );

					// Undo the move
					gomokuBoard[ x ][ y ] = EMPTY_SQUARE;

					if( rootToMove == toMove ) 
					{
						// It is the Max player's turn here: if the search result beats the current best score, update it
						if( eval > score ) 
						{
							score = eval;
							// The best move was made at the root position of the search, so record it
							if( depth == 0 ) 
							{
								nextMoveX = x;
								nextMoveY = y;
							}
						}

						// Beta cutoff
						if( score >= beta )
							return score;
						// Update alpha
						if( score > alpha )
							alpha = score;
					}
					else 
					{
						// It is the Min player's turn here: if the search result is below the current lowest score, update it
						if( eval < score )
							score = eval;

						// Alpha cutoff
						if( score <= alpha )
							return score;
						// Update beta
						if( beta < score )
							beta = score;
					}
				}
			}
		}
	}
	return score;
}