Example No. 1
void a_star_node::initNode(map_location const &pos, map_location const &dst,
                           double cost, a_star_node *parent, std::set<map_location> const *teleports)
{
	isInCloseList = false;
	loc = pos;
	nodeParent = parent;
	g = cost;
	h = heuristic(pos, dst);
	//if there are teleport locations, correct the heuristic to take them into account
	if (teleports != NULL) {
		double srch = h, dsth = h;
		std::set<map_location>::const_iterator i;
		for(i = teleports->begin(); i != teleports->end(); ++i) {
			const double new_srch = heuristic(pos, *i);
			const double new_dsth = heuristic(*i, dst);
			if(new_srch < srch) {
				srch = new_srch;
			}
			if(new_dsth < dsth) {
				dsth = new_dsth;
			}
		}
		if(srch + dsth + 1.0 < h) {
			h = srch + dsth + 1.0;
		}
	}
}
Example No. 2
void AI::getGraphMoves(Move &mv)
{
  // The destination column/row of mv are shifted in place so that the four
  // orthogonal neighbours of the original (dcol, drow) are tested in turn.

  // neighbour at (dcol+1, drow)
  if(board.testCoords(++mv.dcol, mv.drow))
  {
    mv.hVal = heuristic(mv);
    storage.insert(mv);
  }

  // neighbour at (dcol-1, drow)
  if(board.testCoords(mv.dcol-=2, mv.drow))
  {
    mv.hVal = heuristic(mv);
    storage.insert(mv);
  }

  // neighbour at (dcol, drow+1)
  if(board.testCoords(++mv.dcol, ++mv.drow))
  {
    mv.hVal = heuristic(mv);
    storage.insert(mv);
  }

  // neighbour at (dcol, drow-1)
  if(board.testCoords(mv.dcol, mv.drow-=2))
  {
    mv.hVal = heuristic(mv);
    storage.insert(mv);
  }
}
Example No. 3
/* Attempt to find a path FROM (fx,fy) TO (tx,ty) (which is a tc).
 * Return '!' if no path is found.
 */
char
findpath(int fx, int fy, int tx, int ty, char tc)
{
	int ix;

	from_x = fx; from_y = fy;
	to_x = tx; to_y = ty; to_object = tc;

	/* Mark all squares as unsolved */
	for (ix = 0; ix < 80*24; ix++)
		nodes[ix].score = SCORE_UNTOUCHED;

	/* We can solve the destination trivially */
	nodes[ty*80 + tx].score = heuristic(tx,ty);
	nodes[ty*80 + tx].dir   = 8;

	while(1)
	{
		int bestix = 0, dir, bx, by;
		unsigned int oldpathcost;
		/* Find a node that has been optimally solved */
		for (ix = 0; ix < 80*24; ix++)
		{
			if (nodes[ix].score < nodes[bestix].score)
			{
				bestix = ix;
			}
		}

		if (nodes[bestix].score >= SCORE_UNTOUCHED)
		{
			/* Urk.  No path. */
			return '!';
		}

		bx = bestix % 80; by = bestix / 80;

		/* Have we found the source? */
		if (bestix == (80*fy + fx))
			return dch[nodes[bestix].dir];

		/* Try to expand the solution */
		oldpathcost = nodes[bestix].score - heuristic(bx, by);
		nodes[bestix].score = SCORE_CLOSED;

		for (dir = 0; dir < 8; dir++)
		{
			evaluate(bx, by, dir, oldpathcost);
		}
	}
}
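The heuristic(x, y) helper used above is not shown. For eight-directional movement on this 80x24 screen, the usual admissible estimate toward the stored destination is the Chebyshev distance; the following is a minimal sketch only, reusing the snippet's to_x/to_y globals, and may differ from the project's real heuristic():

/* Sketch only: Chebyshev distance to the destination, admissible for 8-way
 * movement with unit step cost. Uses the to_x/to_y globals set by findpath();
 * the project's real heuristic() may differ. */
static unsigned int heuristic(int x, int y)
{
	int dx = x > to_x ? x - to_x : to_x - x;
	int dy = y > to_y ? y - to_y : to_y - y;
	return (unsigned int)(dx > dy ? dx : dy);
}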
Example No. 4
void AAStar::findPath(std::vector<ANode*>& path) {
	float c;
	ANode *x, *y;

	init();

	printf("Pathfinding...");
	open.push(start);
	start->open = true;
	visited.push_back(start);

	while (!open.empty()) {
		x = open.top(); open.pop();
		x->open = false;

		if (x == goal) {
			tracePath(path);
			printf("[done]\n");
			return;
		}

		x->closed = true;

		successors(x, succs);
		while (!succs.empty()) {
			y = succs.front(); succs.pop();
			c = x->g + (y->w * heuristic(x, y));

			if (y->open && c < y->g)
				y->open = false;

			/* Re-opening a closed node; this can only happen when the heuristic is not consistent */
			if (y->closed && c < y->g)
				y->closed = false;
			
			if (!y->open && !y->closed) {
				y->g = (unsigned int) c;
				y->parent = x;
				y->h = (y->w * heuristic(y, goal));
				open.push(y);
				y->open = true;

				visited.push_back(y);
				history.push_back(y);
				history.push_back(x);
			}
		}
	}

	printf("[failed]\n");
}
Example No. 5
    // bestkids returns a vector of all of the successors that have the
    // best f value.
    std::vector<Current> bestkids(D &d, Current &cur, Cost &sndf) {
        sndf = Cost(-1);
        std::vector<Current> bests;

        this->res.expd++;
        typename D::Operators ops(d, cur.state);
        for (unsigned int n = 0; n < ops.size(); n++) {

            this->res.gend++;
            typename D::Edge e(d, cur.state, ops[n]);
            Cost h = heuristic(d, e.state, e.revop);
            Cost f = h == Cost(-1) ? h : h + e.cost;

            if (bests.empty() || better(f, bests[0].f)) {
                if (!bests.empty())
                    sndf = bests[0].f;
                bests.clear();
                bests.push_back(Current(e.state, ops[n], e.revop, e.cost, f));
            } else if (bests[0].f == f) {
                assert (!bests.empty());
                bests.push_back(Current(e.state, ops[n], e.revop, e.cost, f));
            } else if (better(f, sndf)) {
                sndf = f;
            }
        }

        return bests;
    }
Example No. 6
void Solver::init(char* map) {

	solution = 0;
	this->noExpandedNodes = 0;

	this->gameMap = new Map(map);
	//cout << gameMap.width() << " " << gameMap.height() << endl;
	Coordinate normalizedStartPos = gameMap->calcNormalizedPosition();

    State::initZobristHash(gameMap->width(), gameMap->height());
	State initialState = State(normalizedStartPos, gameMap->getBoxes(), gameMap->getStart());
	parentStates.insert(psMap(initialState.getHash(),parentState(0,stateMove(initialState.getMoveLoc(),initialState.getMoveType()))));
	
	queue = new BucketQueue(1);
	queue->push(intStatePair(heuristic(initialState, gameMap), initialState));
	
    reverseQueue = new BucketQueue(1);
    vector<State> goalStates = gameMap->getAllEndStates();
    for(size_t i=0; i<goalStates.size(); i++) {
        State goalState = goalStates[i];
        reverseParentStates.insert(psMap(goalState.getHash(), parentState(0,stateMove(goalState.getMoveLoc(), goalState.getMoveType()))));
        reverseQueue->push(intStatePair(reverseHeuristic(goalState, gameMap), goalState));
        possibleEndStates.insert(pair<U64,State>(goalState.getHash(), goalState));
    }
}	
Example No. 7
void bot_mtdf::do_move_search(const board* b, board* res)
{
  stats.start_timer();
  last_search_exact = exact;
 
  board children[32];
  int child_count = b->get_children(children) - children;
  
  output() << "bot_" << get_name() << " searching ";
  if(exact){
    output() << "perfectly at depth " << b->count_empty_fields() << '\n';
  }
  else{ 
    output() << "at depth " << get_search_depth() << '\n';
  }
  
  moves_left = exact ? get_perfect_depth() : get_search_depth();
  
  do_sorting(children,child_count);
  
  
  int best_heur = exact ? MIN_PERFECT_HEURISTIC : MIN_HEURISTIC;
  int first_guess;
  {
    int tmp = moves_left - 6;
    if(tmp < 0){
      first_guess = heuristic();
    }
    else{
      std::swap(tmp,moves_left);
      first_guess = mtdf<false,false>(0,best_heur);
      std::swap(tmp,moves_left);
    }
  }

  for(int id=0;id<child_count;++id){
    inspected = children[id];
    moves_left--;
    int cur_heur = mtdf<true,exact>(id==0 ? first_guess  : best_heur,best_heur);
    moves_left++;
    if(cur_heur > best_heur){
      best_heur = cur_heur;
      *res = children[id];
    }
    output() << "move " << (id+1) << "/" << (child_count);
    output() << " (" << board::index_to_position(b->get_move_index(children+id)) << ')';
    
    output() << ": " << best_heur << '\n';    
  }
  
  stats.stop_timer();
  
  output() << big_number(stats.get_nodes()) << " nodes in ";
  output() << stats.get_seconds() << " seconds: ";
  output() << big_number(stats.get_nodes_per_second()) << " nodes / sec\n";
  
}
Example No. 8
void a_star_search
  (
	std::vector< Pair<int> > (*get_map_neighbors)(Pair<int>), // pointer to static member that gives us a node's neighbors
   Pair<int> start,
   Pair<int> goal,
   unordered_map< Pair<int>, Pair<int> >& came_from,
   unordered_map< Pair<int>, int >& cost_so_far)
{
  PriorityQueue< Pair<int> > frontier;
  frontier.put(start, 0);

  came_from[start] = start;
  cost_so_far[start] = 0;

  while (!frontier.empty()) {
    Pair<int> current = frontier.get();

    if (current == goal) {
      break;
    }

    for (Pair<int> next : (*get_map_neighbors)(current)) {
    	/* this number 1 is the cost to move - no weighting scheme is being used right now
    	 */
      int new_cost = cost_so_far[current] + 1;

      if (!cost_so_far.count(next) || new_cost < cost_so_far[next]) {
        cost_so_far[next] = new_cost;
        int priority = new_cost + heuristic(next, goal);
        frontier.put(next, priority);
        came_from[next] = current;
      }
    }
  }
}
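heuristic(next, goal) is left undefined in this snippet. Since the comment notes a uniform move cost of 1 on a grid, Manhattan distance would be the natural admissible estimate. The sketch below is illustrative only; the local Pair type and its first/second members are assumptions standing in for the project's own type:

#include <cstdlib>

// Sketch only: Manhattan distance between two grid cells, admissible for
// 4-connected movement with unit step cost. This local Pair is a stand-in
// for whatever the surrounding project defines.
template <typename T>
struct Pair { T first, second; };

inline int heuristic(const Pair<int>& a, const Pair<int>& b)
{
  return std::abs(a.first - b.first) + std::abs(a.second - b.second);
}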
Example No. 9
/* double Dstar::getG(state u)
 * --------------------------
 * Returns the G value for state u.
 */
double Dstar::getG(state u) {

  if (cellHash.find(u) == cellHash.end())
    return heuristic(u,s_goal);
  return cellHash[u].g;

}
Example No. 10
/* void Dstar::init(int sX, int sY, int gX, int gY)
 * --------------------------
 * Init dstar with start and goal coordinates, rest is as per
 * [S. Koenig, 2002]
 */
void Dstar::init(int sX, int sY, int gX, int gY) {

  cellHash.clear();
  path.clear();
  openHash.clear();
  while(!openList.empty()) openList.pop();

  k_m = 0;

  s_start.x = sX;
  s_start.y = sY;
  s_goal.x  = gX;
  s_goal.y  = gY;

  cellInfo tmp;
  tmp.g = tmp.rhs =  0;
  tmp.cost = C1;

  cellHash[s_goal] = tmp;

  tmp.g = tmp.rhs = heuristic(s_start,s_goal);
  tmp.cost = C1;
  cellHash[s_start] = tmp;
  s_start = calculateKey(s_start);

  s_last = s_start;

}
Example No. 11
// findSolution returns a vector of in-order moves to perform in order to reach
// a solution for a given board.
std::vector<BoardMove> findSolution(const Board b) {
    if (isSolution(b))
        return std::vector<BoardMove>();

    SearchHeap moveList([](float p1, float p2) -> float { return p2 - p1; });
    std::set<Board> seen;
    moveList.insert(0.f, std::make_tuple(b, std::vector<BoardMove>()));

    while (!moveList.isEmpty()) {
        auto pair = moveList.removeTuple();

        Board board = std::get<0>(pair.value);
        for (auto bm: board.validMoves()) {
            Board temp = board.doMove(bm);
            if (seen.find(temp) != seen.end())
                continue;
            seen.insert(temp);

            std::vector<BoardMove> moves = std::get<1>(pair.value);
            moves.push_back(bm);

            int h = heuristic(temp);
            if (h == 0)
                return moves;

            moveList.insert(pair.priority + (h / 2.f + 0.5f), std::make_tuple(temp, moves));
        }
    }

    return std::vector<BoardMove>();
}
Example No. 12
void
run_wgm_sssp(warthog::scenario_manager& scenmgr)
{
    warthog::weighted_gridmap map(scenmgr.get_experiment(0)->map().c_str());
	warthog::wgridmap_expansion_policy expander(&map);
	warthog::zero_heuristic heuristic(map.width(), map.height());

	warthog::flexible_astar<
		warthog::zero_heuristic,
	   	warthog::wgridmap_expansion_policy> astar(&heuristic, &expander);
	astar.set_verbose(verbose);

	std::cout << "id\talg\texpd\tgend\ttouched\ttime\tsfile\n";
	for(unsigned int i=0; i < scenmgr.num_experiments(); i++)
	{
		warthog::experiment* exp = scenmgr.get_experiment(i);

		int startid = exp->starty() * exp->mapwidth() + exp->startx();
		astar.get_length(map.to_padded_id(startid), warthog::INF);

		std::cout << i<<"\t" << "sssp_wgm" << "\t" 
		<< astar.get_nodes_expanded() << "\t" 
		<< astar.get_nodes_generated() << "\t"
		<< astar.get_nodes_touched() << "\t"
		<< astar.get_search_time()  << "\t"
		<< scenmgr.last_file_loaded() << std::endl;
	}
	std::cerr << "done. total memory: "<< astar.mem() + scenmgr.mem() << "\n";
}
Example No. 13
bool AAStar::findPath(std::list<ANode*>* path) {
	float g;
	ANode *x, *y;

	init();

	start->open = true;
	start->g = 0.0f;
	start->h = heuristic(start,goal);
	open.push(start);

	while (!open.empty()) {
		x = open.top(); open.pop();

		if (x == goal) {
			if (path)
				tracePath(x, *path);
			return true;
		}

		x->open   = false;
		x->closed = true;

		successors(x, succs);
		while (!succs.empty()) {
			y = succs.front(); succs.pop();

			if (y->closed)
				continue;

			g = x->g + y->w*heuristic(x,y);
			if (y->open && g < y->g)
				y->open = false;

			if (!y->open) {
				y->g      = g;
				y->parent = x;
				y->h      = heuristic(y, goal);
				y->open   = true;
				open.push(y);
			}
			visited++;
		}
	}
	return false;
}
Example No. 14
/* double Dstar::getRHS(state u)
 * --------------------------
 * Returns the rhs value for state u.
 */
double Dstar::getRHS(state u) {

  if (u == s_goal) return 0;

  if (cellHash.find(u) == cellHash.end())
    return heuristic(u,s_goal);
  return cellHash[u].rhs;

}
Example No. 15
		MGAStarNode(int x, int y, const MGAStarNode& goal, double g = 0.0)
		: m_X(x),
		  m_Y(y),
		  m_ParentX(x),
		  m_ParentY(y),
		  m_H(0.0),
		  m_G(g)
		{
			setH(heuristic(goal));
		}
Example No. 16
/* state Dstar::calculateKey(state u)
 * --------------------------
 * As per [S. Koenig, 2002]
 */
state Dstar::calculateKey(state u) {

  double val = fmin(getRHS(u),getG(u));

  u.k.first  = val + heuristic(u,s_start) + k_m;
  u.k.second = val;

  return u;

}
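For context, the two-component key computed here is only meaningful together with an ordering: D* Lite [S. Koenig, 2002] compares keys lexicographically when maintaining the open list. A minimal sketch of that comparison, with a stand-in key struct in place of the pair stored in state::k:

// Sketch only: lexicographic ordering of D* Lite keys; a smaller key means
// higher priority on the open list. The 'key' struct is a stand-in for the
// pair stored in state::k by calculateKey() above.
struct key { double first, second; };

inline bool key_less(const key& a, const key& b)
{
  if (a.first != b.first) return a.first < b.first;
  return a.second < b.second;
}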
Example No. 17
/* void Dstar::makeNewCell(state u)
 * --------------------------
 * Checks if a cell is in the hash table, if not it adds it in.
 */
void Dstar::makeNewCell(state u) {

  if (cellHash.find(u) != cellHash.end()) return;

  cellInfo tmp;
  tmp.g       = tmp.rhs = heuristic(u,s_goal);
  tmp.cost    = C1;
  cellHash[u] = tmp;

}
Example No. 18
/* resursiveDLS *********************************************************************************************************
 *
 * @params: player - char value of the current player @pre: ai_player || human_player
 * 			node - pointer to the current GSTNode
 * 			limit - integer cut-off depth for the depth-limited search @pre: > 1
 *
 * @modifies: builds the game tree of nodes used by the depth-limited search
 *
 * @returns: nothing
 ************************************************************************************************************************/
void AIAgent::resursiveDLS(char player, GSTNode* node, int limit, Action best_move) {
	// check for base cases
	char winner = node->board->checkWin(win_connections);
	if (winner == ai_player) {
		node->action.score = 10000000;
		return;
	}
	else if (winner == human_player) {
		node->action.score = -10000000;
		return;
	}
	else if (limit == 0) {
		node->action.score = heuristic(*node->board);
		return;
	}
	// if no action from alpha-beta pruning is given
	if (best_move.x == 0 || best_move.y == 0) {
		// recursive call
		for (int i = 1; i <= boardDimensions; i++) {
			for (int j = 1; j <= boardDimensions; j++) {
				if (((node->board->getValue(i, j) == EMPTY)) && (node->board->getValue(i, j - 1) == player
						|| node->board->getValue(i, j + 1) == player || node->board->getValue(i - 1, j) == player
						|| node->board->getValue(i + 1, j) == player)) {
					// deep copy of node
					Board* _board = new Board(*node->board);
					if (_board->getValue(i + 1, j) == player)
						_board->setValue(i + 1, j, EMPTY);
					else if (_board->getValue(i - 1, j) == player)
						_board->setValue(i - 1, j, EMPTY);
					else if (_board->getValue(i, j + 1) == player)
						_board->setValue(i, j + 1, EMPTY);
					else if (_board->getValue(i, j - 1) == player)
						_board->setValue(i, j - 1, EMPTY);
					_board->setValue(i, j, player);
					gameTree->insert(*_board, node);
					GSTNode* pTemp = node->childPtr;
					// walk to the most recently inserted child (the last sibling)
					while (pTemp->siblingPtr != nullptr) {
						pTemp = pTemp->siblingPtr;
					}
					pTemp->action.x = i;
					pTemp->action.y = j;
					if (player == ai_player) {
						resursiveDLS(human_player, pTemp, limit - 1);
					}
					else {
						resursiveDLS(ai_player, pTemp, limit - 1);
					}
					node->board->setValue(i, j, EMPTY);
				}
			}
		}
	}
	return;
}
Example No. 19
int main(int argc, char *argv[])
{
  double anssend,mytime;
  double start,end,node;
  int i,bitv,mincost,j,vis[n];

  /*Initiating the MPI communication headers*/

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD,&myid);

  start=MPI_Wtime();

  init_adj();
  adj[0][0]=0;

  bitv=(1<<n)-2;
  node=snode;
  lowerb=heuristic(node,bitv);
  printf("The lower bound before calculating the dfs is %d\n",lowerb);
  mincost=dfs(snode,bitv,0);
  printf("\nThe minimum cost through dfs is %d\n",mincost);

  printf("Heuristic cost of going from city 1 to city");

  /*This is the heuristic algorithm*/

  node=snode;
  lowerb=heuristic(node,bitv);
  printf("The hueristic cost is %d\n",hcost);

  /*bfs(snode,bitv,0);
    printf("The minimum coast through bfs is %d\n",mincost);*/

  end=MPI_Wtime();
  if(myid==0)
    printf("Number of processors %d and the time is %lf\n",numprocs,end-start);

  printf("Number of times lower bound worked %d and didn't work is %d and the sum is %d\n",gcount,gcount1,gcount+gcount1);

  MPI_Finalize();
  return 0;
}
Example No. 20
/* void Dstar::updateStart(int x, int y)
 * --------------------------
 * Update the position of the robot, this does not force a replan.
 */
void Dstar::updateStart(int x, int y) {

  s_start.x = x;
  s_start.y = y;

  k_m += heuristic(s_last,s_start);

  s_start = calculateKey(s_start);
  s_last  = s_start;

}
Example No. 21
/* Quiescence Search */
move_t quiescence(board_t *b, int32_t alpha, int32_t beta) {
    uint8_t i;
    move_t m, best;
    move_list_t *list;
    
    /* Initialize the best possible move as blank */
    SET_BLANK_MOVE(best);

    m.eval = heuristic(b, onmove);
    if(m.eval >= beta) {
    	best.eval = m.eval;
    	return best;
    } else if(m.eval > alpha) {
    	alpha = m.eval;
    }

    /* Get the next possible Good captures only */
    list = gen_move_list(b, TRUE);

    /*TODO: Re-order the move list (SEE - Static Exchange Eval) */
    /*reorder_move_list(b, list); */

    /* For each possible next move... */
    for(i = 0; i < list->size; i++) {
    	/* Let's see the board after that move... */
    	move(b, list->move[i]);

    	/* Quiescence Search recursion */
    	m = quiescence(b, -beta, -alpha);
    	m.eval = -m.eval;

    	/* Restores the previous board (before the possible move) */
    	unmove(b);

    	/* Beta cutoff */
    	if(m.eval >= beta) {
    	    best = list->move[i];
    	    best.eval = m.eval;
    	    break;
    	/* Alpha cutoff */
    	} else if(m.eval > alpha) {
    	    best = list->move[i];
    	    alpha = best.eval = m.eval;
    	/* Best possible move until now */
    	} else if(i == 0 || m.eval > best.eval) {
    	    best = list->move[i];
    	    best.eval = m.eval;
    	}
    }

    /* Clear temporary information and return */
    clear_move_list(list);
    return best;
}
Example No. 22
int main() 
{
  //Motion planning algorithm parameters
  glc::Parameters alg_params;
  alg_params.res=16;
  alg_params.control_dim = 2;
  alg_params.state_dim = 2;
  alg_params.depth_scale = 100;
  alg_params.dt_max = 5.0;
  alg_params.max_iter = 50000;
  alg_params.time_scale = 20;
  alg_params.partition_scale = 40;
  alg_params.x0 = glc::vctr({0.0,0.0});
  
  //Create a dynamic model
  SingleIntegrator dynamic_model(alg_params.dt_max);
  
  //Create the control inputs
  ControlInputs2D controls(alg_params.res);
  
  //Create the cost function
  ArcLength performance_objective(4);
  
  //Create instance of goal region
  glc::vctr xg({10.0,10.0});
  SphericalGoal goal(xg.size(),0.25,4);
  goal.setGoal(xg);
  
  //Create the obstacles
  PlanarDemoObstacles obstacles(4);
  
  //Create a heuristic for the current goal
  EuclideanHeuristic heuristic(xg,goal.getRadius());
  glc::GLCPlanner planner(&obstacles,
                          &goal,
                          &dynamic_model,
                          &heuristic,
                          &performance_objective,
                          alg_params,
                          controls.points);
  
  //Run the planner and print solution
  glc::PlannerOutput out;
  planner.plan(out);
  if(out.solution_found){
    std::vector<glc::nodePtr> path = planner.pathToRoot(true);
    glc::splinePtr solution = planner.recoverTraj( path );
    glc::splineTensor coef(solution->coefficient_array);
    glc::printSpline( solution , 20, "Solution");
    glc::trajectoryToFile("shortest_path.txt","../plots/",solution,500);
    glc::nodesToFile("shortest_path_nodes.txt","../plots/",planner.domain_labels);
  }
  return 0;
}
Example No. 23
Node*
Search::make_start(const Point &p) {
	Node* node = get_node(p);
	node->g = 0;
	node->h = heuristic(p);
	node->f = node->h;
	node->prev = NULL;
	node->set_state(START);
///	printf("Push S   ");
///	node->print(stdout);
	return node;
}
Example No. 24
/* Get shortest path as array of nodes in the graph by using the A Star algorithm */
vector<std::shared_ptr<Node>> Graph::aStar(std::shared_ptr<Node> start, std::shared_ptr<Node> goal)
{
	PriorityQueue<std::shared_ptr<Node>> frontier; // List of open, not yet evaluated nodes
	frontier.put(start, 0);			// Place starting node in that list

	map<std::shared_ptr<Node>, std::shared_ptr<Node>> cameFrom; // Mapping of visited nodes and from where we reached them
	map<std::shared_ptr<Node>, int> costSoFar; // Mapping of costs to get to a specific node

	cameFrom[start] = start;
	costSoFar[start] = 0;

	/* Find goal node by repeatedly searching the frontier and expanding it */
	while (!frontier.empty())
	{
		auto current = frontier.get(); // Get top priority node and pop from queue

		// If current node is goal then stop for it has been reached
		if (current == goal) { break; }

		// For all edges coming from current node
		for (std::shared_ptr<Edge> myEdge : edgeListVector[current->getIndex()])
		{
			// Calculate cost to get from current node to next
			int newCost = costSoFar[current] + myEdge->getCost();

			// If either we haven't visited connected node yet or if cost is less
			if (!costSoFar.count(myEdge->getTo()) || newCost < costSoFar[myEdge->getTo()])
			{
				costSoFar[myEdge->getTo()] = newCost;	// Set cost to get to the node
				int priority = newCost + heuristic(myEdge->getTo(), goal);
				frontier.put(myEdge->getTo(), priority);		// Push node onto frontier
				cameFrom[myEdge->getTo()] = current;	// Set from where we reached the node
			}
		}
	}
	std::shared_ptr<Node> current = goal;

	/* Goal is found. Now we need to establish a path towards it */
	vector<std::shared_ptr<Node>> path;
	path.push_back(current);

	// Walk back along trail and push nodes until start is reached
	while (current != start)
	{
		current = cameFrom[current];
		path.push_back(current);
	}

	reverse(path.begin(), path.end()); // Reverse path to get correct order


	return path;
}
Example No. 25
std::vector<Astar::node> Astar::getNeighbors(Astar::node & current){
    std::vector<Point *> points = all_points->getNeighbors(current.currentPoint);
    std::vector<Astar::node> neighboringNodes(points.size());
    for(int x = 0; x < points.size(); x++){
        if(points[x]->valid()){
            neighboringNodes[x].currentPoint = points[x];
            neighboringNodes[x].previousNode = &current;
            neighboringNodes[x].cost = heuristic(neighboringNodes[x]);
        }
    }
    return neighboringNodes;
}
Example No. 26
int dfs(int city,int bitv,int cost)
{
  int i,tempdfs,temph;
  static int count;
  int val,best=100000;


  if(bitv==0)
    return adj[city][snode];
  val=0;
  for(i=0;i<n;i++)
    {
      if((bitv&(1<<i))>>i)
	{
	  val=adj[city][i];
	  printf("Thinking of visiting %d to %d and the initial value is %d; ori cost:%d \n",city,i,val,cost);
	  count++;
	  temph=heuristic(i,bitv-(1<<i));
	  if((val+cost)>lowerb)
	    printf("This exceeded the lower bound val:%d \n",val);
	  else
	    printf("Val:%d cost:%d\n",val,cost);
	  if((val+cost+temph)<=lowerb)
	    {
	      printf("The values are %d %d %d %d\n",i,lowerb,val,cost);
	      tempdfs=dfs(i,bitv-(1<<i),val);
	      val+=tempdfs;
	      best=min(best,val);
	      printf("The value is %d temph:%d\n",val,temph);
	      gcount1++;
	    }
	  if((val+cost+heuristic(i,bitv-(1<<i)))>lowerb)
	    {
	      printf("Value of lower bound has exceeded val:%d cost:%d and hcost:%d\n",val,cost,temph);
	      gcount++;
	    }
	}
    }
  return best;
}
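The heuristic(i, bitv) lower bound used for pruning is not shown in this snippet. One common admissible bound on the remaining tour is the sum, over every city still set in the bitmask, of its cheapest outgoing edge. The sketch below illustrates that idea with the adjacency matrix passed in as a flat array; it is not the program's actual heuristic():

#include <limits.h>

/* Sketch only: sum of the cheapest outgoing edge of every city still set in
 * the visit bitmask. This never overestimates the length of the remaining
 * tour, so it is a valid pruning bound for a branch-and-bound search like
 * the one above. */
int tour_lower_bound(const int *adj, int n, int bitv)
{
  int bound = 0;
  for (int i = 0; i < n; i++) {
    if (!((bitv >> i) & 1))
      continue;                      /* city i is not pending */
    int cheapest = INT_MAX;
    for (int j = 0; j < n; j++)
      if (j != i && adj[i * n + j] < cheapest)
        cheapest = adj[i * n + j];
    if (cheapest != INT_MAX)
      bound += cheapest;
  }
  return bound;
}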
Example No. 27
/* Consider moving TO (x,y) BY dir; to reach the destination from
 * (x,y) took pathcost.
 */
void evaluate(int x, int y, int dir, int pathcost)
{
	int dx = dxes[dir], dy = dyes[dir];
	int fx = x - dx, fy = y - dy;
	int ix = 80*y + x, fix = 80*fy + fx;

	int newscore = pathcost + 1 + heuristic(fx,fy);

	node_t *from_node = &nodes[fix];

	/* Let's not be pathing off the map */
	if (fx < 0 || fx >= 80 || fy < 1 || fy >= 22)
		return;

	if (x == to_x && y == to_y && ismonster(to_object))
	{
		/* Special rule for attacking monsters - normal terrain
		 * effects do not apply in this case
		 */
	}
	else
	{
		/* Don't path into impassable terrain */
		if (impassable(framebuffer[ix], x, y))
			return;

		if (dx && dy)
		{
			/* Moving diagonally!  Check if there are doorways. */
			if (notebuffer[fix] & NOTE_OPENDOOR)
				return;

			if (notebuffer[ix] & NOTE_OPENDOOR)
				return;
		}

		/* Don't path into traps unless forced */
		if (notebuffer[ix] & NOTE_TRAP)
			newscore += 1000; /* prefer a trap over 1001 squares of walking */
	}

	if (from_node->score != SCORE_CLOSED && newscore < from_node->score)
	{
		/* Ooh.  A better way to solve (fx,fy). */
		from_node->score = newscore;
		from_node->dir = dir;
	}
}
Example No. 28
/* void Dstar::updateGoal(int x, int y)
 * --------------------------
 * This is somewhat of a hack: to change the position of the goal we
 * first save all of the non-default-cost cells on the map, clear the map,
 * move the goal, and re-add the saved cells. Since most of these cells
 * are not between the start and goal this does not seem to hurt
 * performance too much. It also frees up a good deal of memory we
 * likely no longer use.
 */
void Dstar::updateGoal(int x, int y) {
   
  list< pair<ipoint2, double> > toAdd;
  pair<ipoint2, double> tp;
  
  ds_ch::iterator i;
  list< pair<ipoint2, double> >::iterator kk;
  
  for(i=cellHash.begin(); i!=cellHash.end(); i++) {
    if (!near(i->second.cost, C1)) {
      tp.first.x = i->first.x;
      tp.first.y = i->first.y;
      tp.second = i->second.cost;
      toAdd.push_back(tp);
    }
  }

  cellHash.clear();
  openHash.clear();

  while(!openList.empty())
    openList.pop();
  
  k_m = 0;
  
  s_goal.x  = x;
  s_goal.y  = y;

  cellInfo tmp;
  tmp.g = tmp.rhs =  0;
  tmp.cost = C1;

  cellHash[s_goal] = tmp;
  insert(s_goal);

  tmp.g = tmp.rhs = heuristic(s_start,s_goal);
  tmp.cost = C1;
  cellHash[s_start] = tmp;
  s_start = calculateKey(s_start);

  s_last = s_start;    

  for (kk=toAdd.begin(); kk != toAdd.end(); kk++) {
    updateCell(kk->first.x, kk->first.y, kk->second);
  }
  

}
Example No. 29
void closest(struct KD_TREE *tree, struct Node *u, int k, struct Point *x, int h[], int *mndist) {
	/* Prune this subtree if the lower bound built from the per-axis
	 * offsets h[] already reaches the best distance found so far. */
	if (u == NULL || heuristic(tree, h) >= *mndist)
		return;
	int dist = pt_dist(&(u->pid), x), old;
	*mndist = min(*mndist, dist), old = h[k];
	if (x->d[k] < u->pid.d[k]) {
		/* Query lies on the lson side of the split: search that side first,
		 * then the far side with h[k] set to the offset to the splitting plane. */
		closest(tree, u->lson, (k+1)%tree->kD, x, h, mndist);
		h[k] = abs(x->d[k] - u->pid.d[k]);
		closest(tree, u->rson, (k+1)%tree->kD, x, h, mndist);
		h[k] = old;
	} else {
		/* Query lies on the rson side: mirror image of the branch above. */
		closest(tree, u->rson, (k+1)%tree->kD, x, h, mndist);
		h[k] = abs(x->d[k] - u->pid.d[k]);
		closest(tree, u->lson, (k+1)%tree->kD, x, h, mndist);
		h[k] = old;
	}
}
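Here heuristic(tree, h) acts as a pruning bound: a lower bound on the distance from the query point to anything in the current subtree, built from the per-axis offsets accumulated in h[]. As an illustration only, assuming pt_dist() returns squared Euclidean distance, such a bound could be computed as:

/* Sketch only: a lower bound from the per-axis offsets, assuming a squared
 * Euclidean pt_dist(). The snippet's real heuristic() is not shown and may
 * use a different metric. */
int offset_lower_bound(const int h[], int dims)
{
	int bound = 0;
	for (int k = 0; k < dims; k++)
		bound += h[k] * h[k];
	return bound;
}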
Example No. 30
void
run_jps_wgm(warthog::scenario_manager& scenmgr)
{
    warthog::weighted_gridmap map(scenmgr.get_experiment(0)->map().c_str());
	warthog::jps_expansion_policy_wgm expander(&map);
	warthog::octile_heuristic heuristic(map.width(), map.height());

	warthog::flexible_astar<
		warthog::octile_heuristic,
	   	warthog::jps_expansion_policy_wgm> astar(&heuristic, &expander);
	astar.set_verbose(verbose);
    // cheapest terrain (movingai benchmarks) has ascii value '.'; we scale
    // all heuristic values accordingly (otherwise the heuristic doesn't 
    // impact f-values much and search starts to behave like dijkstra)
    astar.set_hscale('.');  

	std::cout << "id\talg\texpd\tgend\ttouched\ttime\tcost\tsfile\n";
	for(unsigned int i=0; i < scenmgr.num_experiments(); i++)
	{
		warthog::experiment* exp = scenmgr.get_experiment(i);

		int startid = exp->starty() * exp->mapwidth() + exp->startx();
		int goalid = exp->goaly() * exp->mapwidth() + exp->goalx();
		double len = astar.get_length(
				map.to_padded_id(startid),
			   	map.to_padded_id(goalid));
		if(len == warthog::INF)
		{
			len = 0;
		}

		std::cout << i<<"\t" << "jps_wgm" << "\t" 
		<< astar.get_nodes_expanded() << "\t" 
		<< astar.get_nodes_generated() << "\t"
		<< astar.get_nodes_touched() << "\t"
		<< astar.get_search_time()  << "\t"
		<< len << "\t" 
		<< scenmgr.last_file_loaded() << std::endl;

		check_optimality(len, exp);
	}
	std::cerr << "done. total memory: "<< astar.mem() + scenmgr.mem() << "\n";
}