Example #1
int main(int argc, char* argv[])
{
    StateSet entries;
    StateCreatorFunction initFunc(InitState::createState);
    entries.insert(StateEntry(INIT, "init", initFunc));

    StateCreatorFunction stopFunc(StopState::createState);
    entries.insert(StateEntry(STOP, "stop", stopFunc));

    StateEntry entryArray[] =
        {
            StateEntry(INIT, "init", initFunc),
            StateEntry(STOP, "stop", stopFunc)
        };

    StateSet entries2(entryArray, entryArray + (sizeof(entryArray) / sizeof(entryArray[0])));

    StateSet::iterator it = entries.find(INIT);
    boost::shared_ptr<State> s1 = it->creatorFunction_();

    StateSet::nth_index<1>::type::iterator it2 = entries.get<1>().find("stop");
    boost::shared_ptr<State> s2 = it2->creatorFunction_();

    std::cout << "State name: " << s1->name() << std::endl;
    std::cout << "State name: " << s2->name() << std::endl;

    BOOST_FOREACH(const StateEntry& entry, entries2)
    {
        std::cout << entry.umStateName_ << std::endl;
    }

    return 0;
}
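Example #1 depends on several definitions that are not shown. The sketch below is one plausible reconstruction, assuming Boost.MultiIndex: only StateSet, StateEntry, StateCreatorFunction, the umStateName_/creatorFunction_ members, and the INIT/STOP keys come from the example itself; the remaining names (State::name(), umStateId_, the StateId enum) are guesses, and InitState/StopState are assumed to derive from State and expose static createState() factories returning boost::shared_ptr<State>.

#include <iostream>
#include <string>
#include <boost/foreach.hpp>              // used by the example's BOOST_FOREACH loop
#include <boost/function.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/member.hpp>

// Abstract state interface; the example only calls name().
class State
{
public:
    virtual ~State() {}
    virtual std::string name() const = 0;
};

// Factory signature matching InitState::createState / StopState::createState.
typedef boost::function<boost::shared_ptr<State>()> StateCreatorFunction;

// Key enum; INIT and STOP are the values looked up via entries.find(...).
enum StateId { INIT, STOP };

struct StateEntry
{
    StateEntry(StateId id, const std::string& name, StateCreatorFunction creator)
        : umStateId_(id), umStateName_(name), creatorFunction_(creator) {}

    StateId umStateId_;                    // hypothetical member name
    std::string umStateName_;
    StateCreatorFunction creatorFunction_;
};

// Index 0 keys entries by id (entries.find(INIT)); index 1 keys them by name
// (entries.get<1>().find("stop")).
typedef boost::multi_index_container<
    StateEntry,
    boost::multi_index::indexed_by<
        boost::multi_index::ordered_unique<
            boost::multi_index::member<StateEntry, StateId,
                                       &StateEntry::umStateId_> >,
        boost::multi_index::ordered_unique<
            boost::multi_index::member<StateEntry, std::string,
                                       &StateEntry::umStateName_> >
    >
> StateSet;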
Example #2
/**
 * Recursive depth-first solver (not used; see the iterative DFS below)
 */
void dfs(Statistics &stat, vector<State> &stateVector, StateSet &rec, const vector<string> &ground, State state, int &flg, State &result)
{
	// got a result, return
	if (flg)
		return;
	for (int i = 0; i < 4 && !flg; ++i) {
		State now = state;
		now.person.x += direction[i][0]; 
		now.person.y += direction[i][1];
		int s = validState(direction[i][0], direction[i][1], now, ground);
		now.move = step[i];
		now.previousStateNum = now.currentStateNum;
		stat.anodes++;
		if (s == -1) {
			result = now;
			flg = 1;
			return;
		} else if (s && !rec.count(now)) {
			now.currentStateNum = stateVector.size();
			stateVector.push_back(now);
			rec.insert(now);
			dfs(stat, stateVector, rec, ground, now, flg, result);
		} else if (s) {
			stat.bnodes++;
		}
	}
}
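Examples #2, #3, and #10 share a handful of declarations defined elsewhere in the original program (as are initState, validState, outputStat, and outputSolution). The following is a rough sketch inferred purely from the call sites; the real State almost certainly carries more data than the player position, and its operator< would compare whatever else distinguishes two states. The direction/step tables may also be ordered differently.

#include <set>
#include <string>
#include <vector>

struct Point
{
    int x, y;
};

struct State
{
    Point person;          // current position (moved by direction[i])
    char move;             // label of the move that produced this state (step[i])
    int previousStateNum;  // index of the parent state in stateVector
    int currentStateNum;   // index of this state in stateVector

    // std::set<State> (StateSet) needs a strict weak ordering to detect
    // repeated states; the real comparison presumably covers more fields.
    bool operator<(const State &rhs) const
    {
        if (person.x != rhs.person.x) return person.x < rhs.person.x;
        return person.y < rhs.person.y;
    }
};

typedef std::set<State> StateSet;

// Search statistics, named after how the solvers update them.
struct Statistics
{
    Statistics() : anodes(0), bnodes(0), cnodes(0), dnodes(0), runtime(0.0) {}
    long long anodes;   // states generated
    long long bnodes;   // duplicates skipped
    long long cnodes;   // frontier size when the search stopped
    long long dnodes;   // distinct states recorded
    double runtime;     // seconds
};

// {dx, dy} per direction, with a matching move label.
const int direction[4][2] = { {-1, 0}, {1, 0}, {0, -1}, {0, 1} };
const char step[4] = { 'U', 'D', 'L', 'R' };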
Example #3
/**
 * Uniform Cost Search
 */
void UCS(const vector<string> &ground)
{
	int time1 = clock();
	Statistics stat;
	priority_queue<UCSState>q;
	StateSet rec;
	UCSState init(0);
	initState(ground, init.state);
	vector<State>stateVector;
	q.push(init);
	stateVector.push_back(init.state);
	rec.insert(init.state);
	UCSState result;
	while (!q.empty()) {
		UCSState tmp = q.top();
		q.pop();
		for (int i = 0; i < 4; ++i) {
			UCSState now = tmp;
			now.state.person.x += direction[i][0];
			now.state.person.y += direction[i][1];
			int s = validState(direction[i][0], direction[i][1], now.state, ground);
			now.state.move = step[i];
			now.state.previousStateNum = now.state.currentStateNum;
			stat.anodes++;
			if (s == -1) {
				result = now;
				goto end;
			} else if (s && !rec.count(now.state)) {
				now.cost += s;
				now.state.currentStateNum = stateVector.size();
				stateVector.push_back(now.state);
				rec.insert(now.state);
				q.push(now);
			} else if (s) {
				stat.bnodes++;
			}
		}
	}
end:
	stat.cnodes = q.size();
	stat.dnodes = rec.size() + 1;
	stat.runtime = (clock() - time1) * 1.0 / CLOCKS_PER_SEC;
	outputStat(stat);
	outputSolution(stateVector, result.state);
}
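UCS above pushes UCSState objects into a std::priority_queue, which is a max-heap, so the comparison must be inverted for the cheapest state to be expanded first. A plausible definition, assuming the State type sketched after Example #2:

struct UCSState
{
    State state;   // wrapped search state
    int cost;      // accumulated path cost (UCS priority)

    UCSState(int c = 0) : cost(c) {}

    // std::priority_queue pops the largest element, so "less" here means
    // "more expensive", which turns the queue into a min-heap on cost.
    bool operator<(const UCSState &rhs) const
    {
        return cost > rhs.cost;
    }
};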
Example #4
 const std::set<State> getReturns(Nwa const & nwa)
 {
   details::TransitionStorage const & trans = nwa._private_get_transition_storage_();
   const Returns ret = trans.getReturns();
   StateSet returns;
   for( ReturnIterator it = ret.begin(); it != ret.end(); it++ )
   {
     returns.insert( Trans::getReturnSite(*it) );
   }
   return returns;
 }
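This query helper (and the variants in the following examples) returns a plain std::set<State>, where State is a WALi key, so the result can be consumed with ordinary set iteration. A hedged usage fragment, assuming an opennwa::Nwa named nwa has been populated elsewhere and <iostream> is available:

std::set<State> returnSites = getReturns(nwa);
for (std::set<State>::const_iterator it = returnSites.begin();
     it != returnSites.end(); ++it)
{
    std::cout << "return site key: " << *it << std::endl;  // keys print as integers
}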
Example #5
 const std::set<State> getCalls(Nwa const & nwa)
 {
   details::TransitionStorage const & trans = nwa._private_get_transition_storage_();
   const Returns call = trans.getReturns();
   StateSet calls;
   for( ReturnIterator it = call.begin(); it != call.end(); it++ )
   {
     calls.insert( Trans::getCallSite(*it) );
   }
   return calls;
 }
Example #6
 const std::set<State> getExits(Nwa const & nwa)
 {
   details::TransitionStorage const & trans = nwa._private_get_transition_storage_();
   const Returns & exit = trans.getReturns();
   StateSet exits;
   for( ReturnIterator it = exit.begin(); it != exit.end(); it++ )
   {
     exits.insert( Trans::getExit(*it) );
   }
   return exits;
 }
Example #7
    const std::set<State> getReturns_Call(Nwa const & nwa, State callSite, Symbol symbol )
    {
      assert(callSite < wali::WALI_BAD_KEY);
      assert(symbol < wali::WALI_BAD_KEY);

      details::TransitionStorage const & trans = nwa._private_get_transition_storage_();

      const Returns ret = trans.getTransPred(callSite);
      StateSet returns;
      for( ReturnIterator it = ret.begin(); it != ret.end(); it++ )
      {
        if( symbol == Trans::getReturnSym(*it) )
          returns.insert( Trans::getReturnSite(*it) );
      }
      return returns;
    }
Example #8
    const std::set<State> getCalls_Ret(Nwa const & nwa, Symbol symbol, State returnSite )
    {
      assert(returnSite < wali::WALI_BAD_KEY);
      assert(symbol < wali::WALI_BAD_KEY);

      details::TransitionStorage const & trans = nwa._private_get_transition_storage_();

      const Returns call = trans.getTransRet(returnSite);
      StateSet calls;
      for( ReturnIterator it = call.begin(); it != call.end(); it++ )
      {
        if( symbol == Trans::getReturnSym(*it) )
          calls.insert( Trans::getCallSite(*it) );
      }
      return calls;
    }
Example #9
    const std::set<State> getExits_Sym(Nwa const & nwa, Symbol symbol )
    {
      assert(symbol < wali::WALI_BAD_KEY);

      details::TransitionStorage const & trans = nwa._private_get_transition_storage_();

      const Returns & exit = trans.getReturns();
      StateSet exits;
      for( ReturnIterator it = exit.begin(); it != exit.end(); it++ )
      {
        if( symbol == Trans::getReturnSym(*it) )
        {
          exits.insert( Trans::getExit(*it) );
        }
      }
      return exits;
    }
Example #10
/**
 * Iterative (stack-based) depth-first search to the solution
 */
void DFS(const vector<string> &ground)
{
	int time1 = clock();
	Statistics stat;
	State init;
	initState(ground, init);
	StateSet rec;
	vector<State>stateVector;
	stateVector.push_back(init);
	int flg = 0;
	State result;
	stack<State>st;
	st.push(init);
	while (!st.empty()) {
		State tmp = st.top();
		st.pop();
		for (int i = 0; i < 4; ++i) {
			State now = tmp;
			now.person.x += direction[i][0];
			now.person.y += direction[i][1];
			int s = validState(direction[i][0], direction[i][1], now, ground);
			now.move = step[i];
			now.previousStateNum = now.currentStateNum;
			stat.anodes++;
			if (s == -1) {
				result = now;
				goto end;
			} else if (s && !rec.count(now)) {
				now.currentStateNum = stateVector.size();
				st.push(now);
				stateVector.push_back(now);
				rec.insert(now);
			} else if (s) {
				stat.bnodes++;
			}
		}
	}
end:
	stat.cnodes = st.size();
	stat.dnodes = rec.size() + 1;
	stat.runtime = (clock() - time1) * 1.0 / CLOCKS_PER_SEC;
	outputStat(stat);
	outputSolution(stateVector, result);
}
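For completeness, a minimal driver that could feed the solvers above. The exact board format expected by initState()/validState() comes from the rest of the original program, so this harness is only illustrative.

#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> ground;
    std::string row;
    while (std::getline(std::cin, row))   // read the board, one row per line
        ground.push_back(row);

    DFS(ground);   // or UCS(ground)
    return 0;
}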
Example #11
void FFReducedModelSolver::lao(mlcore::State* s0)
{
    // This is a stack based implementation of LAO*.
    // We don't use the existing library implementation because we are going to
    // solve the reduced states with j=k using FF.
    StateSet visited;
    int countExpanded = 0;
    while (true) {
        do {
            visited.clear();
            countExpanded = 0;
            list<mlcore::State*> stateStack;
            stateStack.push_back(s0);
            while (!stateStack.empty()) {
                if (timeHasRunOut(startingPlanningTime_, maxPlanningTime_))
                    return;
                mlcore::State* s = stateStack.back();
                stateStack.pop_back();
                if (!visited.insert(s).second)  // state was already visited.
                    continue;
                if (s->deadEnd() || problem_->goal(s))
                    continue;
                if (s->bestAction() == nullptr) {
                    // state has never been expanded.
                    this->bellmanUpdate(s);
                    countExpanded++;
                    continue;
                } else {
                    mlcore::Action* a = s->bestAction();
                    mlreduced::ReducedState* reducedState =
                        static_cast<mlreduced::ReducedState*>(s);
                    for (Successor sccr : problem_->transition(s, a)) {
                        if (!(useFF_ && reducedState->exceptionCount() == 0))
                            stateStack.push_back(sccr.su_state);
                    }
                }
                this->bellmanUpdate(s);
            }
        } while (countExpanded != 0);
        while (true) {
            visited.clear();
            list<mlcore::State*> stateStack;
            stateStack.push_back(s0);
            double error = 0.0;
            while (!stateStack.empty()) {
                if (timeHasRunOut(startingPlanningTime_, maxPlanningTime_))
                    return;
                mlcore::State* s = stateStack.back();
                stateStack.pop_back();
                if (!visited.insert(s).second)
                    continue;
                if (s->deadEnd() || problem_->goal(s))
                    continue;
                mlcore::Action* prevAction = s->bestAction();
                if (prevAction == nullptr) {
                    // if it reaches this point it hasn't converged yet.
                    error = mdplib::dead_end_cost + 1;
                } else {
                    mlreduced::ReducedState* reducedState =
                        static_cast<mlreduced::ReducedState*>(s);
                    for (Successor sccr : problem_->transition(s, prevAction)) {
                        if (!(useFF_ && reducedState->exceptionCount() == 0))
                            stateStack.push_back(sccr.su_state);
                    }
                }
                error = std::max(error, this->bellmanUpdate(s));
                if (prevAction != s->bestAction()) {
                    // it hasn't converged because the best action changed.
                    error = mdplib::dead_end_cost + 1;
                    break;
                }
            }
            if (error < epsilon_)
                return;
            if (error > mdplib::dead_end_cost) {
                break;  // BPSG changed, must expand tip nodes again
            }
        }
    }
}
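In this example and the ones that follow, StateSet holds mlcore::State pointers, and the transition function yields successor entries read as sccr.su_state (and sccr.su_prob in the next example). A minimal sketch of what those declarations are assumed to look like; the real mdp-lib types may differ, and a pointer-based set is only adequate because each logical state is assumed to have a unique State* instance:

#include <unordered_set>

namespace mlcore
{
class State;   // library state type, defined elsewhere

// (state, probability) pair produced by Problem::transition().
struct Successor
{
    State* su_state;
    double su_prob;
    Successor(State* s, double p) : su_state(s), su_prob(p) {}
};
}

// Visited/seen sets in these snippets only rely on pointer identity.
typedef std::unordered_set<mlcore::State*> StateSet;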
Example #12
// Runs [numSims] simulations of the given solver and returns the results
// (expected cost, cost variance, total or per-decision planning time, and the
// longest single planning time).
// Argument [algorithm] is the name of the algorithm implemented by [solver].
// Argument [maxTime], if set to > 0, specifies the maximum time (in
// milliseconds) the algorithm is allowed to spend on all simulations.
// If [perReplan] is true, then [maxTime] is instead the maximum time allowed
// per re-planning event.
vector<double> simulate(Solver* solver,
                        string algorithm,
                        int numSims,
                        int maxTime = -1,
                        bool perReplan = false)
{
    double expectedCost = 0.0;
    double variance = 0.0;
    double totalTime = 0.0;
    double longestTime = 0.0;
    double expectedTime = 0.0;  // expected *total* time
    double varianceTime = 0.0;  // variance *total* time
    StateSet statesSeen;
    int cnt = 0;
    int numDecisions = 0;
    clock_t simulationsStartTime = clock();
    for (int i = 0; i < numSims; i++) {
        if (verbosity >= 100)
            cout << " ********* Simulation Starts ********* " << endl;
        clock_t startTime, endTime;
        double simulationPlanTime = 0.0;
        // If requested, reset all state information computed by the algorithm
        if (mustResetPlanner(i)) {
            for (State* s : problem->states())
                s->reset();
            if (maxTime > 0) {
                solver->maxPlanningTime(maxTime);
            }
            solver->reset();
            if (!flag_is_registered("precompute-h"))
                heuristic->reset();
            startTime = clock();
            // Initial planning
            if (algorithm != "greedy")
                solver->solve(problem->initialState());

            endTime = clock();
            double planTime = (double(endTime - startTime) / CLOCKS_PER_SEC);
            totalTime += planTime;
            simulationPlanTime += planTime;
            longestTime = std::max(longestTime, planTime);
            numDecisions++;
        }
        if (verbosity >= 10) {
            cout << "Starting simulation " << i << endl;
        }
        State* tmp = problem->initialState();
        if (verbosity >= 100) {
            cout << "Estimated cost " <<
                problem->initialState()->cost() << endl;
        }
        // This is where the actual simulated trial starts
        double costTrial = 0.0;
        int plausTrial = 0;
        while (!problem->goal(tmp)) {
            statesSeen.insert(tmp);
            Action* a;
            // Re-planning
            if (mustReplan(solver, algorithm, tmp, plausTrial)) {
                startTime = clock();
                int simulationsElapsedTime =
                    std::ceil(1000 * (double(startTime - simulationsStartTime)
                                / CLOCKS_PER_SEC));
                if (maxTime > -1) {
                    int planningTime = perReplan ?
                        maxTime : std::max(0, maxTime - simulationsElapsedTime);
                    solver->maxPlanningTime(planningTime);
                }
                if (algorithm != "greedy")
                    a = solver->solve(tmp);
                                                                                dprint("found action" , (void *) a);
                endTime = clock();
                double planTime =
                    (double(endTime - startTime) / CLOCKS_PER_SEC);
                totalTime += planTime;
                simulationPlanTime += planTime;
                longestTime = std::max(longestTime, planTime);
                numDecisions++;
                if (algorithm != "hop")
                    a = greedyAction(problem, tmp);
            } else {
                if (useUpperBound) {
                    // The algorithms that use upper bounds store the
                    // greedy action with respect to the upper bound
                    // in State::bestAction_
                    a = tmp->bestAction();
                }
                else {
                    a = greedyAction(problem, tmp);
                }
            }

            if (verbosity >= 1000) {
                cout << "State/Action: " << tmp << " " << a << " " << endl;
            }

            costTrial += problem->cost(tmp, a);
            costTrial = std::min(costTrial, mdplib::dead_end_cost);
            if (costTrial >= mdplib::dead_end_cost) {
                break;
            }
            double prob = 0.0;
            State* aux = randomSuccessor(problem, tmp, a, &prob);
            if (algorithm == "hdp") {
                double maxProb = 0.0;
                for (auto const & sccr : problem->transition(tmp, a))
                    maxProb = std::max(maxProb, sccr.su_prob);
                plausTrial +=
                    static_cast<HDPSolver*>(solver)->kappa(prob, maxProb);
            }
            tmp = aux;
        }
        if (verbosity >= 10)
            cout << costTrial << endl;
        if (flag_is_registered("ctp")) {
            CTPState* ctps = static_cast<CTPState*>(tmp);
            if (!ctps->badWeather()) {
                cnt++;
                updateStatistics(costTrial, cnt, expectedCost, variance);
                updateStatistics(
                    simulationPlanTime, cnt, expectedTime, varianceTime);
            }
        } else {
            cnt++;
            updateStatistics(costTrial, cnt, expectedCost, variance);
            updateStatistics(
                simulationPlanTime, cnt, expectedTime, varianceTime);
        }
        if (verbosity >= 0) {
            if (cnt % 500 == 0 || i == numSims - 1) {
                double reportedTime = perReplan ?
                    totalTime / numDecisions : totalTime;
                cout << "sim " << cnt << " exp.cost " << expectedCost
                     << " var " << variance / (cnt - 1)
                     << " time " << reportedTime
                     << " longestTime  " << longestTime << " "
                     << " Exp[total time] " << expectedTime
                     << " Var[total time] " << varianceTime / (cnt - 1) << endl;
            }
        }
    }

    double reportedTime = perReplan ? totalTime / numDecisions : totalTime;

    if (verbosity >= 10) {
        cout << "Estimated cost " << problem->initialState()->cost() << " ";
        cout << "Avg. Exec cost " << expectedCost << " ";
        cout << "Std. Dev. " << sqrt(variance / (cnt - 1)) << " ";
        cout << "Total time " << totalTime / cnt << " " << endl;
        cout << "States seen " << statesSeen.size() << endl;
        cout << "Avg. time per decision "
             << totalTime / numDecisions << endl
             << "Longest planning time " << longestTime << endl;
        cout << "Num. decisions " << numDecisions << endl;
    }

    double results[] = {expectedCost,
                        variance / (cnt - 1),
                        reportedTime,
                        longestTime};
    return vector<double>(results, results + sizeof(results) / sizeof(double));
}
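A hypothetical call site for simulate(), assuming a Solver* named solver and the global problem have been configured elsewhere, as the surrounding harness does; "laostar" is just an example algorithm label. The indices follow the results array built at the end of the function.

vector<double> results = simulate(solver, "laostar", /*numSims=*/100,
                                  /*maxTime=*/60000, /*perReplan=*/true);
cout << "avg cost "        << results[0]
     << "  cost variance " << results[1]
     << "  plan time "     << results[2]
     << "  longest plan "  << results[3] << endl;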
Example #13
/* In crossing, src trans and dest trans both go to existing states. Make one
 * state from the sets of states that src and dest trans go to. */
TransAp *FsmAp::fsmAttachStates( MergeData &md, StateAp *from,
			TransAp *destTrans, TransAp *srcTrans )
{
	/* The priorities are equal. We must merge the transitions. Does the
	 * existing trans go to the state we are to attach to? ie, are we to
	 * simply double up the transition? */
	StateAp *toState = srcTrans->toState;
	StateAp *existingState = destTrans->toState;

	if ( existingState == toState ) {
		/* The transition is a double up to the same state.  Copy the src
		 * trans into itself. We don't need to merge in the from out trans
		 * data, that was done already. */
		addInTrans( destTrans, srcTrans );
	}
	else {
		/* The trans is not a double up. Dest trans cannot be the same as src
		 * trans. Set up the state set. */
		StateSet stateSet;

		/* We go to all the states the existing trans goes to, plus... */
		if ( existingState->stateDictEl == 0 )
			stateSet.insert( existingState );
		else
			stateSet.insert( existingState->stateDictEl->stateSet );

		/* ... all the states that we have been told to go to. */
		if ( toState->stateDictEl == 0 )
			stateSet.insert( toState );
		else
			stateSet.insert( toState->stateDictEl->stateSet );

		/* Look for the state. If it is not there already, make it. */
		StateDictEl *lastFound;
		if ( md.stateDict.insert( stateSet, &lastFound ) ) {
			/* Make a new state representing the combination of states in
			 * stateSet. It gets added to the fill list.  This means that we
			 * need to fill in it's transitions sometime in the future.  We
			 * don't do that now (ie, do not recurse). */
			StateAp *combinState = addState();

			/* Link up the dict element and the state. */
			lastFound->targState = combinState;
			combinState->stateDictEl = lastFound;

			/* Add to the fill list. */
			md.fillListAppend( combinState );
		}

	/* Get the state that was inserted, or found if it already existed. */
		StateAp *targ = lastFound->targState;

		/* Detach the state from existing state. */
		detachTrans( from, existingState, destTrans );

		/* Re-attach to the new target. */
		attachTrans( from, targ, destTrans );

		/* Add in src trans to the existing transition that we redirected to
		 * the new state. We don't need to merge in the from out trans data,
		 * that was done already. */
		addInTrans( destTrans, srcTrans );
	}

	return destTrans;
}
Example #14
// This implementation is not used anymore. Re-using the labels is incorrect
// because states can be solved in one of the short-sighted SSPs but not another
// (due to the horizon mismatch).
void SSiPPSolver::optimalSolver(WrapperProblem* problem, State* s0)
{
    // This is a stack based implementation of LAO*.
    // We don't use the existing library implementation so that we can take
    // advantage of the SOLVED_SSiPP labels.
    StateSet visited;
    int countExpanded = 0;
    while (true) {
        do {
            visited.clear();
            countExpanded = 0;
            list<State*> stateStack;
            stateStack.push_back(s0);
            while (!stateStack.empty()) {
                if (ranOutOfTime()) {
                    return;
                }
                State* s = stateStack.back();
                stateStack.pop_back();
                if (!visited.insert(s).second)  // state was already visited.
                    continue;

                if (s->deadEnd() ||
                        problem->goal(s) ||
                        s->checkBits(mdplib::SOLVED_SSiPP) ||
                        problem->overrideGoals()->count(s) > 0)
                    continue;
                if (s->bestAction() == nullptr) {
                    // state has never been expanded.
                    bellmanUpdate(problem, s);
                    countExpanded++;
                    continue;
                } else {
                    Action* a = s->bestAction();
                    for (Successor sccr : problem->transition(s, a))
                        stateStack.push_back(sccr.su_state);
                }
                if (!s->checkBits(mdplib::SOLVED_SSiPP)) {
                    bellmanUpdate(problem, s);
                }
            }
        } while (countExpanded != 0);
        while (true) {
            visited.clear();
            list<State*> stateStack;
            stateStack.push_back(s0);
            double error = 0.0;
            while (!stateStack.empty()) {
                if (ranOutOfTime()) {
                    return;
                }
                State* s = stateStack.back();
                stateStack.pop_back();
                if (s->deadEnd() ||
                        problem->goal(s) ||
                        s->checkBits(mdplib::SOLVED_SSiPP) ||
                        problem->overrideGoals()->count(s) > 0)
                    continue;
                if (!visited.insert(s).second)
                    continue;
                Action* prevAction = s->bestAction();
                if (prevAction == nullptr) {
                    // if it reaches this point it hasn't converged yet.
                    error = mdplib::dead_end_cost + 1;
                } else {
                    for (Successor sccr : problem->transition(s, prevAction))
                        stateStack.push_back(sccr.su_state);
                }
                error = std::max(error, bellmanUpdate(problem, s));
                if (prevAction != s->bestAction()) {
                    // it hasn't converged because the best action changed.
                    error = mdplib::dead_end_cost + 1;
                    break;
                }
            }
            if (error < epsilon_)
                return;
            if (error > mdplib::dead_end_cost) {
                break;  // BPSG changed, must expand tip nodes again
            }
        }
    }
}