Example #1
void PatternMatching::calcMarkersNextDistanceAngle(std::vector<Marker *> &markers) {
	// Angle wrap-around constant, only used when closing the loop back to the first marker.
	const float R = (float) (360*PI*180);
	for (unsigned int i = 0; i < markers.size(); ++i) {
		if ((i+1) != markers.size()) {
			// Distance and angle difference from this marker to the next one in the list.
			markers[i]->next_dist = euclideanDistance2D(markers[i]->center, markers[i+1]->center);
			markers[i]->next_angle_dist = (markers[i+1]->angle - markers[i]->angle);	// Review!
		}
		else {
			// Last marker: wrap around to the first marker.
			markers[i]->next_dist = euclideanDistance2D(markers[0]->center, markers[i]->center);
			markers[i]->next_angle_dist = (markers[0]->angle + (R - markers[i]->angle));	// Review!
		}
	}
}
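Both examples rely on a euclideanDistance2D helper that is not shown on this page. Below is a minimal sketch of the point-based overload used in Example #1, under the assumption that the point type (e.g. the type of Marker::center) exposes public x and y members; the real project may use a concrete type such as cv::Point2f rather than a template.

#include <cmath>

// Hypothetical sketch, not the project's actual implementation:
// Euclidean distance between two 2D points with public x/y members.
template <typename Point2D>
float euclideanDistance2D(const Point2D &a, const Point2D &b) {
	float dx = static_cast<float>(a.x - b.x);
	float dy = static_cast<float>(a.y - b.y);
	return std::sqrt(dx * dx + dy * dy);
}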
Example #2
double calculateReward(State goal, int lookahead) {
	ROS_INFO("Calculating reward for state. Lookahead is %d", lookahead);
	double reward = -1;
	// Get the data from /goal; it could be stored in a list and reused.
	// Assuming the goal comes with only one person.
	double normSum = 0.0;
	if (lookahead == 1) {
		for (std::vector<State>::iterator state = pomdp.states.begin(); state != pomdp.states.end(); state++) {
			if (goal.pose.position.x != state->pose.position.x
					|| goal.pose.position.y != state->pose.position.y) {
				// Only 95 out of 100 times is there actually a person.
				if (state->obs_weight == 1 / pomdp.num_states) {
					ROS_INFO("State observation weight is 1/pomdp.num_states!");
					// As per the map this state is unreachable/unexplored, so keep it at zero.
					continue;
				} else {
					// Weight the state by its inverse distance to the goal, scaled by the 0.95 detection rate.
					state->obs_weight = state->obs_weight + (1
							/ euclideanDistance2D(goal, *state) * .95);
					//ROS_INFO("euc dist %f", euclideanDistance2D(goal, *state));
					if (state->obs_weight > reward) {
						reward = state->obs_weight;
					}
				}
			} else {
				// The goal coincides with this state, so give it a high observation weight.
				state->obs_weight = 0.8;
				ROS_INFO("goal is an internal state, observation weight set very high");
			}
			normSum = normSum + state->obs_weight;
		}

		normaliseDistribution(normSum);
		ROS_INFO("Norm sum is %f.", normSum);
	} /*else {
		for (std::vector<State>::iterator state = pomdp.states.begin(); state != pomdp.states.end(); state++) {
			if (goal != *state) {
				if (state->obs_weight == 0) {
					continue;
				}
				state->obs_weight = calculateReward(*state, lookahead - 1)
						+ (1 / euclideanDistance2D(goal, *state) * 0.95)
						+ state->obs_weight;
				if (state->obs_weight > reward) {
					reward = state->obs_weight;
				}
			} else {
			}
		}
	}*/
	ROS_INFO("Calculated a reward of %f.", reward);
	return reward;
}
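Example #2 calls euclideanDistance2D on two State objects rather than on raw points. Here is a minimal sketch of that overload, assuming State carries a geometry_msgs::Pose member named pose (as the field accesses above suggest); the actual signature and member layout may differ.

#include <cmath>

// Hypothetical sketch, not the project's actual implementation:
// planar distance between the poses of two states, ignoring z.
double euclideanDistance2D(const State &a, const State &b) {
	double dx = a.pose.position.x - b.pose.position.x;
	double dy = a.pose.position.y - b.pose.position.y;
	return std::sqrt(dx * dx + dy * dy);
}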