void DiffDriveRobot::spin(){ if (goals_.size()==0) return; if (stop_mode_){ stop(); return; } diff_drive_robot::Goal smoothed_goal, current_goal, next_goal; current_goal = goals_[goal_idx_]; if ((distance_to_goal(current_goal)<R_MERGE_END) && (goal_idx_ < goals_.size() - 1)){ goal_idx_++; current_goal = goals_[goal_idx_]; std::cout<<"Moving to goal"<<goal_idx_<<std::endl; } std::cout<<"Distance to goal = "<<distance_to_goal(current_goal)<<std::endl; if (goal_idx_<goals_.size()-1) next_goal = goals_[goal_idx_+1]; else next_goal = goals_[goal_idx_]; //smooth transition between consecutive goals. if ((distance_to_goal(current_goal)<= R_MERGE_START) && (distance_to_goal(current_goal)>= R_MERGE_END)){ double d = distance_to_goal(current_goal); smoothed_goal.x = ((d-R_MERGE_END)/R_MERGE_END)*current_goal.x + ((R_MERGE_START-d)/R_MERGE_END)*next_goal.x ; smoothed_goal.y = ((d-R_MERGE_END)/R_MERGE_END)*current_goal.y + ((R_MERGE_START-d)/R_MERGE_END)*next_goal.y ; smoothed_goal.theta = ((d-R_MERGE_END)/R_MERGE_END)*current_goal.theta + ((R_MERGE_START-d)/R_MERGE_END)*next_goal.theta; } else smoothed_goal = current_goal; //for all other cases double dx = smoothed_goal.x-x_; double dy = smoothed_goal.y-y_; double eps = atan2(dy, dx); double rho = hypot(dx, dy); double gamma = wrap_angle(eps - theta_); double delta = wrap_angle(smoothed_goal.theta - gamma - theta_); std::cout<<"rho = "<<rho<<", gamma = "<<gamma*180.0/PI<<", delta = "<<delta*180.0/PI<<std::endl; double v_desired = K_RHO * rho; double w_desired = K_GAMMA * gamma + K_DELTA * delta; std::cout<<"v_desired = "<<v_desired<< ", w_desired = "<<w_desired*180.0/PI; // check velocity limits if (fabs(v_desired)>v_max_) v_desired = copysign(v_max_, v_desired); if (fabs(w_desired)>w_max_) w_desired = copysign(w_max_, w_desired); // check accelration limits if (fabs(v_desired-v_)/dt_ > a_max_) v_desired = v_ + copysign(a_max_*dt_, v_desired-v_); if (fabs(w_desired-w_)/dt_ > alpha_max_) w_desired = w_ + 
copysign(alpha_max_*dt_, w_desired-w_); w_ = w_desired; v_ = v_desired *(1.0 -0.25*fabs(w_)/w_max_); std::cout<<", v = "<<v_<<", w = "<<w_*180.0/PI<<std::endl; }
// Brute-force admissible multi-agent heuristic: enumerate every assignment of
// unvisited goals to active agents (numActiveAgents^numgoals_ assignments,
// each encoded as a number in base numActiveAgents) and keep the minimum cost.
//
// cont_state           continuous state; cont_state[4*numagents_ + g] == -1
//                      marks goal g as not yet visited
// activeAgents_indices indices of the agents still participating
// heurs (out)          heurs[0] = combined heuristic (SUM or MAX over agents,
//                      per costfunc_), heurs[1+i] = agent i's contribution
void EGraphManager<HeuristicType>::bruteforceHeuristic(std::vector<double>& cont_state,
                                                       std::vector<int>& activeAgents_indices,
                                                       std::vector<int>& heurs){
  HeuristicType heur_coord;
  HeuristicType heur_coord_agent(2,0);
  heurs.assign(1+numagents_, 0);
  egraph_env_->projectToHeuristicSpace(cont_state, heur_coord);
  bruteforceHeuristicCtr ++;

  int numActiveAgents = activeAgents_indices.size();

  // Number of goal->agent assignments. Exact integer power: the original used
  // pow(), whose double result truncated to int can be off by one.
  int numassignments = 1;
  for(int goal_i = 0; goal_i < numgoals_; goal_i++)
    numassignments *= numActiveAgents;

  // Heuristic for the assignment currently being evaluated, and the best
  // (minimum) complete assignment seen so far.
  std::vector<int> heur_assignment(1+numagents_, 0);
  std::vector<int> bestheursofar(1+numagents_, 0);
  bestheursofar[0] = std::numeric_limits<int>::max();
  // Index of the best assignment; previously undeclared, which broke the
  // DEBUG_HEUR build below.
  int min_assignment_index = 0;
  Matrix assignment(numagents_);
  // Distance from each agent to every goal, precomputed once.
  Matrix distance_to_goal(numagents_);
  std::vector<int>::iterator agent_iterator;
  EGraphHeuristicPtr egraph_heur;

  for(agent_iterator = activeAgents_indices.begin();
      agent_iterator != activeAgents_indices.end(); agent_iterator++){
    assignment[*agent_iterator].resize(numgoals_ + 1, 0);
    distance_to_goal[*agent_iterator].resize(numgoals_, -1);
    // Agent's 2D coordinate in heuristic space.
    heur_coord_agent[0] = heur_coord[2*(*agent_iterator)];
    heur_coord_agent[1] = heur_coord[2*(*agent_iterator)+1];
    for(int goal_i = 0; goal_i < numgoals_; goal_i++){
      egraph_heur = egraph_heurs_[*agent_iterator][goal_i];
      distance_to_goal[*agent_iterator][goal_i] = egraph_heur->getHeuristic(heur_coord_agent);
    }
  }

  for(int assg_i = 0; assg_i < numassignments; assg_i ++){
    bool ASSG_FAILFAST_FLAG = false;
    heur_assignment.assign(1+numagents_, 0);
    int index = assg_i;
    // Reset per-agent goal counts for this assignment.
    for(agent_iterator = activeAgents_indices.begin();
        agent_iterator != activeAgents_indices.end(); agent_iterator++){
      assignment[*agent_iterator][0] = 0;
    }
    // Decode the assignment: digit goal_i of index (base numActiveAgents)
    // selects which active agent handles goal_i. Visited goals are skipped.
    for(int goal_i = 0; goal_i < numgoals_; goal_i++){
      int activeagent_index = index % numActiveAgents;
      if(cont_state[4*numagents_ + goal_i] == -1){
        // assignment[agent][0] stores the number of goals assigned to agent;
        // the goals themselves follow in slots 1..count.
        assignment[activeAgents_indices[activeagent_index]][0]++;
        int ctr = assignment[activeAgents_indices[activeagent_index]][0];
        assignment[activeAgents_indices[activeagent_index]][ctr] = goal_i;
      }
      index = index / numActiveAgents;
    }
    // Evaluate this assignment agent by agent, with fail-fast pruning.
    for(agent_iterator = activeAgents_indices.begin();
        agent_iterator != activeAgents_indices.end(); agent_iterator++){
      heur_coord_agent[0] = heur_coord[2*(*agent_iterator)];
      heur_coord_agent[1] = heur_coord[2*(*agent_iterator) + 1];
      heur_assignment[1+*agent_iterator] =
          bruteforceHeuristicPerAgent((*agent_iterator), heur_coord_agent,
                                      assignment[(*agent_iterator)],
                                      distance_to_goal[(*agent_iterator)]);
      switch(costfunc_) {
        case mas_config::SUM:
          heur_assignment[0] += heur_assignment[1 + (*agent_iterator)];
          break;
        case mas_config::MAX:
          heur_assignment[0] = std::max(heur_assignment[0],
                                        heur_assignment[1 + (*agent_iterator)]);
          break;
      }
      if(heur_assignment[0] > bestheursofar[0]){
        // Already worse than the best complete assignment:
        // no need to evaluate any other agent.
        ASSG_FAILFAST_FLAG = true;
        break;
      }
    }
    if(ASSG_FAILFAST_FLAG)
      continue;
    if(heur_assignment[0] < bestheursofar[0]){
      bestheursofar = heur_assignment;
      min_assignment_index = assg_i;   // remember which assignment won
    }
  }

  // Admissible heuristic is the min over all possible assignments.
  heurs = bestheursofar;

#ifdef DEBUG_HEUR
  // Decode and print the winning assignment (same base-numActiveAgents
  // decoding as above); -1 marks an already-visited, unassigned goal.
  std::vector<int> best_assignment(numgoals_);
  int index = min_assignment_index;
  printf("Best Assignment: ");
  for(int goal_i = 0; goal_i < numgoals_; goal_i++){
    int activeagent_index = index % numActiveAgents;
    if(cont_state[4*numagents_ + goal_i] >= 0)
      best_assignment[goal_i] = -1;
    else
      best_assignment[goal_i] = activeAgents_indices[activeagent_index];
    index = index / numActiveAgents;
    printf("%d ", best_assignment[goal_i]);
  }
#endif
}