Example #1
void SolverNode::js_callback(const sensor_msgs::JointState& msg)
{
  // Resolve the joint-name -> index mapping on the first message.
  if (joint_ids.empty())
    lookupJointIDs(msg.name);

  std::vector<double> pos(ndof);
  std::vector<double> vel(ndof);
  std::vector<double> acc(ndof);
  std::vector<double> effort(ndof);
  std::vector<double> computed_effort(ndof);

  // Time step since the previous JointState message.
  double dt = (msg.header.stamp - last_time).toSec();

  for (uint i = 0; i < ndof; i++) {
    pos[i] = msg.position[joint_ids[i]];

    // Velocity by backward finite difference of position.
    vel[i] = (pos[i] - last_pos[i]) / dt;

    // Unfiltered finite-difference acceleration.
    computed_effort[i] = (vel[i] - last_vel[i]) / dt;
    // Acceleration: the same finite difference, low-pass filtered to
    // suppress differentiation noise.
    acc[i] = acc_filter[i]->getNextFilteredValue((vel[i] - last_vel[i]) / dt);

    // Filtered measured joint effort.
    effort[i] = eff_filter[i]->getNextFilteredValue(msg.effort[joint_ids[i]]);

    last_pos[i] = pos[i];
    last_vel[i] = vel[i];
  }
  last_time = msg.header.stamp;

  solver->updateState(pos, vel, acc);
  solver->getTorques(computed_effort);

  std_msgs::Float64 sigma_msg;
  sigma_msg.data = solver->sigma;
  sigma_pub.publish(sigma_msg);
}
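
The interesting part of this callback is how it recovers velocity and acceleration from sampled positions: backward finite differences, with the noisy second derivative passed through a filter (acc_filter) before it reaches the solver. The excerpt does not show what kind of filter getNextFilteredValue wraps, so the sketch below substitutes a simple first-order exponential smoother and synthetic sine-wave positions; ExpSmoother, its smoothing factor, and the sample data are assumptions for illustration only.

#include <cmath>
#include <cstdio>

// Hypothetical stand-in for the node's filter: first-order exponential
// smoothing, y_k = alpha * x_k + (1 - alpha) * y_{k-1}.
struct ExpSmoother {
  double alpha, y;
  bool primed;
  explicit ExpSmoother(double a) : alpha(a), y(0.0), primed(false) {}
  double getNextFilteredValue(double x) {
    y = primed ? alpha * x + (1.0 - alpha) * y : x;
    primed = true;
    return y;
  }
};

int main() {
  const double dt = 0.01;                  // assumed fixed sample period
  ExpSmoother acc_filter(0.2);

  double last_pos = 0.0, last_vel = 1.0;   // sin(0) and cos(0): avoids a startup spike
  for (int k = 1; k <= 100; ++k) {
    double t = k * dt;
    double pos = std::sin(t);              // synthetic joint position

    // Backward finite differences, as in js_callback.
    double vel = (pos - last_pos) / dt;
    double raw_acc = (vel - last_vel) / dt;
    double acc = acc_filter.getNextFilteredValue(raw_acc);

    last_pos = pos;
    last_vel = vel;

    if (k % 25 == 0)
      std::printf("t=%.2f  vel=%+.3f  acc(filtered)=%+.3f\n", t, vel, acc);
  }
  return 0;
}

Backward differences lag the true derivative by roughly half a sample period, and any heavier smoothing adds further lag; that trade-off is unavoidable when differentiating measured joint states.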
Example #2
File: pun_old.c  Project: mduwalsh/SA
inline double expectedPayoffAfterPunishment(int g, int i, double *cs, double f, int (*lt)[3], int *idx, double (*ust)[TRAITS])
/*
 * g: group index
 * i: individual index
 * cs: candidate strategy vector
 * f: new payoff
 * lt:  punishment lookup table
 * idx: indices of lookup table
 * ust: unchanged current strategy array
 */
{
  int j, s, p, q;
  double dl;
  s = 2*(GS[g]-1);                                                       // size of the punishment lookup table
  
#if AGGR
  double th;
  th = cs[1];                                                            // temporarily store candidate threshold strategy
  cs[1] = ust[i][1];                                                     // start from the old threshold; the candidate threshold is restored below if i actually punishes someone
#endif
  for(p = 0; p < s; p++){                                                // apply each punishment event
    q = lt[idx[p]][0];                                                   // punisher
    j = lt[idx[p]][1];                                                   // punishee

    if(!strengthcheck(j,q))                                              // if the strength of j (punishee) is less than the aggressiveness of q (punisher)
    {
#if AGGR                                                                 // only needed when AGGR = 1; for AGGR = 0 the threshold update is already decided
      if(q == i){                                                        // if the punisher is i, switch to the candidate threshold
        cs[1] = th;
      }
#endif
      dl = threshold(q) - effort(j);
      if( dl > 0){                                                       // if j is not making enough effort
        if(q == i){                                                      // i is the punisher
          f -= E*dl*Sij[g][q][j];                                        // cost of punishing, paid by q
          f = MAX(0.00, f);
        }
        else{                                                            // i is the punishee
          f -= MIN(f, E*dl);                                             // punishment received from q
        }
      }
    }
  }
  return f;                                                              // return expected payoff
}
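
Stripped of the project's globals (GS, Sij, the lookup table, AGGR), the loop applies one simple rule per (punisher, punishee) pair: if the punishee's effort falls short of the punisher's threshold by dl > 0, the punisher pays a cost E*dl scaled by Sij (clamped at zero), and the punishee loses min(f, E*dl). The sketch below isolates just that rule; apply_punishment, its parameter names, and the toy numbers are hypothetical and not part of the SA project.

#include <stdio.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-alone version of the payoff update done for a single
 * (punisher q, punishee j) pair inside expectedPayoffAfterPunishment.
 * E plays the role of the project's punishment severity constant. */
static double apply_punishment(double f, int i_is_punisher,
                               double threshold_q, double effort_j,
                               double E, double Sij_qj)
{
  double dl = threshold_q - effort_j;      /* shortfall of the punishee */
  if (dl <= 0.0)
    return f;                              /* effort is sufficient: payoff unchanged */

  if (i_is_punisher) {
    f -= E * dl * Sij_qj;                  /* cost of punishing, paid by q */
    f = MAX(0.0, f);
  } else {
    f -= MIN(f, E * dl);                   /* punishment received by the punishee */
  }
  return f;
}

int main(void)
{
  /* Toy numbers, purely illustrative. */
  double f = 1.0;

  f = apply_punishment(f, /*i_is_punisher=*/0, 0.8, 0.5, /*E=*/0.5, /*Sij=*/1.0);
  printf("payoff after being punished: %.3f\n", f);        /* 1.0 - min(1.0, 0.15) = 0.850 */

  f = apply_punishment(f, /*i_is_punisher=*/1, 0.8, 0.5, 0.5, 1.0);
  printf("payoff after paying punishment cost: %.3f\n", f); /* 0.85 - 0.15 = 0.700 */
  return 0;
}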