Example 1
void sol_body_e(float e[4],
                const struct s_vary *vary,
                const struct v_body *bp,
                float dt)
{
    /* A body attached to a mover may follow an oriented path. */
    if (bp->mj >= 0)
    {
        const struct v_move *mp = vary->mv + bp->mj;

        const struct b_path *pp = vary->base->pv + mp->pi;
        const struct b_path *pq = vary->base->pv + pp->pi;

        if (pp->fl & P_ORIENTED || pq->fl & P_ORIENTED)
        {
            float s;

            /* Advance the path parameter only while the path is moving. */
            if (vary->pv[mp->pi].f)
                s = (mp->t + dt) / pp->t;
            else
                s = mp->t / pp->t;

            /* Slerp between the endpoint orientations, easing if requested. */
            q_slerp(e, pp->e, pq->e, pp->s ? erp(s) : s);

            return;
        }
    }

    /* Default to the identity quaternion. */
    e[0] = 1.0f;
    e[1] = 0.0f;
    e[2] = 0.0f;
    e[3] = 0.0f;
}
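In the call above, `pp->s ? erp(s) : s` optionally eases the path parameter before the slerp so oriented movers speed up and slow down smoothly at the ends of a path. A minimal sketch of such an easing function (a smoothstep; the exact body is an assumption, not Neverball's verbatim source):

/* Hedged sketch: smoothstep ease on [0, 1], assumed to approximate the
 * erp() used above. */
static float erp_sketch(float t)
{
    return 3.0f * t * t - 2.0f * t * t * t;
}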
Example 2
void sm_frame(sm_t *sm, int from, int to, float frame) {
	int i, frame0, frame1;
	/* A range of -1 means the whole animation. */
	if(from == -1) from = 0;
	if(to == -1) to = sm->num_frame;
	/* Split into an integer frame index and a fractional blend weight. */
	frame0 = (int)frame;
	frame -= frame0;
	frame0 += from;
	/* Wrap both frame indices into [from, to) so the animation loops. */
	if(frame0 >= to) frame0 = ((frame0 - from) % (to - from)) + from;
	frame1 = frame0 + 1;
	if(frame1 >= to) frame1 = from;
	for(i = 0; i < sm->num_bone; i++) {
		float m0[16],m1[16];
		float xyz0[3],xyz1[3],xyz[3],rot[4];
		/* Blend translations linearly and rotations by slerp, then
		   compose the bone matrix as translation * rotation. */
		v_scale(sm->frame[frame0][i].xyz, 1.0f - frame, xyz0);
		v_scale(sm->frame[frame1][i].xyz, frame, xyz1);
		v_add(xyz0, xyz1, xyz);
		q_slerp(sm->frame[frame0][i].rot, sm->frame[frame1][i].rot, frame, rot);
		m_translate(xyz, m0);
		q_to_matrix(rot, m1);
		m_multiply(m0, m1, sm->bone[i].matrix);
	}
	for(i = 0; i < sm->num_surface; i++) {
		int j;
		sm_surface_t *s = sm->surface[i];
		for(j = 0; j < s->num_vertex; j++) {
			int k;
			/* Accumulate the weighted skin position and normal. */
			v_set(0, 0, 0, s->vertex[j].xyz);
			v_set(0, 0, 0, s->vertex[j].normal);
			for(k = 0; k < s->vertex[j].num_weight; k++) {
				float v[3];
				sm_weight_t *w = &s->vertex[j].weight[k];
				v_transform(w->xyz,sm->bone[w->bone].matrix,v);
				v_scale(v,w->weight,v);
				v_add(s->vertex[j].xyz,v,s->vertex[j].xyz);
				v_transform_normal(w->normal,sm->bone[w->bone].matrix,v);
				v_scale(v,w->weight,v);
				v_add(s->vertex[j].normal,v,s->vertex[j].normal);
			}
		}
	}
}
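Note the argument order: this engine's q_slerp takes (from, to, t, out), while the quatlib q_slerp in the remaining examples takes (dst, start, end, t). For reference when porting between the two, here is a self-contained sketch of spherical linear interpolation in the (from, to, t, out) convention (a hypothetical helper; the shortest-arc sign flip and small-angle fallback are standard practice, not this engine's code):

#include <math.h>

/* Hedged sketch: slerp between unit quaternions (layout-agnostic, since the
 * formula treats all four components uniformly). */
void q_slerp_sketch(const float from[4], const float to[4], float t, float out[4])
{
	float cosom = from[0]*to[0] + from[1]*to[1] + from[2]*to[2] + from[3]*to[3];
	float sign = 1.0f, k0, k1;

	/* Take the shorter arc. */
	if(cosom < 0.0f) {
		cosom = -cosom;
		sign = -1.0f;
	}
	if(1.0f - cosom > 1e-6f) {
		float omega = acosf(cosom);
		float sinom = sinf(omega);
		k0 = sinf((1.0f - t) * omega) / sinom;
		k1 = sinf(t * omega) / sinom;
	} else {
		/* Nearly parallel: plain linear interpolation is safe. */
		k0 = 1.0f - t;
		k1 = t;
	}
	out[0] = k0*from[0] + sign*k1*to[0];
	out[1] = k0*from[1] + sign*k1*to[1];
	out[2] = k0*from[2] + sign*k1*to[2];
	out[3] = k0*from[3] + sign*k1*to[3];
}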
Example 3
void VRPNTrackerInstance::getAccReport(vrpn_TRACKERACCCB* cpy, timeval* ts, int in_sensor)
{
	TrackerAccList::iterator it;
	vrpn_TRACKERACCCB* last = NULL;
	for ( it = acc.begin(); it != acc.end(); it++ )
	{
		vrpn_TRACKERACCCB* curr = *it;
		if (curr->sensor == in_sensor)
		{
			/* No timestamp given: return the most recent matching report. */
			if (ts == NULL)
			{
				*cpy = *curr;
				return;
			}
			/* The requested time is newer than this report, so interpolate
			   between it and the previous (newer) report, if any. */
			else if (vrpn_TimevalGreater(*ts, curr->msg_time))
			{
				if (last)
				{
					/* Normalized position of *ts between the two reports. */
					double val =
						vrpn_TimevalMsecs(vrpn_TimevalDiff(*ts, curr->msg_time)) /
						vrpn_TimevalMsecs(vrpn_TimevalDiff(last->msg_time, curr->msg_time));
					cpy->acc[0] = curr->acc[0] + val*(last->acc[0] - curr->acc[0]);
					cpy->acc[1] = curr->acc[1] + val*(last->acc[1] - curr->acc[1]);
					cpy->acc[2] = curr->acc[2] + val*(last->acc[2] - curr->acc[2]);
					q_slerp(cpy->acc_quat, curr->acc_quat, last->acc_quat, val); 
					cpy->acc_quat_dt = curr->acc_quat_dt + val*(last->acc_quat_dt - curr->acc_quat_dt);
					return;
				}
				else
				{
					*cpy = *curr;
					return;
				}
			}
			else
				last = curr;
		}
	}
}
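The blend factor `val` is the normalized position of the requested time between the two bracketing reports: the linear acceleration terms are lerped and the quaternions are slerped by the same factor. For example, if `*ts` falls 30 ms after `curr->msg_time` and `last->msg_time` is 40 ms after it, `val` is 30/40 = 0.75. The factor in isolation (hypothetical helper, shown only to make the formula explicit):

/* Hedged sketch: 0 at the older sample time t0, 1 at the newer time t1. */
double blend_factor(double t, double t0, double t1)
{
	return (t - t0) / (t1 - t0);
}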
Example 4
void vrpn_IMU_SimpleCombiner::update_matrix_based_on_values(double time_interval)
{
  //==================================================================
  // Adjust the orientation based on the rotational velocity, which is
  // measured in the rotated coordinate system.  We need to rotate the
  // difference vector back to the canonical orientation, apply the
  // orientation change there, and then rotate back.
  // Be sure to scale by the time value.
  q_type forward, inverse;
  q_copy(forward, d_quat);
  q_invert(inverse, forward);
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_type delta;
  q_from_euler(delta,
    time_interval * d_rotational_vel.values[Q_Z],
    time_interval * d_rotational_vel.values[Q_Y],
    time_interval * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_type canonical;
  q_mult(canonical, delta, inverse);
  q_mult(canonical, forward, canonical);
  q_mult(d_quat, canonical, d_quat);

  //==================================================================
  // To the extent that the acceleration vector's magnitude is equal
  // to the expected gravity, rotate the orientation so that the vector
  // points downward.  This is measured in the rotated coordinate system,
  // so we need to rotate back to canonical orientation and apply
  // the difference there and then rotate back.  The rate of rotation
  // should be as specified in the gravity-rotation-rate parameter so
  // we don't swing the head around too quickly but only slowly re-adjust.

  double accel = sqrt(
      d_acceleration.values[0] * d_acceleration.values[0] +
      d_acceleration.values[1] * d_acceleration.values[1] +
      d_acceleration.values[2] * d_acceleration.values[2] );
  double diff = fabs(accel - 9.80665);

  // Only adjust if we're close enough to the expected gravity
  // @todo In a more complicated IMU tracker, base this on state and
  // error estimates from a Kalman or other filter.
  double scale = 1.0 - diff;
  if (scale > 0) {
    // Get a new forward and inverse matrix from the current, just-
    // rotated matrix.
    q_copy(forward, d_quat);

    // Change how fast we adjust based on how close we are to the
    // expected value of gravity.  Then further scale this by the
    // amount of time since the last estimate.
    double gravity_scale = scale * d_gravity_restore_rate * time_interval;

    // Rotate the gravity vector by the estimated transform.
    // We expect this direction to match the global down (-Y) vector.
    q_vec_type gravity_global;
    q_vec_set(gravity_global, d_acceleration.values[0],
        d_acceleration.values[1], d_acceleration.values[2]);
    q_vec_normalize(gravity_global, gravity_global);
    q_xform(gravity_global, forward, gravity_global);
    //printf("  XXX Gravity: %lg, %lg, %lg\n", gravity_global[0], gravity_global[1], gravity_global[2]);

    // Determine the rotation needed to take gravity and rotate
    // it into the direction of -Y.
    q_vec_type neg_y;
    q_vec_set(neg_y, 0, -1, 0);
    q_type rot;
    q_from_two_vecs(rot, gravity_global, neg_y);

    // Scale the rotation by the fraction of the orientation we
    // should remove based on the time that has passed, how well our
    // gravity vector matches expected, and the specified rate of
    // correction.
    static q_type identity = { 0, 0, 0, 1 };
    q_type scaled_rot;
    q_slerp(scaled_rot, identity, rot, gravity_scale);
    //printf("XXX Scaling gravity vector by %lg\n", gravity_scale);

    // Rotate by this angle.
    q_mult(d_quat, scaled_rot, d_quat);

    //==================================================================
    // If we are getting compass data, and to the extent that the
    // acceleration vector's magnitude is equal to the expected gravity,
    // compute the cross product of the cross product to find the
    // direction of north perpendicular to down.  This is measured in
    // the rotated coordinate system, so we need to rotate back to the
    // canonical orientation and do the comparison there.
    // The fraction of rotation should be as specified in the
    // magnetometer-rotation-rate parameter so we don't swing the head
    // around too quickly but only slowly re-adjust.

    if (d_magnetometer.ana != NULL) {
      // Get a new forward and inverse matrix from the current, just-
      // rotated matrix.
      q_copy(forward, d_quat);

      // Find the North vector that is perpendicular to gravity by
      // clearing its Y axis to zero and renormalizing.
      q_vec_type magnetometer;
      q_vec_set(magnetometer, d_magnetometer.values[0],
        d_magnetometer.values[1], d_magnetometer.values[2]);
      q_vec_type magnetometer_global;
      q_xform(magnetometer_global, forward, magnetometer);
      magnetometer_global[Q_Y] = 0;
      q_vec_type north_global;
      q_vec_normalize(north_global, magnetometer_global);
      //printf("  XXX north_global: %lg, %lg, %lg\n", north_global[0], north_global[1], north_global[2]);

      // Determine the rotation needed to take north and rotate it
      // into the direction of negative Z.
      q_vec_type neg_z;
      q_vec_set(neg_z, 0, 0, -1);
      q_from_two_vecs(rot, north_global, neg_z);

      // Change how fast we adjust based on how close we are to the
      // expected value of gravity.  Then further scale this by the
      // amount of time since the last estimate.
      double north_rate = scale * d_north_restore_rate * time_interval;

      // Scale the rotation by the fraction of the orientation we
      // should remove based on the time that has passed, how well our
      // gravity vector matches expected, and the specified rate of
      // correction.
      static q_type identity = { 0, 0, 0, 1 };
      q_slerp(scaled_rot, identity, rot, north_rate);

      // Rotate by this angle.
      q_mult(d_quat, scaled_rot, d_quat);
    }
  }

  //==================================================================
  // Compute and store the velocity information
  // This will be in the rotated coordinate system, so we need to
  // rotate back to the identity orientation before storing.
  // Convert from radians/second into a quaternion rotation as
  // rotated in a hundredth of a second and set the rotational
  // velocity dt to a hundredth of a second so that we don't
  // risk wrapping.
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_from_euler(delta,
    1e-2 * d_rotational_vel.values[Q_Z],
    1e-2 * d_rotational_vel.values[Q_Y],
    1e-2 * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_mult(canonical, delta, inverse);
  q_mult(vel_quat, forward, canonical);
  vel_quat_dt = 1e-2;
}
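Both the gravity and compass corrections rely on the same idiom: slerp from the identity quaternion toward the full correction rotation and apply only a small, rate-scaled fraction each update, so the estimate re-converges gradually rather than snapping. The idiom in isolation, using quatlib calls (the wrapper and the clamp are assumptions, not VRPN source):

#include <quat.h>  /* quatlib; assumed available */

/* Hedged sketch: blend only `fraction` of correction `rot` into `q`. */
static void apply_partial_rotation(q_type q, q_type rot, double fraction)
{
  static q_type identity = { 0, 0, 0, 1 };
  q_type scaled_rot;

  if (fraction < 0.0) fraction = 0.0;  /* assumption: clamp to [0, 1] */
  if (fraction > 1.0) fraction = 1.0;

  q_slerp(scaled_rot, identity, rot, fraction);
  q_mult(q, scaled_rot, q);  /* aliased q_mult, as in the example above */
}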
Example 5
void vrpn_Tracker_DeadReckoning_Rotation::sendNewPrediction(vrpn_int32 sensor)
{
    //========================================================================
    // Figure out which rotation state we're supposed to use.
    if (sensor >= d_numSensors) {
        send_text_message(vrpn_TEXT_WARNING)
            << "sendNewPrediction: Asked for sensor " << sensor
            << " but I only have " << d_numSensors
            << " sensors.  Discarding.";
        return;
    }
    vrpn_Tracker_DeadReckoning_Rotation::RotationState &state =
        d_rotationStates[sensor];

    //========================================================================
    // If we haven't had a tracker report yet, nothing to send.
    if (state.d_lastReportTime.tv_sec == 0) {
        return;
    }

    //========================================================================
    // If we don't have permission to estimate velocity and haven't gotten it
    // either, then we just pass along the report.
    if (!state.d_receivedAngularVelocityReport && !d_estimateVelocity) {
        report_pose(sensor, state.d_lastReportTime, state.d_lastPosition,
                    state.d_lastOrientation);
        return;
    }

    //========================================================================
    // Estimate the future orientation based on the current angular velocity
    // estimate and the last reported orientation.  Predict it into the future
    // the amount we've been asked to.

    // Start with the previous orientation.
    q_type newOrientation;
    q_copy(newOrientation, state.d_lastOrientation);

    // Rotate it by the amount to rotate once for every integral multiple
    // of the rotation time we've been asked to go.
    double remaining = d_predictionTime;
    while (remaining > state.d_rotationInterval) {
        q_mult(newOrientation, state.d_rotationAmount, newOrientation);
        remaining -= state.d_rotationInterval;
    }

    // Then rotate it by the remaining fractional amount.
    double fractionTime = remaining / state.d_rotationInterval;
    q_type identity = { 0, 0, 0, 1 };
    q_type fractionRotation;
    q_slerp(fractionRotation, identity, state.d_rotationAmount, fractionTime);
    q_mult(newOrientation, fractionRotation, newOrientation);

    //========================================================================
    // Find out the future time for which we will be predicting by adding the
    // prediction interval to our last report time.
    struct timeval future_time;
    struct timeval delta;
    delta.tv_sec = static_cast<unsigned long>(d_predictionTime);
    double remainder = d_predictionTime - delta.tv_sec;
    delta.tv_usec = static_cast<unsigned long>(remainder * 1e6);
    future_time = vrpn_TimevalSum(delta, state.d_lastReportTime);

    //========================================================================
    // Pack our predicted tracker report for this future time.
    // Use the position portion of the report unaltered.
    if (0 != report_pose(sensor, future_time, state.d_lastPosition, newOrientation)) {
      fprintf(stderr, "vrpn_Tracker_DeadReckoning_Rotation::sendNewPrediction(): Can't report pose\n");
    }
}
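The prediction splits the look-ahead into whole rotation intervals, applied by repeated multiplication, plus a fractional remainder applied by slerping from identity, which keeps every slerp parameter inside [0, 1]. The same stepping logic as a free-standing sketch (names are hypothetical; quatlib assumed; `interval` must be positive):

#include <quat.h>  /* quatlib; assumed available */

/* Hedged sketch: advance orientation `q` by `dt` seconds, where `step`
 * is the rotation accumulated over each `interval` seconds. */
static void predict_orientation(q_type q, q_type step, double interval, double dt)
{
    static q_type identity = { 0, 0, 0, 1 };
    q_type fraction_rot;

    /* Whole intervals first... */
    while (dt > interval) {
        q_mult(q, step, q);
        dt -= interval;
    }

    /* ...then the fractional remainder, always with t in [0, 1]. */
    q_slerp(fraction_rot, identity, step, dt / interval);
    q_mult(q, fraction_rot, q);
}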