Example #1
void vrpn_Tracker_Crossbow::process_packet(const raw_packet &packet) {
	// Use the current time for a timestamp.
	vrpn_gettimeofday(&timestamp, NULL);

	// Clear the position record.
	memset(pos, 0, sizeof(pos));
	
	// Calculate the current orientation. (We don't know yaw, so report 0.)
	double pitch = convert_scalar(packet.pitch_angle, 180.0f) * DEGREES_TO_RADIANS;
	double roll = convert_scalar(packet.roll_angle, 180.0f) * DEGREES_TO_RADIANS;
	xb_quat_from_euler(d_quat, pitch, roll);

	// Clear the linear velocity; we don't know it.
	memset(vel, 0, sizeof(vel));

	// Calculate the current angular velocity from the yaw rate.
	// It's in degrees per second, so convert to radians per second.
	q_from_euler(vel_quat, convert_scalar(packet.yaw_rate, 1.5f * ang_accel_range) * DEGREES_TO_RADIANS, 0, 0);
	vel_quat_dt = 1;

	// Calculate the current acceleration vector
	acc[0] = convert_scalar(packet.accel_x, 1.5f * lin_accel_range) * MPSS_PER_G;
	acc[1] = convert_scalar(packet.accel_y, 1.5f * lin_accel_range) * MPSS_PER_G;
	acc[2] = convert_scalar(packet.accel_z, 1.5f * lin_accel_range) * MPSS_PER_G;

	//printf("RawAccel = %hd\tG-Range = %f\tDerivedZ = %f\n", packet.accel_z, lin_accel_range, acc[2]);

	// The angular acceleration is zero: identity quaternion (0, 0, 0, 1) with dt = 1.
	acc_quat[0] = acc_quat[1] = acc_quat[2] = 0;
	acc_quat[3] = acc_quat_dt = 1;
}
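
Note: the convert_scalar helper isn't shown in this example. Judging by the calls above (pitch/roll scaled by 180 degrees, accelerations by 1.5x the configured G-range), it presumably maps a raw signed 16-bit word linearly onto a symmetric range. A minimal sketch under that assumption (hypothetical, not the actual Crossbow implementation):

#include <cstdint>

// Hypothetical sketch of convert_scalar: interpret a raw signed 16-bit
// sensor word as a fraction of full scale, i.e. map [-32768, 32767]
// onto approximately [-scale, +scale].
static double convert_scalar(int16_t raw, double scale)
{
	return raw * scale / 32768.0;	// 2^15 counts per full-scale value
}

Under that reading, a raw pitch word of 16384 would decode to 90 degrees before the DEGREES_TO_RADIANS conversion.
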
Example #2
void vrpn_Tracker_ViewPoint::get_tracker()
{
	// Get information for each eye
	for (int i = 0; i < 2; i++) {
		// The sensor
		d_sensor = i;


		// Which eye?
		VPX_EyeType eye;
		if (d_sensor == 0) eye = EYE_A;
		else eye = EYE_B;


		// Get tracker data from the DLL
		VPX_RealPoint gp, cv, ga;
		if (useSmoothedData) {
			// Use smoothed data, when available
			VPX_GetGazePointSmoothed2(eye, &gp);
			VPX_GetComponentVelocity2(eye, &cv);	// Always smoothed
			VPX_GetGazeAngleSmoothed2(eye, &ga);
		}
		else {
			// Use raw data
			VPX_GetGazePoint2(eye, &gp);
			VPX_GetComponentVelocity2(eye, &cv);	// Always smoothed
			VPX_GetGazeAngle2(eye, &ga);
		}


		// Set the tracker position from the gaze point
		pos[0] = gp.x;
		pos[1] = gp.y;
		pos[2] = 0.0;


		// Set the tracker velocity from the eye velocity
		vel[0] = cv.x;
		vel[1] = cv.y;
		vel[2] = 0.0;


		// Convert the gaze angle to a quaternion
		q_from_euler(d_quat, 0.0, Q_DEG_TO_RAD(ga.y), Q_DEG_TO_RAD(ga.x));


		// Send the data for this eye
		send_report();
	}
}
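
A note on the q_from_euler call above: quatlib's q_from_euler takes (yaw, pitch, roll) in radians, with yaw about Z applied first, which is why the gaze angles are converted with Q_DEG_TO_RAD and yaw is left at zero. A small sketch restating that conversion, using only the quat.h calls already used above:

#include <quat.h>

// Sketch: wrap the gaze-angle-to-quaternion conversion used above.
static void gaze_angles_to_quat(q_type dest, double gaze_x_deg, double gaze_y_deg)
{
	q_from_euler(dest,
	             0.0,                       // yaw: not reported by this tracker
	             Q_DEG_TO_RAD(gaze_y_deg),  // vertical gaze angle as pitch
	             Q_DEG_TO_RAD(gaze_x_deg)); // horizontal gaze angle as roll
}
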
Example #3
void vrpn_Tracker_OSVRHackerDevKit::on_data_received(std::size_t bytes,
                                                     vrpn_uint8 *buffer)
{
    if (bytes != 32 && bytes != 16) {
        send_text_message(vrpn_TEXT_WARNING)
            << "Received a report " << bytes
            << " bytes in length, but expected it to be 32 or 16 bytes. "
               "Discarding. (May indicate issues with HID!)";
        return;
    }

    vrpn_uint8 version = vrpn_unbuffer_from_little_endian<vrpn_uint8>(buffer);
    /// @todo Verify that version is what we expect.
    vrpn_uint8 msg_seq = vrpn_unbuffer_from_little_endian<vrpn_uint8>(buffer);

    // Signed, 16-bit, fixed-point numbers in Q1.14 format.
    typedef vrpn::FixedPoint<1, 14> FixedPointValue;
    d_quat[Q_X] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();
    d_quat[Q_Y] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();
    d_quat[Q_Z] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();
    d_quat[Q_W] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();

    vrpn_Tracker::timestamp = _timestamp;
    {
        char msgbuf[512];
        int len = vrpn_Tracker::encode_to(msgbuf);
        if (d_connection->pack_message(len, _timestamp, position_m_id,
                                       d_sender_id, msgbuf,
                                       vrpn_CONNECTION_LOW_LATENCY)) {
            fprintf(stderr, "vrpn_Tracker_OSVRHackerDevKit: cannot write "
                            "message: tossing\n");
        }
    }
    if (version >= 2) {
        // We've got angular velocity in this message too
        // Signed Q6.9
        typedef vrpn::FixedPoint<6, 9> VelFixedPoint;
        q_vec_type angVel;
        angVel[0] =
            VelFixedPoint(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
                .get<vrpn_float64>();
        angVel[1] =
            VelFixedPoint(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
                .get<vrpn_float64>();
        angVel[2] =
            VelFixedPoint(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
                .get<vrpn_float64>();

        // Given XYZ radians per second velocity.
        q_from_euler(vel_quat, angVel[2] * vel_quat_dt, angVel[1] * vel_quat_dt,
                     angVel[0] * vel_quat_dt);

        char msgbuf[512];
        int len = vrpn_Tracker::encode_vel_to(msgbuf);
        if (d_connection->pack_message(len, _timestamp, velocity_m_id,
                                       d_sender_id, msgbuf,
                                       vrpn_CONNECTION_LOW_LATENCY)) {
            fprintf(stderr, "vrpn_Tracker_OSVRHackerDevKit: cannot write "
                            "message: tossing\n");
        }
    }
}
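
The quaternion components above arrive as signed Q1.14 fixed point, and the angular velocities as signed Q6.9; decoding either is just a division by 2^(fractional bits). A self-contained sketch of that conversion (the real code uses vrpn::FixedPoint, whose internals aren't shown here):

#include <cstdint>

// Sketch of signed Qm.n fixed-point decoding: the raw 16-bit word holds
// value * 2^n, so dividing by 2^n recovers the real number.
template <int FRACTIONAL_BITS>
double from_fixed_point(int16_t raw)
{
    return static_cast<double>(raw) / (1 << FRACTIONAL_BITS);
}

// Q1.14 quaternion component: from_fixed_point<14>(raw), range [-2, 2)
// Q6.9  angular velocity:     from_fixed_point<9>(raw),  range [-64, 64)
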
Example #4
// This function gets the reports from the InterSense DLL, puts each
// report into the time, sensor, pos and quat fields, and finally calls
// send_report() to send it.
void vrpn_Tracker_InterSense::get_report(void)
{
#ifdef  VRPN_INCLUDE_INTERSENSE
  q_vec_type angles;
  ISD_TRACKING_DATA_TYPE data;
  int i;

  if(ISD_GetTrackingData(m_Handle,&data)) {
    for(int station=0;station<ISD_MAX_STATIONS;station++) {
      if(data.Station[station].NewData == TRUE) {

        d_sensor = station;

        //--------------------------------------------------------------------
        // If we are doing IS900 timestamps, decode the time, add it to the
        // time we zeroed the tracker, and update the report time.  Remember
        // to convert the MILLIseconds from the report into MICROseconds and
        // seconds.
        //--------------------------------------------------------------------

        if (do_is900_timestamps) {
          vrpn_float32 read_time = data.Station[station].TimeStamp;

          struct timeval delta_time;   // Time since the clock was reset

          // Convert from the float in MILLIseconds to the struct timeval
          delta_time.tv_sec = (long)(read_time / 1000);   // Integer truncation to seconds
          read_time -= delta_time.tv_sec * 1000;          // Subtract out what we just counted
          delta_time.tv_usec = (long)(read_time * 1000);  // Convert remainder to MICROseconds

          // Store the current time
          timestamp = vrpn_TimevalSum(is900_zerotime, delta_time);
        } else {
          vrpn_gettimeofday(&timestamp, NULL);  // Set watchdog now
        }

        //--------------------------------------------------------------------
        // If this sensor has an IS900 button on it, decode
        // the button values into the button device and mainloop the button
        // device so that it will report any changes.  Each button is stored
        // in one bit of the byte, with the lowest-numbered button in the
        // lowest bit.
        //--------------------------------------------------------------------

        if (is900_buttons[station]) {
            for (i = 0; i < is900_buttons[station]->number_of_buttons(); i++) {
              is900_buttons[station]->set_button(i, data.Station[station].ButtonState[i]);
            }
            is900_buttons[station]->mainloop();
        }

        //--------------------------------------------------------------------
        // If this sensor has an IS900 analog on it, decode the analog values
        // into the analog device and mainloop the analog device so that it
        // will report any changes.  The first byte holds the unsigned char
        // representation of left/right.  The second holds up/down.  For each,
        // 0 means min (left or rear), 127 means center and 255 means max.
        //--------------------------------------------------------------------

        if (is900_analogs[station]) {
          // Normalize the values to the range -1 to 1
          is900_analogs[station]->setChannelValue(0, (data.Station[station].AnalogData[0] - 127) / 128.0);
          is900_analogs[station]->setChannelValue(1, (data.Station[station].AnalogData[1] - 127) / 128.0);

          // Report the new values
          is900_analogs[station]->report_changes();
          is900_analogs[station]->mainloop();
        }

        // Copy the tracker data into our internal storage before sending
        // (no unit problem, as the Position vector is already in meters; see ISD_STATION_STATE_TYPE).
        // Watch: For some reason, to get consistent rotation and translation axis permutations,
        //        we need a non-direct mapping.
        // RMT: Based on a report from Christian Odom, it seems like the quaternions in the
        //      Isense are QXYZ, whereas in Quatlib (and VRPN) they are XYZQ.  Once these
        //      are switched correctly, the positions can be read without strange swapping.
        pos[0] = data.Station[station].Position[0];
        pos[1] = data.Station[station].Position[1];
        pos[2] = data.Station[station].Position[2];

        if (m_StationInfo[station].AngleFormat == ISD_QUATERNION) {
          d_quat[0] = data.Station[station].Quaternion[1];
          d_quat[1] = data.Station[station].Quaternion[2];
          d_quat[2] = data.Station[station].Quaternion[3];
          d_quat[3] = data.Station[station].Quaternion[0];
        } else {
          // Just return Euler for now, converted to radians (nahon@virtools).
          angles[0] = VRPN_DEGREES_TO_RADIANS * data.Station[station].Euler[0];
          angles[1] = VRPN_DEGREES_TO_RADIANS * data.Station[station].Euler[1];
          angles[2] = VRPN_DEGREES_TO_RADIANS * data.Station[station].Euler[2];

          q_from_euler(d_quat, angles[0], angles[1], angles[2]);
        }

        // Have to just send it now.
        status = vrpn_TRACKER_REPORT_READY;
        //fprintf(stderr, "sending message len %d\n", len);
        send_report();

        //printf("Isense %f, %f, %f\n", pos[0], pos[1], pos[2]);
        //printf("Isense a:%f, %f, %f : ", angles[0], angles[1], angles[2]); // if the tracker reports a quat, these will be garbage
        //printf("q: %f, %f, %f, %f\n", d_quat[0], d_quat[1], d_quat[2], d_quat[3]);
      }
    }

  }
#endif
}
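
The IS900 timestamp handling above splits a float count of MILLIseconds into whole seconds plus remaining MICROseconds. The same decomposition as a standalone sketch, assuming a POSIX-style struct timeval:

#include <sys/time.h>

// Sketch of the milliseconds -> struct timeval split used above.
static struct timeval timeval_from_milliseconds(float ms)
{
  struct timeval tv;
  tv.tv_sec = (long)(ms / 1000);     // integer truncation to whole seconds
  ms -= tv.tv_sec * 1000.0f;         // keep only the sub-second remainder
  tv.tv_usec = (long)(ms * 1000);    // milliseconds -> microseconds
  return tv;
}

For example, 1234.5 ms becomes tv_sec = 1 and tv_usec = 234500, which the code above then adds to is900_zerotime with vrpn_TimevalSum.
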
Example #5
void	vrpn_IMU_SimpleCombiner::update_matrix_based_on_values(double time_interval)
{
  //==================================================================
  // Adjust the orientation based on the rotational velocity, which is
  // measured in the rotated coordinate system.  We need to rotate the
  // difference vector back to the canonical orientation, apply the
  // orientation change there, and then rotate back.
  // Be sure to scale by the time value.
  q_type forward, inverse;
  q_copy(forward, d_quat);
  q_invert(inverse, forward);
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_type delta;
  q_from_euler(delta,
    time_interval * d_rotational_vel.values[Q_Z],
    time_interval * d_rotational_vel.values[Q_Y],
    time_interval * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_type canonical;
  q_mult(canonical, delta, inverse);
  q_mult(canonical, forward, canonical);
  q_mult(d_quat, canonical, d_quat);

  //==================================================================
  // To the extent that the acceleration vector's magnitude is equal
  // to the expected gravity, rotate the orientation so that the vector
  // points downward.  This is measured in the rotated coordinate system,
  // so we need to rotate back to canonical orientation and apply
  // the difference there and then rotate back.  The rate of rotation
  // should be as specified in the gravity-rotation-rate parameter so
  // we don't swing the head around too quickly but only slowly re-adjust.

  double accel = sqrt(
      d_acceleration.values[0] * d_acceleration.values[0] +
      d_acceleration.values[1] * d_acceleration.values[1] +
      d_acceleration.values[2] * d_acceleration.values[2] );
  double diff = fabs(accel - 9.80665);

  // Only adjust if we're close enough to the expected gravity
  // @todo In a more complicated IMU tracker, base this on state and
  // error estimates from a Kalman or other filter.
  double scale = 1.0 - diff;
  if (scale > 0) {
    // Get a new forward and inverse matrix from the current, just-
    // rotated matrix.
    q_copy(forward, d_quat);

    // Change how fast we adjust based on how close we are to the
    // expected value of gravity.  Then further scale this by the
    // amount of time since the last estimate.
    double gravity_scale = scale * d_gravity_restore_rate * time_interval;

    // Rotate the gravity vector by the estimated transform.
    // We expect this direction to match the global down (-Y) vector.
    q_vec_type gravity_global;
    q_vec_set(gravity_global, d_acceleration.values[0],
      d_acceleration.values[1], d_acceleration.values[2]);
    q_vec_normalize(gravity_global, gravity_global);
    q_xform(gravity_global, forward, gravity_global);
    //printf("  XXX Gravity: %lg, %lg, %lg\n", gravity_global[0], gravity_global[1], gravity_global[2]);

    // Determine the rotation needed to take gravity and rotate
    // it into the direction of -Y.
    q_vec_type neg_y;
    q_vec_set(neg_y, 0, -1, 0);
    q_type rot;
    q_from_two_vecs(rot, gravity_global, neg_y);

    // Scale the rotation by the fraction of the orientation we
    // should remove based on the time that has passed, how well our
    // gravity vector matches expected, and the specified rate of
    // correction.
    static q_type identity = { 0, 0, 0, 1 };
    q_type scaled_rot;
    q_slerp(scaled_rot, identity, rot, gravity_scale);
    //printf("XXX Scaling gravity vector by %lg\n", gravity_scale);

    // Rotate by this angle.
    q_mult(d_quat, scaled_rot, d_quat);

    //==================================================================
    // If we are getting compass data, and to the extent that the
    // acceleration vector's magnitude is equal to the expected gravity,
    // compute the cross product of the cross product to find the
    // direction of north perpendicular to down.  This is measured in
    // the rotated coordinate system, so we need to rotate back to the
    // canonical orientation and do the comparison there.
    // The fraction of rotation should be as specified in the
    // magnetometer-rotation-rate parameter so we don't swing the head
    // around too quickly but only slowly re-adjust.

    if (d_magnetometer.ana != NULL) {
      // Get a new forward and inverse matrix from the current, just-
      // rotated matrix.
      q_copy(forward, d_quat);

      // Find the North vector that is perpendicular to gravity by
      // clearing its Y axis to zero and renormalizing.
      q_vec_type magnetometer;
      q_vec_set(magnetometer, d_magnetometer.values[0],
        d_magnetometer.values[1], d_magnetometer.values[2]);
      q_vec_type magnetometer_global;
      q_xform(magnetometer_global, forward, magnetometer);
      magnetometer_global[Q_Y] = 0;
      q_vec_type north_global;
      q_vec_normalize(north_global, magnetometer_global);
      //printf("  XXX north_global: %lg, %lg, %lg\n", north_global[0], north_global[1], north_global[2]);

      // Determine the rotation needed to take north and rotate it
      // into the direction of negative Z.
      q_vec_type neg_z;
      q_vec_set(neg_z, 0, 0, -1);
      q_from_two_vecs(rot, north_global, neg_z);

      // Change how fast we adjust based on how close we are to the
      // expected value of gravity.  Then further scale this by the
      // amount of time since the last estimate.
      double north_rate = scale * d_north_restore_rate * time_interval;

      // Scale the rotation by the fraction of the orientation we
      // should remove based on the time that has passed, how well our
      // gravity vector matches expected, and the specified rate of
      // correction.
      static q_type identity = { 0, 0, 0, 1 };
      q_slerp(scaled_rot, identity, rot, north_rate);

      // Rotate by this angle.
      q_mult(d_quat, scaled_rot, d_quat);
    }
  }

  //==================================================================
  // Compute and store the velocity information
  // This will be in the rotated coordinate system, so we need to
  // rotate back to the identity orientation before storing.
  // Convert from radians/second into a quaternion rotation as
  // rotated in a hundredth of a second and set the rotational
  // velocity dt to a hundredth of a second so that we don't
  // risk wrapping.
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_from_euler(delta,
    1e-2 * d_rotational_vel.values[Q_Z],
    1e-2 * d_rotational_vel.values[Q_Y],
    1e-2 * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_mult(canonical, delta, inverse);
  q_mult(vel_quat, forward, canonical);
  vel_quat_dt = 1e-2;
}
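
Both the gravity and compass corrections above use the same pattern: compute the full corrective rotation with q_from_two_vecs, then apply only a fraction of it by slerping from the identity quaternion, so the pose converges smoothly instead of snapping. A minimal sketch of that pattern, using the same quatlib calls as the function above:

#include <quat.h>

// Sketch: rotate `orientation` so that direction `have` moves toward
// direction `want`, but only by `fraction` of the full correction per
// call (fraction is typically rate * dt, expected in [0, 1]).
static void nudge_toward(q_type orientation, q_vec_type have,
                         q_vec_type want, double fraction)
{
  static q_type identity = { 0, 0, 0, 1 };
  q_type full_rot, scaled_rot;

  q_from_two_vecs(full_rot, have, want);               // full correction
  q_slerp(scaled_rot, identity, full_rot, fraction);   // fractional part
  q_mult(orientation, scaled_rot, orientation);        // pre-multiply, as above
}
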
Example #6
void	vrpn_Tracker_AnalogFly::update_matrix_based_on_values
                  (double time_interval)
{
  double tx,ty,tz, rx,ry,rz;	// Translation (m/s) and rotation (rad/sec)
  q_matrix_type diffM;		// Difference (delta) matrix

  // For absolute trackers, the interval is treated as "1", so that the
  // translations and rotations are unscaled.
  if (d_absolute) { time_interval = 1.0; }

  // compute the translation and rotation
  tx = d_x.value * time_interval;
  ty = d_y.value * time_interval;
  tz = d_z.value * time_interval;
  
  rx = d_sx.value * time_interval * (2*VRPN_PI);
  ry = d_sy.value * time_interval * (2*VRPN_PI);
  rz = d_sz.value * time_interval * (2*VRPN_PI);

  // Build a rotation matrix, then add in the translation
  q_euler_to_col_matrix(diffM, rz, ry, rx);
  diffM[3][0] = tx; diffM[3][1] = ty; diffM[3][2] = tz;

  // While the clutch is not engaged, we don't move.  Record that
  // the clutch was off so that we know later when it is re-engaged.
  if (!d_clutch_engaged) {
    d_clutch_was_off = true;
    return;
  }
  
  // When the clutch becomes re-engaged, we store the current matrix
  // multiplied by the inverse of the present differential matrix so that
  // the first frame of the mouse-hold leaves us in the same location.
  // For the absolute matrix, this re-engages new motion at the previous
  // location.
  if (d_clutch_engaged && d_clutch_was_off) {
    d_clutch_was_off = false;
    q_type  diff_orient;
    // This is backwards, because Euler angles have rotation about Z first...
    q_from_euler(diff_orient, rz, ry, rx);
    q_xyz_quat_type diff;
    q_vec_set(diff.xyz, tx, ty, tz);
    q_copy(diff.quat, diff_orient);
    q_xyz_quat_type  diff_inverse;
    q_xyz_quat_invert(&diff_inverse, &diff);
    q_matrix_type di_matrix;
    q_to_col_matrix(di_matrix, diff_inverse.quat);
    di_matrix[3][0] = diff_inverse.xyz[0];
    di_matrix[3][1] = diff_inverse.xyz[1];
    di_matrix[3][2] = diff_inverse.xyz[2];
    q_matrix_mult(d_clutchMatrix, di_matrix, d_currentMatrix);
  }

  // Apply the matrix.
  if (d_absolute) {
      // The difference matrix IS the current matrix.  Catenate it
      // onto the clutch matrix.  If there is no clutching happening,
      // this matrix will always be the identity so this will just
      // copy the difference matrix.
      q_matrix_mult(d_currentMatrix, diffM, d_clutchMatrix);
  } else {
      // Multiply the current matrix by the difference matrix to update
      // it to the current time. 
      if (d_worldFrame) {
        // If using world frame:
        // 1. Separate out the translation and add to the differential translation
        tx += d_currentMatrix[3][0];
        ty += d_currentMatrix[3][1];
        tz += d_currentMatrix[3][2];
        diffM[3][0] = 0; diffM[3][1] = 0; diffM[3][2] = 0;
        d_currentMatrix[3][0] = 0; d_currentMatrix[3][1] = 0; d_currentMatrix[3][2] = 0;

        // 2. Compose the rotations.
        q_matrix_mult(d_currentMatrix, d_currentMatrix, diffM);

        // 3. Put the new translation back in the matrix.
        d_currentMatrix[3][0] = tx; d_currentMatrix[3][1] = ty; d_currentMatrix[3][2] = tz;

      } else {
        q_matrix_mult(d_currentMatrix, diffM, d_currentMatrix);
      }
  }

  // Finally, convert the matrix into a pos/quat
  // and copy it into the tracker position and quaternion structures.
  convert_matrix_to_tracker();
}
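
The two relative-motion branches above differ only in composition order: the default branch multiplies the difference matrix on the left of the current matrix, while the d_worldFrame branch multiplies it on the right and carries the translation separately so the delta rotation doesn't also rotate the accumulated position. A sketch isolating the two orders, with the same q_matrix_mult call the function uses:

#include <quat.h>

// Default (non-world-frame) branch: delta composed on the left.
static void apply_delta_default(q_matrix_type current, q_matrix_type delta)
{
  q_matrix_mult(current, delta, current);
}

// World-frame branch: rotation composed on the right; the translation
// must be stripped first and restored afterward, as steps 1-3 above do.
static void apply_delta_world(q_matrix_type current, q_matrix_type delta)
{
  q_matrix_mult(current, current, delta);
}
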
Example #7
void vrpn_Tracker_WiimoteHead::_update_2_LED_pose(q_xyz_quat_type & newPose) {
	if (d_points != 2) {
		// We simply stop updating our pos+orientation if we lose LEDs.
		// TODO: right now, if we don't have exactly 2 points, we lose the lock.
		d_lock = false;
		d_flipState = FLIP_UNKNOWN;
		return;
	}

	// TODO right now only handling the 2-LED glasses

	d_lock = true;
	double rx, ry, rz;
	rx = ry = rz = 0;

	double X0, X1, Y0, Y1;

	X0 = d_vX[0];
	X1 = d_vX[1];
	Y0 = d_vY[0];
	Y1 = d_vY[1];

	if (d_flipState == FLIP_180) {
		/// If the first report of a new tracking lock indicated that
		/// our "up" vector had no positive y component, we have the
		/// points in the wrong order - flip them around.
		/// This uses the assumption that the first time we see the glasses,
		/// they ought to be right-side up (a reasonable assumption for
		/// head tracking)
		std::swap(X0, X1);
		std::swap(Y0, Y1);
	}

	const double dx = X0 - X1;
	const double dy = Y0 - Y1;
	const double dist = sqrt(dx * dx + dy * dy);
	const double angle = dist * cvtDistToAngle;
	// Note that this is an approximation, since we don't know the
	// distance/horizontal position.  (I think...)

	const double headDist = (d_blobDistance / 2.0) / tan(angle);

	// Translate the distance along z axis, and tilt the head
	newPose.xyz[2] = headDist;      // translate along Z
	rz = atan2(dy, dx);     // rotate around Z

	// Find the sensor pixel of the line of sight - directly between
	// the LEDs
	const double avgX = (X0 + X1) / 2.0;
	const double avgY = (Y0 + Y1) / 2.0;

	/// @todo For some unnerving reason, in release builds, avgX tends to become NaN/undefined
	/// However, any kind of inspection (such as the following, or even a simple cout)
	/// appears to prevent the issue.  This makes me uneasy, but I won't argue with
	/// what is working.
	if (wm_isnan(avgX)) {
		std::cerr << "NaN detected in avgX: X0 = " << X0 << ", X1 = " << X1 << std::endl;
		return;
	}

	if (wm_isnan(avgY)) {
		std::cerr << "NaN detected in avgY: Y0 = " << Y0 << ", Y1 = " << Y1 << std::endl;
		return;
	}

	// b is the virtual depth in the sensor from a point to the full sensor
	// used for finding similar triangles to calculate x/y translation
	const double bHoriz = xResSensor / 2 / tan(fovX / 2);
	const double bVert = yResSensor / 2 / tan(fovY / 2);

	// World head displacement (X and Y) from a centered origin at
	// the calculated distance from the sensor
	newPose.xyz[0] = headDist * (avgX - xResSensor / 2) / bHoriz;
	newPose.xyz[1] = headDist * (avgY - yResSensor / 2) / bVert;

	// set the quat. part of our pose with rotation angles
	q_from_euler(newPose.quat, rz, ry, rx);

	// Apply the new pose
	q_vec_copy(d_currentPose.xyz, newPose.xyz);
	q_copy(d_currentPose.quat, newPose.quat);
}
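
The x/y translation above is a pinhole-camera similar-triangles step: a blob offset of p pixels from the sensor center, seen past a virtual pinhole depth of bHoriz pixels, corresponds to a world offset of headDist * p / bHoriz. A worked sketch with hypothetical numbers (the sensor resolution and FOV here are illustrative assumptions, not the Wiimote's actual values):

#include <cmath>
#include <cstdio>

int main()
{
	const double pi = 3.14159265358979323846;
	const double xResSensor = 1024.0;       // assumed sensor width, pixels
	const double fovX = 45.0 * pi / 180.0;  // assumed horizontal FOV
	const double avgX = 612.0;              // hypothetical blob midpoint
	const double headDist = 2.0;            // hypothetical head distance, m

	// Virtual depth of the pinhole in pixel units (bHoriz above).
	const double bHoriz = xResSensor / 2.0 / std::tan(fovX / 2.0); // ~1236 px
	// World X displacement from the optical axis.
	const double x = headDist * (avgX - xResSensor / 2.0) / bHoriz; // ~0.162 m
	std::printf("x offset = %f m\n", x);
	return 0;
}
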