Example #1
vrpn_IMU_SimpleCombiner::vrpn_IMU_SimpleCombiner
    (const char * name, vrpn_Connection * trackercon,
      vrpn_Tracker_IMU_Params *params,
      float update_rate, bool report_changes)
  : vrpn_Tracker(name, trackercon),
  d_update_interval(update_rate ? (1 / update_rate) : 1.0),
  d_report_changes(report_changes)
{
  // Set the restoration rates for gravity and north.
  // These are the fraction of the way to full restoration that should
  // occur over one second.
  d_gravity_restore_rate = 0.9;
  d_north_restore_rate = 0.9;
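  // (With 0.9, 90% of the remaining drift is corrected over each second.)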

  // Hook up the parameters for acceleration and rotational velocity
  d_acceleration.params = params->d_acceleration;
  d_rotational_vel.params = params->d_rotational_vel;

  // Magnetometers by definition produce unit vectors and
  // have their axes as (x,y,z) so we just have to specify
  // the name here and the others are all set to pass the
  // values through.
  d_magnetometer.params.name = params->d_magnetometer_name;
  for (int i = 0; i < 3; i++) {
    d_magnetometer.params.channels[i] = i;
    d_magnetometer.params.offsets[i] = 0;
    d_magnetometer.params.scales[i] = 1;
  }

  //--------------------------------------------------------------------
  // Open analog remotes for any channels that have non-NULL names.
  // If a name starts with the "*" character, use the tracker's
  // connection rather than opening a new connection for it.
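  // (For example, "*Analog0" would share this tracker's connection, while
  // "Analog0@localhost" would open its own; these device names are illustrative.)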
  // Set up callbacks to handle updates to the analog values.
  setup_vector(&d_acceleration, handle_analog_update);
  setup_vector(&d_rotational_vel, handle_analog_update);
  setup_vector(&d_magnetometer, handle_analog_update);

  //--------------------------------------------------------------------
  // Set the current timestamp to "now", the current orientation to identity
  // and the angular rotation velocity to zero.
  vrpn_gettimeofday(&d_prevtime, NULL);
  d_prev_update_time = d_prevtime;
  vrpn_Tracker::timestamp = d_prevtime;
  q_vec_set(pos, 0, 0, 0);
  q_from_axis_angle(d_quat, 0, 0, 1, 0);
  q_vec_set(vel, 0, 0, 0);
  q_from_axis_angle(vel_quat, 0, 0, 1, 0);
  vel_quat_dt = 1;
}
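A minimal sketch of how this combiner might be constructed. The connection string, device names, and channel assignments are hypothetical, and the vrpn_Tracker_IMU_Params fields are inferred from the constructor's usage above; check the shipping header for the exact declarations.

// Sketch only: assumes an analog server named "Analog0" exposes acceleration
// on channels 0-2 (m/s^2) and rotational velocity on channels 3-5 (rad/s).
vrpn_Connection *c = vrpn_create_server_connection(":3883");

vrpn_Tracker_IMU_Params params;
params.d_acceleration.name = "*Analog0";   // "*" = share the tracker's connection
params.d_rotational_vel.name = "*Analog0";
for (int i = 0; i < 3; i++) {
  params.d_acceleration.channels[i] = i;
  params.d_rotational_vel.channels[i] = 3 + i;
}
// Leave d_magnetometer_name empty when no compass is attached.

// Report at up to 100 Hz, sending only when the values change.
vrpn_IMU_SimpleCombiner combiner("Tracker0", c, &params, 100.0f, true);
while (true) {
  combiner.mainloop();
  c->mainloop();
}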
Example #2
    /// Interpolates the values at three 2D points to the
    /// location of a fourth point.
    static double interpolate(double p1X, double p1Y, double val1, double p2X,
                              double p2Y, double val2, double p3X, double p3Y,
                              double val3, double pointX, double pointY) {
        // Fit a plane to three points, using their values as the
        // third dimension.
        q_vec_type p1, p2, p3;
        q_vec_set(p1, p1X, p1Y, val1);
        q_vec_set(p2, p2X, p2Y, val2);
        q_vec_set(p3, p3X, p3Y, val3);

        // The normalized cross product of the vectors from the first
        // point to each of the other two is normal to this plane.
        q_vec_type v1, v2;
        q_vec_subtract(v1, p2, p1);
        q_vec_subtract(v2, p3, p1);
        q_vec_type ABC;
        q_vec_cross_product(ABC, v1, v2);
        if (q_vec_magnitude(ABC) == 0) {
            // We can't get a normal, degenerate points, just return the first
            // value.
            return val1;
        }
        q_vec_normalize(ABC, ABC);

        // Solve for the D associated with the plane by filling back
        // in one of the points.  This is done by taking the dot product
        // of the ABC vector with the first point.  We then solve for D.
        // AX + BY + CZ + D = 0; D = -(AX + BY + CZ)
        double D = -q_vec_dot_product(ABC, p1);

        // Evaluate the plane equation at our input point, which will interpolate
        // or extrapolate our values.
        // We're solving for Z in this case, so we get
        // CZ = -(AX + BY + D); Z = -(AX + BY + D)/C;
        return -(ABC[0] * pointX + ABC[1] * pointY + D) / ABC[2];
    }
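Because interpolate() fits an exact plane, a quick sanity check is to feed it three samples from a known plane and confirm it recovers the value at a fourth point. A sketch, assuming it is called from a scope where the method is visible:

// Three samples from the plane z = 2x + 3y + 1:
//   (0,0) -> 1, (1,0) -> 3, (0,1) -> 4.
double v = interpolate(0, 0, 1,
                       1, 0, 3,
                       0, 1, 4,
                       2, 2);   // query point (2,2)
// Expect v == 2*2 + 3*2 + 1 == 11, up to floating-point rounding.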
Example #3
void vrpn_IMU_SimpleCombiner::update_matrix_based_on_values(double time_interval)
{
  //==================================================================
  // Adjust the orientation based on the rotational velocity, which is
  // measured in the rotated coordinate system.  We need to rotate the
  // difference vector back to the canonical orientation, apply the
  // orientation change there, and then rotate back.
  // Be sure to scale by the time value.
  q_type forward, inverse;
  q_copy(forward, d_quat);
  q_invert(inverse, forward);
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_type delta;
  q_from_euler(delta,
    time_interval * d_rotational_vel.values[Q_Z],
    time_interval * d_rotational_vel.values[Q_Y],
    time_interval * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_type canonical;
  q_mult(canonical, delta, inverse);
  q_mult(canonical, forward, canonical);
  q_mult(d_quat, canonical, d_quat);
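  // (Since d_quat == forward at this point, the three multiplies reduce to
  // d_quat = forward * delta: the delta is appended in the local frame.)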

  //==================================================================
  // To the extent that the acceleration vector's magnitude is equal
  // to the expected gravity, rotate the orientation so that the vector
  // points downward.  This is measured in the rotated coordinate system,
  // so we need to rotate back to canonical orientation and apply
  // the difference there and then rotate back.  The rate of rotation
  // should be as specified in the gravity-rotation-rate parameter so
  // we don't swing the head around too quickly but only slowly re-adjust.

  double accel = sqrt(
      d_acceleration.values[0] * d_acceleration.values[0] +
      d_acceleration.values[1] * d_acceleration.values[1] +
      d_acceleration.values[2] * d_acceleration.values[2] );
  double diff = fabs(accel - 9.80665);
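  // (9.80665 m/s^2 is standard gravity, so diff measures how far the
  // reading is from what a motionless device would report.)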

  // Only adjust if we're close enough to the expected gravity
  // @todo In a more complicated IMU tracker, base this on state and
  // error estimates from a Kalman or other filter.
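  // The correction weight falls off linearly with that difference and is
  // disabled entirely once the reading is off by 1 m/s^2 or more.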
  double scale = 1.0 - diff;
  if (scale > 0) {
    // Get a new forward and inverse matrix from the current, just-
    // rotated matrix.
    q_copy(forward, d_quat);

    // Change how fast we adjust based on how close we are to the
    // expected value of gravity.  Then further scale this by the
    // amount of time since the last estimate.
    double gravity_scale = scale * d_gravity_restore_rate * time_interval;

    // Rotate the gravity vector by the estimated transform.
    // We expect this direction to match the global down (-Y) vector.
    q_vec_type gravity_global;
    q_vec_set(gravity_global, d_acceleration.values[0],
      d_acceleration.values[1], d_acceleration.values[2]);
    q_vec_normalize(gravity_global, gravity_global);
    q_xform(gravity_global, forward, gravity_global);
    //printf("  XXX Gravity: %lg, %lg, %lg\n", gravity_global[0], gravity_global[1], gravity_global[2]);

    // Determine the rotation needed to take gravity and rotate
    // it into the direction of -Y.
    q_vec_type neg_y;
    q_vec_set(neg_y, 0, -1, 0);
    q_type rot;
    q_from_two_vecs(rot, gravity_global, neg_y);

    // Scale the rotation by the fraction of the orientation we
    // should remove based on the time that has passed, how well our
    // gravity vector matches expected, and the specified rate of
    // correction.
    static q_type identity = { 0, 0, 0, 1 };
    q_type scaled_rot;
    q_slerp(scaled_rot, identity, rot, gravity_scale);
    //printf("XXX Scaling gravity vector by %lg\n", gravity_scale);

    // Rotate by this angle.
    q_mult(d_quat, scaled_rot, d_quat);

    //==================================================================
    // If we are getting compass data, and to the extent that the
    // acceleration vector's magnitude is equal to the expected gravity,
    // compute the cross product of the cross product to find the
    // direction of north perpendicular to down.  This is measured in
    // the rotated coordinate system, so we need to rotate back to the
    // canonical orientation and do the comparison there.
    // The fraction of rotation should be as specified in the
    // magnetometer-rotation-rate parameter so we don't swing the head
    // around too quickly but only slowly re-adjust.

    if (d_magnetometer.ana != NULL) {
      // Get a new forward and inverse matrix from the current, just-
      // rotated matrix.
      q_copy(forward, d_quat);

      // Find the north vector that is perpendicular to gravity by
      // clearing its Y component to zero and renormalizing.
      q_vec_type magnetometer;
      q_vec_set(magnetometer, d_magnetometer.values[0],
        d_magnetometer.values[1], d_magnetometer.values[2]);
      q_vec_type magnetometer_global;
      q_xform(magnetometer_global, forward, magnetometer);
      magnetometer_global[Q_Y] = 0;
      q_vec_type north_global;
      q_vec_normalize(north_global, magnetometer_global);
      //printf("  XXX north_global: %lg, %lg, %lg\n", north_global[0], north_global[1], north_global[2]);

      // Determine the rotation needed to take north and rotate it
      // into the direction of negative Z.
      q_vec_type neg_z;
      q_vec_set(neg_z, 0, 0, -1);
      q_from_two_vecs(rot, north_global, neg_z);

      // Change how fast we adjust based on how close we are to the
      // expected value of gravity.  Then further scale this by the
      // amount of time since the last estimate.
      double north_rate = scale * d_north_restore_rate * time_interval;

      // Scale the rotation by the fraction of the orientation we
      // should remove based on the time that has passed, how well our
      // gravity vector matches expected, and the specified rate of
      // correction.
      static q_type identity = { 0, 0, 0, 1 };
      q_slerp(scaled_rot, identity, rot, north_rate);

      // Rotate by this angle.
      q_mult(d_quat, scaled_rot, d_quat);
    }
  }

  //==================================================================
  // Compute and store the velocity information
  // This will be in the rotated coordinate system, so we need to
  // rotate back to the identity orientation before storing.
  // Convert from radians/second into a quaternion rotation as
  // rotated in a hundredth of a second and set the rotational
  // velocity dt to a hundredth of a second so that we don't
  // risk wrapping.
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_from_euler(delta,
    1e-2 * d_rotational_vel.values[Q_Z],
    1e-2 * d_rotational_vel.values[Q_Y],
    1e-2 * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_mult(canonical, delta, inverse);
  q_mult(vel_quat, forward, canonical);
  vel_quat_dt = 1e-2;
}
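The forward/delta/inverse sandwich used twice above is the standard change of basis for rotations. A minimal standalone sketch of the same pattern, using only quatlib calls that already appear in this file:

#include <quat.h>

// Append a rotation `delta`, expressed in the sensor's local frame, to the
// current orientation `q`, mirroring the code above:
//   q' = (q * delta * q^-1) * q, which for unit quaternions equals q * delta.
void append_local_delta(q_type q, const q_type delta)
{
  q_type inverse, canonical;
  q_invert(inverse, q);
  q_mult(canonical, delta, inverse);  // delta * q^-1
  q_mult(canonical, q, canonical);    // q * delta * q^-1: delta in world frame
  q_mult(q, canonical, q);            // apply it to the current orientation
}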
Example #4
void vrpn_Tracker_AnalogFly::update_matrix_based_on_values(double time_interval)
{
  double tx,ty,tz, rx,ry,rz;    // This interval's translation (m) and rotation (rad)
  q_matrix_type diffM;          // Difference (delta) matrix

  // For absolute trackers, the interval is treated as "1", so that the
  // translations and rotations are unscaled.
  if (d_absolute) { time_interval = 1.0; }

  // compute the translation and rotation
  tx = d_x.value * time_interval;
  ty = d_y.value * time_interval;
  tz = d_z.value * time_interval;
  
  rx = d_sx.value * time_interval * (2*VRPN_PI);
  ry = d_sy.value * time_interval * (2*VRPN_PI);
  rz = d_sz.value * time_interval * (2*VRPN_PI);

  // Build a rotation matrix, then add in the translation
  q_euler_to_col_matrix(diffM, rz, ry, rx);
  diffM[3][0] = tx; diffM[3][1] = ty; diffM[3][2] = tz;
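  // (Note: quatlib matrices carry the translation in row 3, i.e. poses here
  // follow the row-vector convention.)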

  // While the clutch is not engaged, we don't move.  Record that
  // the clutch was off so that we know later when it is re-engaged.
  if (!d_clutch_engaged) {
    d_clutch_was_off = true;
    return;
  }
  
  // When the clutch becomes re-engaged, we store the current matrix
  // multiplied by the inverse of the present differential matrix so that
  // the first frame of the mouse-hold leaves us in the same location.
  // For the absolute matrix, this re-engages new motion at the previous
  // location.
  if (d_clutch_engaged && d_clutch_was_off) {
    d_clutch_was_off = false;
    q_type  diff_orient;
    // This is backwards, because Euler angles have rotation about Z first...
    q_from_euler(diff_orient, rz, ry, rx);
    q_xyz_quat_type diff;
    q_vec_set(diff.xyz, tx, ty, tz);
    q_copy(diff.quat, diff_orient);
    q_xyz_quat_type  diff_inverse;
    q_xyz_quat_invert(&diff_inverse, &diff);
    q_matrix_type di_matrix;
    q_to_col_matrix(di_matrix, diff_inverse.quat);
    di_matrix[3][0] = diff_inverse.xyz[0];
    di_matrix[3][1] = diff_inverse.xyz[1];
    di_matrix[3][2] = diff_inverse.xyz[2];
    q_matrix_mult(d_clutchMatrix, di_matrix, d_currentMatrix);
  }

  // Apply the matrix.
  if (d_absolute) {
      // The difference matrix IS the current matrix.  Catenate it
      // onto the clutch matrix.  If there is no clutching happening,
      // this matrix will always be the identity so this will just
      // copy the difference matrix.
      q_matrix_mult(d_currentMatrix, diffM, d_clutchMatrix);
  } else {
      // Multiply the current matrix by the difference matrix to update
      // it to the current time. 
      if (d_worldFrame) {
        // If using world frame:
        // 1. Separate out the translation and add to the differential translation
        tx += d_currentMatrix[3][0];
        ty += d_currentMatrix[3][1];
        tz += d_currentMatrix[3][2];
        diffM[3][0] = 0; diffM[3][1] = 0; diffM[3][2] = 0;
        d_currentMatrix[3][0] = 0; d_currentMatrix[3][1] = 0; d_currentMatrix[3][2] = 0;

        // 2. Compose the rotations.
        q_matrix_mult(d_currentMatrix, d_currentMatrix, diffM);

        // 3. Put the new translation back in the matrix.
        d_currentMatrix[3][0] = tx; d_currentMatrix[3][1] = ty; d_currentMatrix[3][2] = tz;

      } else {
        q_matrix_mult(d_currentMatrix, diffM, d_currentMatrix);
      }
  }

  // Finally, convert the matrix into a pos/quat
  // and copy it into the tracker position and quaternion structures.
  convert_matrix_to_tracker();
}
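A condensed sketch of the absolute branch above, written as a hypothetical free function: build the differential matrix from the analog values and catenate it onto the clutch matrix.

#include <quat.h>

// Hypothetical helper mirroring the absolute branch: current = diff * clutch.
// x, y, z are in meters; sx, sy, sz are fractions of a full turn per axis.
void absolute_update(q_matrix_type current, const q_matrix_type clutch,
                     double x, double y, double z,
                     double sx, double sy, double sz)
{
  const double two_pi = 2 * 3.14159265358979323846;  // stands in for 2*VRPN_PI
  q_matrix_type diff;
  q_euler_to_col_matrix(diff, sz * two_pi, sy * two_pi, sx * two_pi);
  diff[3][0] = x; diff[3][1] = y; diff[3][2] = z;
  // With an identity clutch matrix this simply copies diff into current.
  q_matrix_mult(current, diff, clutch);
}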
Example #5
int vrpn_Tracker_DeadReckoning_Rotation::test(void)
{
    // Construct a loopback connection to be used by all of our objects.
    vrpn_Connection *c = vrpn_create_server_connection("loopback:");

    // Create a tracker server to be the initiator and a dead-reckoning
    // rotation tracker to use it as a base; have it predict 1 second
    // into the future.
    vrpn_Tracker_Server *t0 = new vrpn_Tracker_Server("Tracker0", c, 2);
    vrpn_Tracker_DeadReckoning_Rotation *t1 =
        new vrpn_Tracker_DeadReckoning_Rotation("Tracker1", c, "*Tracker0", 2, 1);

    // Create a remote tracker to listen to t1 and set up its callbacks for
    // position and velocity reports.  They will fill in the static structures
    // listed above with whatever values they receive.
    vrpn_Tracker_Remote *tr = new vrpn_Tracker_Remote("Tracker1", c);
    tr->register_change_handler(&poseResponse, handle_test_tracker_report);
    tr->register_change_handler(&velResponse, handle_test_tracker_velocity_report);

    // Set up the values in the pose and velocity responses with incorrect values
    // so that things will fail if the class does not perform as expected.
    q_vec_set(poseResponse.pos, 1, 1, 1);
    q_make(poseResponse.quat, 0, 0, 1, 0);
    poseResponse.time.tv_sec = 0;
    poseResponse.time.tv_usec = 0;
    poseResponse.sensor = -1;

    // Send a pose report from sensors 0 and 1 on the original tracker that places
    // them at (1,1,1) and with the identity rotation.  We should get a response for
    // each of them so we should end up with the sensor-1 response matching what we
    // sent, with no change in position or orientation.
    q_vec_type pos = { 1, 1, 1 };
    q_type quat = { 0, 0, 0, 1 };
    struct timeval firstSent;
    vrpn_gettimeofday(&firstSent, NULL);
    t0->report_pose(0, firstSent, pos, quat);
    t0->report_pose(1, firstSent, pos, quat);

    t0->mainloop();
    t1->mainloop();
    c->mainloop();
    tr->mainloop();

    if ( (poseResponse.time.tv_sec != firstSent.tv_sec + 1)
        || (poseResponse.sensor != 1)
        || (q_vec_distance(poseResponse.pos, pos) > 1e-10)
        || (poseResponse.quat[Q_W] != 1)
       )
    {
        std::cerr << "vrpn_Tracker_DeadReckoning_Rotation::test(): Got unexpected"
            << " initial response: pos (" << poseResponse.pos[Q_X] << ", "
            << poseResponse.pos[Q_Y] << ", " << poseResponse.pos[Q_Z] << "), quat ("
            << poseResponse.quat[Q_X] << ", " << poseResponse.quat[Q_Y] << ", "
            << poseResponse.quat[Q_Z] << ", " << poseResponse.quat[Q_W] << ")"
            << " from sensor " << poseResponse.sensor
            << " at time " << poseResponse.time.tv_sec << ":" << poseResponse.time.tv_usec
            << std::endl;
        delete tr;
        delete t1;
        delete t0;
        c->removeReference();
        return 1;
    }
    
    // Send a second tracker report for sensor 0 coming 0.4 seconds later that has
    // translated to position (2,1,1) and rotated by 0.4 * 90 degrees around Z.
    // This should cause a prediction for one second later than this new pose
    // message that has rotated by very close to 1.4 * 90 degrees.
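    // (Rate check: 0.4 * 90 degrees over 0.4 s is 90 deg/s, so predicting one
    // second ahead adds another 90 degrees: 0.4 * 90 + 90 = 1.4 * 90 = 126.)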
    q_vec_type pos2 = { 2, 1, 1 };
    q_type quat2;
    double angle2 = 0.4 * 90 * M_PI / 180.0;
    q_from_axis_angle(quat2, 0, 0, 1, angle2);
    struct timeval p4Second = { 0, 400000 };
    struct timeval firstPlusP4 = vrpn_TimevalSum(firstSent, p4Second);
    t0->report_pose(0, firstPlusP4, pos2, quat2);

    t0->mainloop();
    t1->mainloop();
    c->mainloop();
    tr->mainloop();

    double x, y, z, angle;
    q_to_axis_angle(&x, &y, &z, &angle, poseResponse.quat);
    if ((poseResponse.time.tv_sec != firstPlusP4.tv_sec + 1)
        || (poseResponse.sensor != 0)
        || (q_vec_distance(poseResponse.pos, pos2) > 1e-10)
        || !isClose(x, 0) || !isClose(y, 0) || !isClose(z, 1)
        || !isClose(angle, 1.4 * 90 * M_PI / 180.0)
       )
    {
        std::cerr << "vrpn_Tracker_DeadReckoning_Rotation::test(): Got unexpected"
            << " predicted pose response: pos (" << poseResponse.pos[Q_X] << ", "
            << poseResponse.pos[Q_Y] << ", " << poseResponse.pos[Q_Z] << "), quat ("
            << poseResponse.quat[Q_X] << ", " << poseResponse.quat[Q_Y] << ", "
            << poseResponse.quat[Q_Z] << ", " << poseResponse.quat[Q_W] << ")"
            << " from sensor " << poseResponse.sensor
            << std::endl;
        delete tr;
        delete t1;
        delete t0;
        c->removeReference();
        return 2;
    }

    // Send a velocity report for sensor 1 that translates by (1,0,0)
    // and rotates by 0.4 * 90 degrees per 0.4 second around Z.
    // This should cause a prediction for one second later than the first
    // report that has rotated by very close to 90 degrees.  The translation
    // should be ignored, so the position should be the original position.
    q_vec_type vel = { 1, 0, 0 };
    t0->report_pose_velocity(1, firstPlusP4, vel, quat2, 0.4);

    t0->mainloop();
    t1->mainloop();
    c->mainloop();
    tr->mainloop();

    q_to_axis_angle(&x, &y, &z, &angle, poseResponse.quat);
    if ((poseResponse.time.tv_sec != firstSent.tv_sec + 1)
        || (poseResponse.sensor != 1)
        || (q_vec_distance(poseResponse.pos, pos) > 1e-10)
        || !isClose(x, 0) || !isClose(y, 0) || !isClose(z, 1)
        || !isClose(angle, 90 * M_PI / 180.0)
        )
    {
        std::cerr << "vrpn_Tracker_DeadReckoning_Rotation::test(): Got unexpected"
            << " predicted velocity response: pos (" << poseResponse.pos[Q_X] << ", "
            << poseResponse.pos[Q_Y] << ", " << poseResponse.pos[Q_Z] << "), quat ("
            << poseResponse.quat[Q_X] << ", " << poseResponse.quat[Q_Y] << ", "
            << poseResponse.quat[Q_Z] << ", " << poseResponse.quat[Q_W] << ")"
            << " from sensor " << poseResponse.sensor
            << std::endl;
        delete tr;
        delete t1;
        delete t0;
        c->removeReference();
        return 3;
    }

    // To test the behavior of the prediction code when we're moving around more
    // than one axis, and when we're starting from a non-identity orientation,
    // set sensor 1 to be rotated 180 degrees around X.  Then send a velocity
    // report that will produce a rotation of 180 degrees around Z.  The result
    // should match a prediction of 180 degrees around Y (plus or minus 180, plus
    // or minus Y axis are all equivalent).
    struct timeval oneSecond = { 1, 0 };
    struct timeval firstPlusOne = vrpn_TimevalSum(firstSent, oneSecond);
    q_type quat3;
    q_from_axis_angle(quat3, 1, 0, 0, M_PI);
    t0->report_pose(1, firstPlusOne, pos, quat3);
    q_type quat4;
    q_from_axis_angle(quat4, 0, 0, 1, M_PI);
    t0->report_pose_velocity(1, firstPlusOne, vel, quat4, 1.0);

    t0->mainloop();
    t1->mainloop();
    c->mainloop();
    tr->mainloop();

    q_to_axis_angle(&x, &y, &z, &angle, poseResponse.quat);
    if ((poseResponse.time.tv_sec != firstPlusOne.tv_sec + 1)
        || (poseResponse.sensor != 1)
        || (q_vec_distance(poseResponse.pos, pos) > 1e-10)
        || !isClose(x, 0) || !isClose(fabs(y), 1) || !isClose(z, 0)
        || !isClose(fabs(angle), M_PI)
        )
    {
        std::cerr << "vrpn_Tracker_DeadReckoning_Rotation::test(): Got unexpected"
            << " predicted pose + velocity response: pos (" << poseResponse.pos[Q_X] << ", "
            << poseResponse.pos[Q_Y] << ", " << poseResponse.pos[Q_Z] << "), quat ("
            << poseResponse.quat[Q_X] << ", " << poseResponse.quat[Q_Y] << ", "
            << poseResponse.quat[Q_Z] << ", " << poseResponse.quat[Q_W] << ")"
            << " from sensor " << poseResponse.sensor
            << std::endl;
        delete tr;
        delete t1;
        delete t0;
        c->removeReference();
        return 4;
    }

    // To test the behavior of the prediction code when we're moving around more
    // than one axis, and when we're starting from a non-identity orientation,
    // set sensor 0 to start out at identity.  Then in one second it will be
    // rotated 180 degrees around X.  Then in another second it will be rotated
    // additionally by 90 degrees around Z; the prediction should be another
    // 90 degrees around Z, which should turn out to compose to +/-180 degrees
    // around +/- Y, as in the velocity case above.
    // To make this work, we send a sequence of three poses, one second apart,
    // starting at the original time.  We do this on sensor 0, which has never
    // had a velocity report, so that it will be using the pose-only prediction.
    struct timeval firstPlusTwo = vrpn_TimevalSum(firstPlusOne, oneSecond);
    q_from_axis_angle(quat3, 1, 0, 0, M_PI);
    t0->report_pose(0, firstSent, pos, quat);
    t0->report_pose(0, firstPlusOne, pos, quat3);
    q_from_axis_angle(quat4, 0, 0, 1, M_PI / 2);
    q_type quat5;
    q_mult(quat5, quat4, quat3);
    t0->report_pose(0, firstPlusTwo, pos, quat5);

    t0->mainloop();
    t1->mainloop();
    c->mainloop();
    tr->mainloop();

    q_to_axis_angle(&x, &y, &z, &angle, poseResponse.quat);
    if ((poseResponse.time.tv_sec != firstPlusTwo.tv_sec + 1)
        || (poseResponse.sensor != 0)
        || (q_vec_distance(poseResponse.pos, pos) > 1e-10)
        || !isClose(x, 0) || !isClose(fabs(y), 1.0) || !isClose(z, 0)
        || !isClose(fabs(angle), M_PI)
        )
    {
        std::cerr << "vrpn_Tracker_DeadReckoning_Rotation::test(): Got unexpected"
            << " predicted pose + pose response: pos (" << poseResponse.pos[Q_X] << ", "
            << poseResponse.pos[Q_Y] << ", " << poseResponse.pos[Q_Z] << "), quat ("
            << poseResponse.quat[Q_X] << ", " << poseResponse.quat[Q_Y] << ", "
            << poseResponse.quat[Q_Z] << ", " << poseResponse.quat[Q_W] << ")"
            << " from sensor " << poseResponse.sensor
            << "; axis = (" << x << ", " << y << ", " << z << "), angle = "
            << angle
            << std::endl;
        delete tr;
        delete t1;
        delete t0;
        c->removeReference();
        return 5;
    }

    // Done; delete our objects and return 0 to indicate that
    // everything worked.
    delete tr;
    delete t1;
    delete t0;
    c->removeReference();
    return 0;
}
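A minimal harness for invoking the self-test. The main() scaffolding and header path are assumptions; test() and its failure codes (1 through 5, one per check above) are as defined in the listing.

#include <iostream>
#include <vrpn_Tracker.h>  // assumed home of vrpn_Tracker_DeadReckoning_Rotation

int main()
{
  int ret = vrpn_Tracker_DeadReckoning_Rotation::test();
  if (ret != 0) {
    std::cerr << "Dead-reckoning self-test failed with code " << ret << std::endl;
    return 1;
  }
  std::cout << "Dead-reckoning self-test passed" << std::endl;
  return 0;
}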