Example #1
/// Experimental
void buffers_swap(ot_queue* q1, ot_queue* q2) {
    ot_queue scratch;
    
    q_copy(&scratch, q1);
    q_copy(q1, q2);
    q_copy(q2, &scratch);
}
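Note: this swap only works if q_copy performs a shallow copy of the ot_queue control block. A minimal sketch of the assumed semantics (the field names are illustrative, not OpenTag's actual layout):

// Assumed semantics of OpenTag's q_copy: duplicate the queue control
// block itself; both queues keep pointing at their original buffers.
typedef struct {
    unsigned char* front;      // start of the backing buffer
    unsigned char* back;       // end of the backing buffer
    unsigned char* getcursor;  // next read position
    unsigned char* putcursor;  // next write position
} queue_sketch;

static void q_copy_sketch(queue_sketch* dst, const queue_sketch* src) {
    *dst = *src;  // shallow struct copy
}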
Example #2
ot_int m2advp_init_flood(m2session* session, ot_u16 schedule) {
#if (SYS_FLOOD == ENABLED) 
#   ifdef DEBUG_ON
        // Bug catcher
        if (session->counter > (32767 /* -RADIO_TURNON_LAG */ )) {
            //OT_LOGFAIL();
            return -1;
        }
#   endif

    /// Set Netstate to match advertising type
    session->netstate = (   M2_NETFLAG_FLOOD | M2_NETSTATE_REQTX | \
                            M2_NETSTATE_INIT /* | M2_NETSTATE_SYNCED */   );

    /// Store existing TXQ (bit of a hack)
    q_copy(&advq, &txq);
    
    /// Reinit txq to the advertising buffer, and load data that will stay the
    /// same for all packets in the flood.
    q_init(&txq, txadv_buffer, 10);
    
    txq.front[0]    = session->subnet;
    txq.front[1]    = M2_PROTOCOL_M2ADVP;
    txq.front[2]    = session->channel;
    txq.front[3]    = ((ot_u8*)&schedule)[UPPER];
    txq.front[4]    = ((ot_u8*)&schedule)[LOWER];
 
    return 0;
#else
    return -1;
#endif
}
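Note: the schedule value is serialized by indexing the two bytes of an ot_u16, so the byte order on the wire is fixed regardless of what the host uses. A minimal sketch of the assumed UPPER/LOWER indexing (the macro values are platform-dependent in OpenTag):

// Assumption: UPPER/LOWER select the most- and least-significant bytes
// of a 16-bit value, so 0x1234 goes onto the wire as 0x12 then 0x34.
ot_u16 schedule = 0x1234;
ot_u8  hi = ((ot_u8*)&schedule)[UPPER];  // 0x12
ot_u8  lo = ((ot_u8*)&schedule)[LOWER];  // 0x34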
Example #3
void vrpn_Tracker_DeadReckoning_Rotation::handle_tracker_velocity_report(void *userdata,
    const vrpn_TRACKERVELCB info)
{
    // Find the pointer to the class that registered the callback and get a
    // reference to the RotationState we're supposed to be using.
    vrpn_Tracker_DeadReckoning_Rotation *me =
        static_cast<vrpn_Tracker_DeadReckoning_Rotation *>(userdata);
    if (info.sensor >= me->d_numSensors) {
        me->send_text_message(vrpn_TEXT_WARNING)
            << "Received velocity message from sensor " << info.sensor
            << " but I only have " << me->d_numSensors
            << "sensors.  Discarding.";
        return;
    }
    vrpn_Tracker_DeadReckoning_Rotation::RotationState &state =
        me->d_rotationStates[info.sensor];

    // Store the rotational information and indicate that we have gotten
    // a report of the tracker velocity.
    state.d_receivedAngularVelocityReport = true;
    q_copy(state.d_rotationAmount, info.vel_quat);
    state.d_rotationInterval = info.vel_quat_dt;

    // We have new data, so we send a new prediction.
    me->sendNewPrediction(info.sensor);

    // Pass through the velocity estimate.
    me->report_pose_velocity(info.sensor, info.msg_time, info.vel,
        info.vel_quat, info.vel_quat_dt);
}
Example #4
float KChainOrientationBodySchema::TryInverseKinematics(const cart_vec_t position){
  int i,j;
  float cand,rdist=0,tdist=0,k=0.0001; // rotation vs. translation weight
  CVector3_t stack[MAX_LINKS];
  CVector3_t tar,newrod,rod,diff,pos;
  CQuat_t q1,q2,q3,q4,rot;

  // converting data format
  v_copy(position.GetArray(),pos);
  q_complete(position.GetArray()+3,rot);

  q_inv(rot,q1);
  for(i=0;i<nb_joints;i++){
    GetQuaternion(i,q2);
    q_multiply(q2,q1,q3);
    q_copy(q3,q1);
  }
  for(j=0;j<50;j++){
#ifdef WITH_LAST_LINK
    last_link->GetTranslation(rod);
#else
    v_clear(rod);
#endif
    InverseKinematicsStack(pos,stack);
    for(i=nb_joints-1;i>=0;i--){
      GetInverseQuaternion(i,q2);
      q_multiply(q2,q1,q3); // q1*inv(Ri)
      if(IsInverted(i)==-1){
        v_copy(stack[i],tar);
        Translate(i,rod,newrod); // getting to joint
        cand = joints[i]->MinimizePositionAndRotationAngle(tar,newrod,q3,k);
        cand = AngleInClosestRange(i,cand);
        joints[i]->SetAngle(-cand); // TODO: check whether the angle should be negated
        Rotate(i,newrod,rod);   // updating "rod"
        v_sub(tar,rod,diff);
      }
      else{
        Rotate(i,stack[i+1],tar); // rotating back
        cand = joints[i]->MinimizePositionAndRotationAngle(tar,rod,q3,k);
        cand = AngleInClosestRange(i,cand);
        joints[i]->SetAngle(cand);
        Rotate(i,rod,newrod);
        Translate(i,newrod,rod);
        v_sub(tar,newrod,diff);
      }
      GetQuaternion(i,q2);
      q_multiply(q3,q2,q1);
      q_multiply(q2,q3,q4);
      rdist = v_length(q4);   // rotation distance, only the first 3 components
      tdist = v_length(diff); // translation distance
      // cout<<"rot "<<rdist<<" pos "<<tdist<<" prod: "<<(1-k)*rdist+k*tdist<<endl;
      if(tdist<tol && rdist<rot_tol){return rdist/rot_tol+tdist;}
    }
    q_multiply(rot,q1,q2);
    q_inv(rot,q3);
    q_multiply(q2,q3,q1);
  }
  return rdist/rot_tol + tdist;
}
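Note: the convergence test uses the norm of the residual quaternion's vector part as a rotation distance. For a unit quaternion (x, y, z, w) representing a rotation by angle theta, |(x, y, z)| = sin(theta/2), so the metric goes to zero as the residual rotation does. A standalone sketch:

#include <cmath>

// Rotation distance used by the convergence test above: the norm of the
// quaternion's first three components, sin(theta/2) for a unit quaternion.
double rotation_distance(const double q[4]) {
    return std::sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2]);
}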
Example #5
static void VRPN_CALLBACK tracker_handler(void *userdata, const vrpn_TRACKERCB t)
{
        // Store the updated position and orientation of the targeted joint
        q_vec_copy(position[t.sensor], t.pos);
        q_copy(orientation[t.sensor], t.quat);


        // std::cout << "Tracker '" << t.sensor << "' : " << t.pos[0] << "," << t.pos[1] << "," << t.pos[2] << std::endl;
}
Example #6
// Fills in the POSE_INFO structure that is passed in with the data it
// receives.
static void VRPN_CALLBACK handle_test_tracker_report(void *userdata,
    const vrpn_TRACKERCB info)
{
    POSE_INFO *pi = static_cast<POSE_INFO *>(userdata);
    pi->time = info.msg_time;
    pi->sensor = info.sensor;
    q_vec_copy(pi->pos, info.pos);
    q_copy(pi->quat, info.quat);
}
Example #7
// Fills in the VEL_INFO structure that is passed in with the data it
// receives.
static void VRPN_CALLBACK handle_test_tracker_velocity_report(void *userdata,
    const vrpn_TRACKERVELCB info)
{
    VEL_INFO *vi = static_cast<VEL_INFO *>(userdata);
    vi->time = info.msg_time;
    vi->sensor = info.sensor;
    q_vec_copy(vi->pos, info.vel);
    q_copy(vi->quat, info.vel_quat);
    vi->quat_dt = info.vel_quat_dt;
}
Example #8
void vrpn_Freespace::handleBodyFrame(const struct freespace_BodyFrame& body) {
    vrpn_gettimeofday(&(vrpn_Tracker::timestamp), NULL);
    vrpn_float64 now = vrpn_Tracker::timestamp.tv_sec + MILLIS_TO_SECONDS(vrpn_Tracker::timestamp.tv_usec);
    vrpn_float64 delta = now - _lastBodyFrameTime;
    _lastBodyFrameTime = now;

    vrpn_Tracker::acc[0] = convertFreespaceLinearAccelation(body.linearAccelX);
    vrpn_Tracker::acc[1] = convertFreespaceLinearAccelation(body.linearAccelY);
    vrpn_Tracker::acc[2] = convertFreespaceLinearAccelation(body.linearAccelZ);
    q_copy(vrpn_Tracker::acc_quat, gs_qIdent);
    vrpn_Tracker::acc_quat_dt = delta;
    vrpn_Tracker_Server::report_pose_acceleration(0, vrpn_Tracker::timestamp,
        vrpn_Tracker::acc, vrpn_Tracker::acc_quat, delta);

    vrpn_Tracker::vel[0] = convertFreespaceAngularVelocity(body.angularVelX, 1);
    vrpn_Tracker::vel[1] = convertFreespaceAngularVelocity(body.angularVelY, 1);
    vrpn_Tracker::vel[2] = convertFreespaceAngularVelocity(body.angularVelZ, 1);
    q_copy(vrpn_Tracker::vel_quat, gs_qIdent);
    vrpn_Tracker::vel_quat_dt = delta;
    vrpn_Tracker_Server::report_pose_velocity(0, vrpn_Tracker::timestamp,
        vrpn_Tracker::vel, vrpn_Tracker::vel_quat, vrpn_Tracker::vel_quat_dt);

    vrpn_Button::buttons[0] = body.button1;
    vrpn_Button::buttons[1] = body.button2;
    vrpn_Button::buttons[2] = body.button3;
    vrpn_Button::buttons[3] = body.button4;
    vrpn_Button::buttons[4] = body.button5;

    vrpn_Button::timestamp = vrpn_Tracker::timestamp;
    vrpn_Button::report_changes();

    // This is somewhat arbitrary.  It is not clear exactly how 'deltaWheel'
    // should be interpreted, but it seems to be the number of 'clicks' while
    // turning the scroll wheel.  VRPN wants this as a 'fraction of a
    // revolution', so a return of 1 would mean the dial was spun 360 degrees.
    // We pretend it takes 16 clicks to make a full revolution; this value
    // should probably be set in a configuration file.
    vrpn_Dial::dials[0] = body.deltaWheel / (vrpn_float64) 16;
    vrpn_Dial::timestamp = vrpn_Tracker::timestamp;
    vrpn_Dial::report_changes();
}
Example #9
void vrpn_Tracker_DeadReckoning_Rotation::handle_tracker_report(void *userdata,
    const vrpn_TRACKERCB info)
{
    // Find the pointer to the class that registered the callback and get a
    // reference to the RotationState we're supposed to be using.
    vrpn_Tracker_DeadReckoning_Rotation *me =
        static_cast<vrpn_Tracker_DeadReckoning_Rotation *>(userdata);
    if (info.sensor >= me->d_numSensors) {
        me->send_text_message(vrpn_TEXT_WARNING)
            << "Received tracker message from sensor " << info.sensor
            << " but I only have " << me->d_numSensors
            << "sensors.  Discarding.";
        return;
    }
    vrpn_Tracker_DeadReckoning_Rotation::RotationState &state =
        me->d_rotationStates[info.sensor];

    if (!state.d_receivedAngularVelocityReport && me->d_estimateVelocity) {
        // If we have not received any velocity reports, then we estimate
        // the angular velocity using the last report (if any).  The new
        // combined rotation T3 = T2 * T1, where T2 is the difference in
        // rotation between the last time (T1) and now (T3).  We want to
        // solve for T2 (so we can keep applying it going forward).  We
        // find it by right-multiplying both sides of the equation by
        // T1i (inverse of T1): T3 * T1i = T2.
        if (state.d_lastReportTime.tv_sec != 0) {
            q_type inverted;
            q_invert(inverted, state.d_lastOrientation);
            q_mult(state.d_rotationAmount, info.quat, inverted);
            state.d_rotationInterval = vrpn_TimevalDurationSeconds(
                info.msg_time, state.d_lastReportTime);
            // If we get a zero or negative rotation interval, we're
            // not going to be able to handle it, so we set things back
            // to no rotation over a unit-time interval.
            if (state.d_rotationInterval <= 0) {
                state.d_rotationInterval = 1;
                q_make(state.d_rotationAmount, 0, 0, 0, 1);
            }
        }
    }

    // Keep track of the position, orientation and time for the next report
    q_vec_copy(state.d_lastPosition, info.pos);
    q_copy(state.d_lastOrientation, info.quat);
    state.d_lastReportTime = info.msg_time;

    // We have new data, so we send a new prediction.
    me->sendNewPrediction(info.sensor);
}
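Note: the velocity estimate solves T3 = T2 * T1 for the per-interval delta T2 by right-multiplying both sides with the inverse of the previous orientation T1. A minimal sketch with quatlib calls (the variable names are hypothetical):

// T3 = T2 * T1  =>  T2 = T3 * inverse(T1)
q_type t1_inverse, delta;
q_invert(t1_inverse, last_orientation);          // T1i
q_mult(delta, current_orientation, t1_inverse);  // T2 = T3 * T1i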
Example #10
vrpn_Tracker_DeadReckoning_Rotation::vrpn_Tracker_DeadReckoning_Rotation(
    std::string myName, vrpn_Connection *c, std::string origTrackerName,
    vrpn_int32 numSensors, vrpn_float64 predictionTime, bool estimateVelocity)
    : vrpn_Tracker_Server(myName.c_str(), c, numSensors)
    , d_estimateVelocity(estimateVelocity)
{
    // Do the things all tracker servers need to do.
    num_sensors = numSensors;
    register_server_handlers();

    // Set values that don't need to be set in the list above, so that we
    // don't worry about out-of-order warnings from the compiler in case we
    // adjust the order in the header file in the future.
    d_predictionTime = predictionTime;
    d_numSensors = numSensors;

    // If the name of the tracker we're using starts with a '*' character,
    // we use our own connection to talk with it.  Otherwise, we open a remote
    // tracker with the specified name.
    if (origTrackerName[0] == '*') {
        d_origTracker = new vrpn_Tracker_Remote(&(origTrackerName.c_str()[1]), c);
    }
    else {
        d_origTracker = new vrpn_Tracker_Remote(origTrackerName.c_str());
    }

    // Initialize the rotational state of each sensor.  There has not been
    // an orientation report, so we set its time to 0.
    // Set up callback handlers for the pose and pose velocity messages,
    // from which we will extract the orientation and orientation velocity
    // information.
    for (vrpn_int32 i = 0; i < numSensors; i++) {
        q_type no_rotation = { 0, 0, 0, 1 };
        RotationState rs;
        q_copy(rs.d_rotationAmount, no_rotation);
        rs.d_rotationInterval = 1;
        rs.d_receivedAngularVelocityReport = false;
        rs.d_lastReportTime.tv_sec = 0;
        rs.d_lastReportTime.tv_usec = 0;
        d_rotationStates.push_back(rs);
    }

    // Register handler for all sensors, so we'll be told the ID
    d_origTracker->register_change_handler(this, handle_tracker_report);
    d_origTracker->register_change_handler(this, handle_tracker_velocity_report);
}
Example #11
void	vrpn_Tracker_AnalogFly::update_matrix_based_on_values
                  (double time_interval)
{
  double tx,ty,tz, rx,ry,rz;	// Translation (m/s) and rotation (rad/sec)
  q_matrix_type diffM;		// Difference (delta) matrix

  // For absolute trackers, the interval is treated as "1", so that the
  // translations and rotations are unscaled.
  if (d_absolute) { time_interval = 1.0; }

  // compute the translation and rotation
  tx = d_x.value * time_interval;
  ty = d_y.value * time_interval;
  tz = d_z.value * time_interval;
  
  rx = d_sx.value * time_interval * (2*VRPN_PI);
  ry = d_sy.value * time_interval * (2*VRPN_PI);
  rz = d_sz.value * time_interval * (2*VRPN_PI);

  // Build a rotation matrix, then add in the translation
  q_euler_to_col_matrix(diffM, rz, ry, rx);
  diffM[3][0] = tx; diffM[3][1] = ty; diffM[3][2] = tz;

  // While the clutch is not engaged, we don't move.  Record that
  // the clutch was off so that we know later when it is re-engaged.
  if (!d_clutch_engaged) {
    d_clutch_was_off = true;
    return;
  }
  
  // When the clutch becomes re-engaged, we store the current matrix
  // multiplied by the inverse of the present differential matrix so that
  // the first frame of the mouse-hold leaves us in the same location.
  // For the absolute matrix, this re-engages new motion at the previous
  // location.
  if (d_clutch_engaged && d_clutch_was_off) {
    d_clutch_was_off = false;
    q_type  diff_orient;
    // This is backwards, because Euler angles have rotation about Z first...
    q_from_euler(diff_orient, rz, ry, rx);
    q_xyz_quat_type diff;
    q_vec_set(diff.xyz, tx, ty, tz);
    q_copy(diff.quat, diff_orient);
    q_xyz_quat_type  diff_inverse;
    q_xyz_quat_invert(&diff_inverse, &diff);
    q_matrix_type di_matrix;
    q_to_col_matrix(di_matrix, diff_inverse.quat);
    di_matrix[3][0] = diff_inverse.xyz[0];
    di_matrix[3][1] = diff_inverse.xyz[1];
    di_matrix[3][2] = diff_inverse.xyz[2];
    q_matrix_mult(d_clutchMatrix, di_matrix, d_currentMatrix);
  }

  // Apply the matrix.
  if (d_absolute) {
      // The difference matrix IS the current matrix.  Catenate it
      // onto the clutch matrix.  If there is no clutching happening,
      // this matrix will always be the identity so this will just
      // copy the difference matrix.
      q_matrix_mult(d_currentMatrix, diffM, d_clutchMatrix);
  } else {
      // Multiply the current matrix by the difference matrix to update
      // it to the current time. 
      if (d_worldFrame) {
        // If using world frame:
        // 1. Separate out the translation and add to the differential translation
        tx += d_currentMatrix[3][0];
        ty += d_currentMatrix[3][1];
        tz += d_currentMatrix[3][2];
        diffM[3][0] = 0; diffM[3][1] = 0; diffM[3][2] = 0;
        d_currentMatrix[3][0] = 0; d_currentMatrix[3][1] = 0; d_currentMatrix[3][2] = 0;

        // 2. Compose the rotations.
        q_matrix_mult(d_currentMatrix, d_currentMatrix, diffM);

        // 3. Put the new translation back in the matrix.
        d_currentMatrix[3][0] = tx; d_currentMatrix[3][1] = ty; d_currentMatrix[3][2] = tz;

      } else {
        q_matrix_mult(d_currentMatrix, diffM, d_currentMatrix);
      }
  }

  // Finally, convert the matrix into a pos/quat
  // and copy it into the tracker position and quaternion structures.
  convert_matrix_to_tracker();
}
Example #12
void m2advp_close() {
#if (SYS_FLOOD == ENABLED)
    /// Restore original TXQ
    q_copy(&txq, &advq);
#endif
}
Example #13
void vrpn_Tracker_DeadReckoning_Rotation::sendNewPrediction(vrpn_int32 sensor)
{
    //========================================================================
    // Figure out which rotation state we're supposed to use.
    if (sensor >= d_numSensors) {
        send_text_message(vrpn_TEXT_WARNING)
            << "sendNewPrediction: Asked for sensor " << sensor
            << " but I only have " << d_numSensors
            << "sensors.  Discarding.";
        return;
    }
    vrpn_Tracker_DeadReckoning_Rotation::RotationState &state =
        d_rotationStates[sensor];

    //========================================================================
    // If we haven't had a tracker report yet, nothing to send.
    if (state.d_lastReportTime.tv_sec == 0) {
        return;
    }

    //========================================================================
    // If we don't have permission to estimate velocity and haven't gotten it
    // either, then we just pass along the report.
    if (!state.d_receivedAngularVelocityReport && !d_estimateVelocity) {
        report_pose(sensor, state.d_lastReportTime, state.d_lastPosition,
                    state.d_lastOrientation);
        return;
    }

    //========================================================================
    // Estimate the future orientation based on the current angular velocity
    // estimate and the last reported orientation.  Predict it into the future
    // the amount we've been asked to.

    // Start with the previous orientation.
    q_type newOrientation;
    q_copy(newOrientation, state.d_lastOrientation);

    // Rotate it by the amount to rotate once for every integral multiple
    // of the rotation time we've been asked to go.
    double remaining = d_predictionTime;
    while (remaining > state.d_rotationInterval) {
        q_mult(newOrientation, state.d_rotationAmount, newOrientation);
        remaining -= state.d_rotationInterval;
    }

    // Then rotate it by the remaining fractional amount.
    double fractionTime = remaining / state.d_rotationInterval;
    q_type identity = { 0, 0, 0, 1 };
    q_type fractionRotation;
    q_slerp(fractionRotation, identity, state.d_rotationAmount, fractionTime);
    q_mult(newOrientation, fractionRotation, newOrientation);

    //========================================================================
    // Find out the future time for which we will be predicting by adding the
    // prediction interval to our last report time.
    struct timeval future_time;
    struct timeval delta;
    delta.tv_sec = static_cast<unsigned long>(d_predictionTime);
    double remainder = d_predictionTime - delta.tv_sec;
    delta.tv_usec = static_cast<unsigned long>(remainder * 1e6);
    future_time = vrpn_TimevalSum(delta, state.d_lastReportTime);

    //========================================================================
    // Pack our predicted tracker report for this future time.
    // Use the position portion of the report unaltered.
    if (0 != report_pose(sensor, future_time, state.d_lastPosition, newOrientation)) {
      fprintf(stderr, "vrpn_Tracker_DeadReckoning_Rotation::sendNewPrediction(): Can't report pose\n");
    }
}
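Note: the prediction applies the per-interval rotation once per whole interval, then slerps from the identity for the leftover fraction of an interval. A minimal sketch of that final step (the names are hypothetical):

// Interpolate from no rotation toward the per-interval rotation by the
// leftover fraction t in [0, 1), then fold it into the running prediction.
q_type identity = { 0, 0, 0, 1 };
q_type fraction;
double t = remaining / interval;
q_slerp(fraction, identity, rotation_per_interval, t);
q_mult(predicted, fraction, predicted);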
Example #14
void vrpn_Tracker_WiimoteHead::_convert_pose_to_tracker() {
	q_vec_copy(pos, d_currentPose.xyz); // set position;
	q_copy(d_quat, d_currentPose.quat); // set orientation
}
Example #15
void vrpn_Tracker_WiimoteHead::_update_2_LED_pose(q_xyz_quat_type & newPose) {
	if (d_points != 2) {
		// we simply stop updating our pos+orientation if we lost LEDs
		// TODO: right now if we don't have exactly 2 points we lose the lock
		d_lock = false;
		d_flipState = FLIP_UNKNOWN;
		return;
	}

	// TODO right now only handling the 2-LED glasses

	d_lock = true;
	double rx, ry, rz;
	rx = ry = rz = 0;

	double X0, X1, Y0, Y1;

	X0 = d_vX[0];
	X1 = d_vX[1];
	Y0 = d_vY[0];
	Y1 = d_vY[1];

	if (d_flipState == FLIP_180) {
		/// If the first report of a new tracking lock indicated that
		/// our "up" vector had no positive y component, we have the
		/// points in the wrong order - flip them around.
		/// This uses the assumption that the first time we see the glasses,
		/// they ought to be right-side up (a reasonable assumption for
		/// head tracking)
		std::swap(X0, X1);
		std::swap(Y0, Y1);
	}

	const double dx = X0 - X1;
	const double dy = Y0 - Y1;
	const double dist = sqrt(dx * dx + dy * dy);
	const double angle = dist * cvtDistToAngle;
	// Note that this is an approximation, since we don't know the
	// distance/horizontal position.  (I think...)

	const double headDist = (d_blobDistance / 2.0) / tan(angle);

	// Translate the distance along z axis, and tilt the head
	newPose.xyz[2] = headDist;      // translate along Z
	rz = atan2(dy, dx);     // rotate around Z

	// Find the sensor pixel of the line of sight - directly between
	// the LEDs
	const double avgX = (X0 + X1) / 2.0;
	const double avgY = (Y0 + Y1) / 2.0;

	/// @todo For some unnerving reason, in release builds, avgX tends to become NaN/undefined
	/// However, any kind of inspection (such as the following, or even a simple cout)
	/// appears to prevent the issue.  This makes me uneasy, but I won't argue with
	/// what is working.
	if (wm_isnan(avgX)) {
		std::cerr << "NaN detected in avgX: X0 = " << X0 << ", X1 = " << X1 << std::endl;
		return;
	}

	if (wm_isnan(avgY)) {
		std::cerr << "NaN detected in avgY: Y0 = " << Y0 << ", Y1 = " << Y1 << std::endl;
		return;
	}

	// b is the virtual depth in the sensor from a point to the full sensor
	// used for finding similar triangles to calculate x/y translation
	const double bHoriz = xResSensor / 2 / tan(fovX / 2);
	const double bVert = yResSensor / 2 / tan(fovY / 2);

	// World head displacement (X and Y) from a centered origin at
	// the calculated distance from the sensor
	newPose.xyz[0] = headDist * (avgX - xResSensor / 2) / bHoriz;
	newPose.xyz[1] = headDist * (avgY - yResSensor / 2) / bVert;

	// set the quat. part of our pose with rotation angles
	q_from_euler(newPose.quat, rz, ry, rx);

	// Apply the new pose
	q_vec_copy(d_currentPose.xyz, newPose.xyz);
	q_copy(d_currentPose.quat, newPose.quat);
}
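Note: the range estimate comes from the angle the two LEDs subtend on the camera sensor: half the known LED separation divided by the tangent of that angle. A standalone sketch (the names are hypothetical):

#include <cmath>

// Two LEDs separated by blob_distance_m subtend angle_rad at the camera;
// similar triangles give the head's distance along the view axis.
double head_distance(double blob_distance_m, double angle_rad) {
    return (blob_distance_m / 2.0) / std::tan(angle_rad);
}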
Example #16
void vrpn_Tracker_OSVRHackerDevKit::on_data_received(std::size_t bytes,
                                                     vrpn_uint8 *buffer)
{
    if (bytes != 32 && bytes != 16) {
        send_text_message(vrpn_TEXT_WARNING)
            << "Received a report " << bytes
            << " in length, but expected it to be 32 or 16 bytes. Discarding. "
               "(May indicate issues with HID!)";
        return;
    }

    vrpn_uint8 firstByte = vrpn_unbuffer_from_little_endian<vrpn_uint8>(buffer);

    vrpn_uint8 version = vrpn_uint8(0x0f) & firstByte;
    _reportVersion = version;

    switch (version) {
    case 1:
        if (bytes != 32 && bytes != 16) {
            send_text_message(vrpn_TEXT_WARNING)
                << "Received a v1 report " << bytes
                << " in length, but expected it to be 32 or 16 bytes. "
                   "Discarding. "
                   "(May indicate issues with HID!)";
            return;
        }
        break;
    case 2:
        if (bytes != 16) {
            send_text_message(vrpn_TEXT_WARNING)
                << "Received a v2 report " << bytes
                << " in length, but expected it to be 16 bytes. Discarding. "
                   "(May indicate issues with HID!)";
            return;
        }
        break;

    case 3:
        /// @todo once this report format is finalized, tighten up the
        /// requirements.
        if (bytes < 16) {
            send_text_message(vrpn_TEXT_WARNING)
                << "Received a v3 report " << bytes
                << " in length, but expected it to be at least 16 bytes. "
                   "Discarding. "
                   "(May indicate issues with HID!)";
            return;
        }
        break;
    default:
        /// Highlight that we don't know this report version well...
        _knownVersion = false;
        /// Do a minimal check of it.
        if (bytes < 16) {
            send_text_message(vrpn_TEXT_WARNING)
                << "Received a report claiming to be version " << int(version)
                << " that was " << bytes << " in length, but expected it to be "
                                            "at least 16 bytes. Discarding. "
                                            "(May indicate issues with HID!)";
            return;
        }
        break;
    }

    // Report version as analog channel 0.
    channel[0] = version;

    vrpn_uint8 msg_seq = vrpn_unbuffer_from_little_endian<vrpn_uint8>(buffer);

    // Signed, 16-bit, fixed-point numbers in Q1.14 format.
    typedef vrpn::FixedPoint<1, 14> FixedPointValue;
    d_quat[Q_X] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();
    d_quat[Q_Y] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();
    d_quat[Q_Z] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();
    d_quat[Q_W] =
        FixedPointValue(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
            .get<vrpn_float64>();

    vrpn_Tracker::timestamp = _timestamp;
    {
        char msgbuf[512];
        int len = vrpn_Tracker::encode_to(msgbuf);
        if (d_connection->pack_message(len, _timestamp, position_m_id,
                                       d_sender_id, msgbuf,
                                       vrpn_CONNECTION_LOW_LATENCY)) {
            fprintf(stderr, "vrpn_Tracker_OSVRHackerDevKit: cannot write "
                            "message: tossing\n");
        }
    }
    if (version >= 2) {
        // We've got angular velocity in this message too
        // Given XYZ radians per second velocity.
        // Signed Q6.9
        typedef vrpn::FixedPoint<6, 9> VelFixedPoint;
        q_vec_type angVel;
        angVel[0] =
            VelFixedPoint(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
                .get<vrpn_float64>();
        angVel[1] =
            VelFixedPoint(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
                .get<vrpn_float64>();
        angVel[2] =
            VelFixedPoint(vrpn_unbuffer_from_little_endian<vrpn_int16>(buffer))
                .get<vrpn_float64>();

        //==================================================================
        // Determine the rotational velocity, which is
        // measured in the rotated coordinate system.  We need to rotate the
        // difference Euler angles back to the canonical orientation, apply
        // the change, and then rotate back to change our coordinates.
        // Be sure to scale by the time value vrpn_HDK_DT.
        q_type forward, inverse;
        q_copy(forward, d_quat);
        q_invert(inverse, forward);
        // Remember that Euler angles in Quatlib have rotation around Z in
        // the first term.  Compute the time-scaled delta transform in
        // canonical space.
        q_type delta;
        {
            delta[Q_W] = 0;
            delta[Q_X] = angVel[Q_X] * vrpn_HDK_DT * 0.5;
            delta[Q_Y] = angVel[Q_Y] * vrpn_HDK_DT * 0.5;
            delta[Q_Z] = angVel[Q_Z] * vrpn_HDK_DT * 0.5;
            q_exp(delta, delta);
            q_normalize(delta, delta);
        }
        // Bring the delta back into canonical space
        q_type canonical;
        q_mult(canonical, delta, inverse);
        q_mult(vel_quat, forward, canonical);

        // Send the rotational velocity information.
        // The dt value was set once, in the constructor.
        char msgbuf[512];
        int len = vrpn_Tracker::encode_vel_to(msgbuf);
        if (d_connection->pack_message(len, _timestamp, velocity_m_id,
                                       d_sender_id, msgbuf,
                                       vrpn_CONNECTION_LOW_LATENCY)) {
            fprintf(stderr, "vrpn_Tracker_OSVRHackerDevKit: cannot write "
                            "message: tossing\n");
        }
    }
    if (version < 3) {
        // No status info hidden in the first byte.
        channel[1] = STATUS_UNKNOWN;
    }
    else {
        // v3+: We've got status info in the upper nibble of the first byte.
        bool gotVideo = (firstByte & (0x01 << 4)) != 0;    // got video?
        bool gotPortrait = (firstByte & (0x01 << 5)) != 0; // portrait mode?
        if (!gotVideo) {
            channel[1] = STATUS_NO_VIDEO_INPUT;
        }
        else {
            if (gotPortrait) {
                channel[1] = STATUS_PORTRAIT_VIDEO_INPUT;
            }
            else {
                channel[1] = STATUS_LANDSCAPE_VIDEO_INPUT;
            }
        }
    }

    if (_messageCount == 0) {
        // When _messageCount overflows, send a report whether or not there was
        // a change.
        vrpn_Analog::report();
    }
    else {
        // otherwise just report if we have a change.
        vrpn_Analog::report_changes();
    }
    _messageCount = (_messageCount + 1) % vrpn_HDK_STATUS_STRIDE;
}
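Note: the orientation components arrive as signed Q1.14 fixed-point values (1 integer bit, 14 fractional bits) and the angular velocity as signed Q6.9, which is what vrpn::FixedPoint decodes above. A minimal sketch of the assumed conversion:

#include <cstdint>

// Signed Q1.14: raw / 2^14, covering roughly [-2, 2).
double q1_14_to_double(int16_t raw) { return raw / 16384.0; }

// Signed Q6.9: raw / 2^9, covering roughly [-64, 64).
double q6_9_to_double(int16_t raw) { return raw / 512.0; }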
Example #17
void	vrpn_IMU_SimpleCombiner::update_matrix_based_on_values(double time_interval)
{
  //==================================================================
  // Adjust the orientation based on the rotational velocity, which is
  // measured in the rotated coordinate system.  We need to rotate the
  // difference vector back to the canonical orientation, apply the
  // orientation change there, and then rotate back.
  // Be sure to scale by the time value.
  q_type forward, inverse;
  q_copy(forward, d_quat);
  q_invert(inverse, forward);
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_type delta;
  q_from_euler(delta,
    time_interval * d_rotational_vel.values[Q_Z],
    time_interval * d_rotational_vel.values[Q_Y],
    time_interval * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_type canonical;
  q_mult(canonical, delta, inverse);
  q_mult(canonical, forward, canonical);
  q_mult(d_quat, canonical, d_quat);

  //==================================================================
  // To the extent that the acceleration vector's magnitude is equal
  // to the expected gravity, rotate the orientation so that the vector
  // points downward.  This is measured in the rotated coordinate system,
  // so we need to rotate back to canonical orientation and apply
  // the difference there and then rotate back.  The rate of rotation
  // should be as specified in the gravity-rotation-rate parameter so
  // we don't swing the head around too quickly but only slowly re-adjust.

  double accel = sqrt(
      d_acceleration.values[0] * d_acceleration.values[0] +
      d_acceleration.values[1] * d_acceleration.values[1] +
      d_acceleration.values[2] * d_acceleration.values[2] );
  double diff = fabs(accel - 9.80665);

  // Only adjust if we're close enough to the expected gravity
  // @todo In a more complicated IMU tracker, base this on state and
  // error estimates from a Kalman or other filter.
  double scale = 1.0 - diff;
  if (scale > 0) {
    // Get a new forward and inverse matrix from the current, just-
    // rotated matrix.
    q_copy(forward, d_quat);

    // Change how fast we adjust based on how close we are to the
    // expected value of gravity.  Then further scale this by the
    // amount of time since the last estimate.
    double gravity_scale = scale * d_gravity_restore_rate * time_interval;

    // Rotate the gravity vector by the estimated transform.
    // We expect this direction to match the global down (-Y) vector.
    q_vec_type gravity_global;
    q_vec_set(gravity_global, d_acceleration.values[0],
      d_acceleration.values[1], d_acceleration.values[2]);
    q_vec_normalize(gravity_global, gravity_global);
    q_xform(gravity_global, forward, gravity_global);
    //printf("  XXX Gravity: %lg, %lg, %lg\n", gravity_global[0], gravity_global[1], gravity_global[2]);

    // Determine the rotation needed to take gravity and rotate
    // it into the direction of -Y.
    q_vec_type neg_y;
    q_vec_set(neg_y, 0, -1, 0);
    q_type rot;
    q_from_two_vecs(rot, gravity_global, neg_y);

    // Scale the rotation by the fraction of the orientation we
    // should remove based on the time that has passed, how well our
    // gravity vector matches expected, and the specified rate of
    // correction.
    static q_type identity = { 0, 0, 0, 1 };
    q_type scaled_rot;
    q_slerp(scaled_rot, identity, rot, gravity_scale);
    //printf("XXX Scaling gravity vector by %lg\n", gravity_scale);

    // Rotate by this angle.
    q_mult(d_quat, scaled_rot, d_quat);

    //==================================================================
    // If we are getting compass data, and to the extent that the
    // acceleration vector's magnitude is equal to the expected gravity,
    // compute the cross product of the cross product to find the
    // direction of north perpendicular to down.  This is measured in
    // the rotated coordinate system, so we need to rotate back to the
    // canonical orientation and do the comparison there.
    // The fraction of rotation should be as specified in the
    // magnetometer-rotation-rate parameter so we don't swing the head
    // around too quickly but only slowly re-adjust.

    if (d_magnetometer.ana != NULL) {
      // Get a new forward and inverse matrix from the current, just-
      // rotated matrix.
      q_copy(forward, d_quat);

      // Find the North vector that is perpendicular to gravity by
      // clearing its Y axis to zero and renormalizing.
      q_vec_type magnetometer;
      q_vec_set(magnetometer, d_magnetometer.values[0],
        d_magnetometer.values[1], d_magnetometer.values[2]);
      q_vec_type magnetometer_global;
      q_xform(magnetometer_global, forward, magnetometer);
      magnetometer_global[Q_Y] = 0;
      q_vec_type north_global;
      q_vec_normalize(north_global, magnetometer_global);
      //printf("  XXX north_global: %lg, %lg, %lg\n", north_global[0], north_global[1], north_global[2]);

      // Determine the rotation needed to take north and rotate it
      // into the direction of negative Z.
      q_vec_type neg_z;
      q_vec_set(neg_z, 0, 0, -1);
      q_from_two_vecs(rot, north_global, neg_z);

      // Change how fast we adjust based on how close we are to the
      // expected value of gravity.  Then further scale this by the
      // amount of time since the last estimate.
      double north_rate = scale * d_north_restore_rate * time_interval;

      // Scale the rotation by the fraction of the orientation we
      // should remove based on the time that has passed, how well our
      // gravity vector matches expected, and the specified rate of
      // correction.
      static q_type identity = { 0, 0, 0, 1 };
      q_slerp(scaled_rot, identity, rot, north_rate);

      // Rotate by this angle.
      q_mult(d_quat, scaled_rot, d_quat);
    }
  }

  //==================================================================
  // Compute and store the velocity information
  // This will be in the rotated coordinate system, so we need to
  // rotate back to the identity orientation before storing.
  // Convert from radians/second into a quaternion rotation as
  // rotated in a hundredth of a second and set the rotational
  // velocity dt to a hundredth of a second so that we don't
  // risk wrapping.
  // Remember that Euler angles in Quatlib have rotation around Z in
  // the first term.  Compute the time-scaled delta transform in
  // canonical space.
  q_from_euler(delta,
    1e-2 * d_rotational_vel.values[Q_Z],
    1e-2 * d_rotational_vel.values[Q_Y],
    1e-2 * d_rotational_vel.values[Q_X]);
  // Bring the delta back into canonical space
  q_mult(canonical, delta, inverse);
  q_mult(vel_quat, forward, canonical);
  vel_quat_dt = 1e-2;
}
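Note: the gravity correction is a complementary-filter step: compute the full rotation that would align the measured down direction with world -Y, then apply only a slerp-scaled fraction of it each update. A minimal sketch (the names are hypothetical):

// Nudge the orientation toward agreement with the accelerometer's down vector.
q_type rot, scaled, identity = { 0, 0, 0, 1 };
q_from_two_vecs(rot, measured_down, world_down);    // full correction
q_slerp(scaled, identity, rot, restore_rate * dt);  // small fraction of it
q_mult(orientation, scaled, orientation);           // apply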
Example #18
int test_parallel(int num_threads) {
    tbb::flow::graph g;
    tbb::flow::queue_node<T> q(g);
    tbb::flow::queue_node<T> q2(g);
    tbb::flow::queue_node<T> q3(g);
    {
        Check< T > my_check;
        T bogus_value(-1);
        T j = bogus_value;
        NativeParallelFor( num_threads, parallel_puts<T>(q) );

        T *next_value = new T[num_threads];
        for (int tid = 0; tid < num_threads; ++tid) next_value[tid] = T(0);

        for (int i = 0; i < num_threads * N; ++i ) {
            spin_try_get( q, j );
            check_item( next_value, j );
            j = bogus_value;
        }
        for (int tid = 0; tid < num_threads; ++tid)  {
            ASSERT( next_value[tid] == T(N), NULL );
        }
        delete[] next_value;

        j = bogus_value;
        g.wait_for_all();
        ASSERT( q.try_get( j ) == false, NULL );
        ASSERT( j == bogus_value, NULL );

        NativeParallelFor( num_threads, parallel_puts<T>(q) );

        {
            touches< T > t( num_threads );
            NativeParallelFor( num_threads, parallel_gets<T>(q, t) );
            g.wait_for_all();
            ASSERT( t.validate_touches(), NULL );
        }
        j = bogus_value;
        ASSERT( q.try_get( j ) == false, NULL );
        ASSERT( j == bogus_value, NULL );

        g.wait_for_all();
        {
            touches< T > t2( num_threads );
            NativeParallelFor( num_threads, parallel_put_get<T>(q, t2) );
            g.wait_for_all();
            ASSERT( t2.validate_touches(), NULL );
        }
        j = bogus_value;
        ASSERT( q.try_get( j ) == false, NULL );
        ASSERT( j == bogus_value, NULL );

        tbb::flow::make_edge( q, q2 );
        tbb::flow::make_edge( q2, q3 );

        NativeParallelFor( num_threads, parallel_puts<T>(q) );
        {
            touches< T > t3( num_threads );
            NativeParallelFor( num_threads, parallel_gets<T>(q3, t3) );
            g.wait_for_all();
            ASSERT( t3.validate_touches(), NULL );
        }
        j = bogus_value;
        g.wait_for_all();
        ASSERT( q.try_get( j ) == false, NULL );
        g.wait_for_all();
        ASSERT( q2.try_get( j ) == false, NULL );
        g.wait_for_all();
        ASSERT( q3.try_get( j ) == false, NULL );
        ASSERT( j == bogus_value, NULL );

        // test copy constructor
        ASSERT( q.remove_successor( q2 ), NULL );
        NativeParallelFor( num_threads, parallel_puts<T>(q) );
        tbb::flow::queue_node<T> q_copy(q);
        j = bogus_value;
        g.wait_for_all();
        ASSERT( q_copy.try_get( j ) == false, NULL );
        ASSERT( q.register_successor( q_copy ) == true, NULL );
        {
            touches< T > t( num_threads );
            NativeParallelFor( num_threads, parallel_gets<T>(q_copy, t) );
            g.wait_for_all();
            ASSERT( t.validate_touches(), NULL );
        }
        j = bogus_value;
        ASSERT( q.try_get( j ) == false, NULL );
        ASSERT( j == bogus_value, NULL );
        ASSERT( q_copy.try_get( j ) == false, NULL );
        ASSERT( j == bogus_value, NULL );
    }

    return 0;
}
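Note: unlike the other examples, q_copy here is just the name of a tbb::flow::queue_node built through its copy constructor; as the test asserts, the copy shares the graph but not the buffered items. A minimal sketch of the queue_node API exercised above:

#include <tbb/flow_graph.h>
#include <cassert>

int main() {
    tbb::flow::graph g;
    tbb::flow::queue_node<int> q(g);
    tbb::flow::queue_node<int> q_copy(q);  // copy: same graph, empty buffer
    q.try_put(1);
    q.try_put(2);
    g.wait_for_all();
    int v = 0;
    bool ok = q.try_get(v);      // FIFO: the first item put comes out first
    assert(ok && v == 1);
    assert(!q_copy.try_get(v));  // the copy did not inherit buffered items
    return 0;
}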