コード例 #1
0
ファイル: vrpn_FileConnection.C プロジェクト: BlueBrain/vrpn
// Jump playback to an absolute file time by converting it into a time
// relative to the stream's reference time and delegating to jump_to_time().
int vrpn_File_Connection::jump_to_filetime( timeval absolute_time )
{
	// Use the earliest user-message time as the reference when it is known;
	// otherwise fall back to the connection start time.
	// XX get rid of this option - dtm
	const timeval & reference = d_earliest_user_time_valid
		? d_earliest_user_time : d_start_time;
	return jump_to_time( vrpn_TimevalDiff( absolute_time, reference ) );
}
コード例 #2
0
ファイル: vrpn_BaseClass.C プロジェクト: dborel/vrpn
// Per-mainloop-call client housekeeping: on the first call, registers the
// pong and dropped-connection handlers and starts a ping cycle; afterwards,
// while a ping is unanswered, re-pings every second and escalates from
// warnings (>= 3 s) to errors/flatline (>= 10 s).
void	vrpn_BaseClassUnique::client_mainloop(void)
{
    struct  timeval now;
    struct  timeval diff;

    // The first time through, set up a callback handler for the pong message so that we
    // know when we are getting them.  Also set up a handler for the system dropped-connection
    // message so that we can initiate a ping cycle when that happens.  Also, we'll initiate
    // the ping cycle here.

    if (d_first_mainloop && (d_connection != NULL)) {

	// Set up handlers for the pong message and for the system connection-drop message
	register_autodeleted_handler(d_pong_message_id, handle_pong, this, d_sender_id);
	register_autodeleted_handler(d_connection->register_message_type(vrpn_dropped_connection),
	    handle_connection_dropped, this);

	// Initiate a ping cycle;
	initiate_ping_cycle();

	// No longer first time through mainloop.
	d_first_mainloop = 0;
    }

    // If we are in the middle of a ping cycle...
    // Check if we've heard, if it has been long enough since we gave a warning or error (>= 1 sec).
    // If it has been three seconds or more since we sent our first ping,
    // start giving warnings.  If it has been ten seconds or more since we got one,
    // switch to errors.  New ping requests go out each second.

    if (d_unanswered_ping) {

        vrpn_gettimeofday(&now, NULL);
        // BUGFIX: vrpn_TimevalNormalize() returns the normalized value; the
        // previous code discarded that return, leaving diff untouched.
        diff = vrpn_TimevalNormalize(vrpn_TimevalDiff(now, d_time_last_warned));

        if (diff.tv_sec >= 1) {

            // Send a new ping, since it has been a second since the last one
            d_connection->pack_message(0, now, d_ping_message_id, d_sender_id,
                                       NULL, vrpn_CONNECTION_RELIABLE);

            // Send another warning or error, and say if we're flatlined (10+ seconds)
            d_time_last_warned = now;
            if (!shutup) {
                // Same ignored-return fix as above.
                diff = vrpn_TimevalNormalize(vrpn_TimevalDiff(now, d_time_first_ping));
                if (diff.tv_sec >= 10) {
                    send_text_message("No response from server for >= 10 seconds", now, vrpn_TEXT_ERROR, diff.tv_sec);
                    d_flatline = 1;
                } else if (diff.tv_sec >= 3) {
                    send_text_message("No response from server for >= 3 seconds", now, vrpn_TEXT_WARNING, diff.tv_sec);
                }
            }
        }
    }
}
コード例 #3
0
ファイル: vrpn_FileConnection.C プロジェクト: BlueBrain/vrpn
// Returns (via elapsed_time) the time since the connection opened.
// Some subclasses may redefine time.
// virtual
int vrpn_File_Connection::time_since_connection_open( timeval * elapsed_time )
{
	// Lazily compute the earliest/latest user-message timestamps if needed.
	if( !d_earliest_user_time_valid ) {
		this->find_superlative_user_times( );
	}

	// Prefer the earliest user-message time as the reference; fall back to
	// the start time otherwise.  XXX get rid of this option - dtm
	const timeval & reference = d_earliest_user_time_valid
		? d_earliest_user_time : d_start_time;
	*elapsed_time = vrpn_TimevalDiff( d_time, reference );

    return 0;
}
コード例 #4
0
// Forward a logging request to the logging thread, then wait (up to two
// seconds) for it to answer.  On success, report the names it gives back
// and deallocate them; on timeout, report empty names to signal failure.
void vrpn_Imager_Stream_Buffer::handle_request_logging(
    const char *local_in_logfile_name, const char *local_out_logfile_name,
    const char *remote_in_logfile_name, const char *remote_out_logfile_name)
{
    // Ask the logging thread to start new logs.
    d_shared_state.set_logfile_request(
        local_in_logfile_name, local_out_logfile_name, remote_in_logfile_name,
        remote_out_logfile_name);

    // Poll once per millisecond until the thread answers or we time out.
    struct timeval start, now;
    vrpn_gettimeofday(&start, NULL);
    vrpn_gettimeofday(&now, NULL);
    while (vrpn_TimevalDiff(now, start).tv_sec < 2) {
        char *lil = NULL, *lol = NULL, *ril = NULL, *rol = NULL;
        if (d_shared_state.get_logfile_result(&lil, &lol, &ril, &rol)) {
            // Got an answer: forward it, then free the strings we were handed.
            send_report_logging(lil, lol, ril, rol);
            delete[] lil;
            delete[] lol;
            delete[] ril;
            delete[] rol;
            return;
        }
        vrpn_SleepMsecs(1);
        vrpn_gettimeofday(&now, NULL);
    }

    // Timeout, report failure of logging by saying that there are empty log
    // file names.
    send_report_logging("", "", "", "");
}
コード例 #5
0
// First-connection hook: (re)start the logging thread and wait for it to
// report the imager description from the remote server, timing out after
// a few seconds if no description arrives.
void vrpn_Imager_Stream_Buffer::handle_got_first_connection(void)
{
    // There should be no thread in existence when this call is made.
    // If there is, kill it and complain.
    if (d_logging_thread->running()) {
        struct timeval now;
        vrpn_gettimeofday(&now, NULL);
        send_text_message(
            "handle_got_first_connection: Thread running when it should not be",
            now, vrpn_TEXT_ERROR);
        d_logging_thread->kill();
        return;
    }

    // Reset the shared state before starting the thread running.
    d_shared_state.init();

    // Create a thread whose userdata points at the object that
    // created it.  Then call the start function on that thread and wait
    // for its vrpn_Imager_Remote to receive the info from the remote server
    // it has connected to.  We time out after a few seconds if we don't
    // get the response, leaving us with a presumably broken connection
    // to the server.
    if (!d_logging_thread->go()) {
        struct timeval now;
        vrpn_gettimeofday(&now, NULL);
        send_text_message(
            "handle_got_first_connection: Failed to start logging thread", now,
            vrpn_TEXT_ERROR);
        delete d_logging_thread;
        d_logging_thread = NULL;
        return;
    }
    // Poll (1 ms at a time, up to 3 seconds) for the imager description
    // produced by the logging thread.
    struct timeval start, now;
    vrpn_gettimeofday(&start, NULL);
    do {
        const char *channelBuffer = NULL;
        if (d_shared_state.get_imager_description(
                d_nRows, d_nCols, d_nDepth, d_nChannels, &channelBuffer)) {
            // Unpack the per-channel descriptions from the serialized buffer,
            // then free the buffer (we own it once the call succeeds).
            int i;
            const char *bufptr = channelBuffer;
            for (i = 0; i < d_nChannels; i++) {
                d_channels[i].unbuffer(&bufptr);
            }
            delete[] const_cast<char *>(channelBuffer);
            return;
        }

        vrpn_SleepMsecs(1);
        vrpn_gettimeofday(&now, NULL);
    } while (vrpn_TimevalDiff(now, start).tv_sec < 3);

    // Timed out, so we won't be hearing from the server!
    vrpn_gettimeofday(&now, NULL);
    send_text_message("handle_got_first_connection: Didn't hear from server.",
                      now, vrpn_TEXT_WARNING);
}
コード例 #6
0
ファイル: nidaqClient.cpp プロジェクト: BlueBrain/vrpn
// Analog callback: print the elapsed time (in seconds) since the zero time
// pointed to by pvTVZero, followed by the voltage on every channel.
void printNidaq( void *pvTVZero, const vrpn_ANALOGCB cbInfo ) {
  struct timeval *tvZero = (struct timeval *)pvTVZero;
  double elapsedSecs =
      vrpn_TimevalMsecs( vrpn_TimevalDiff( cbInfo.msg_time, *tvZero ) ) * 0.001;
  cerr << elapsedSecs << " Voltages: ";
  for (int ch = 0; ch < cbInfo.num_channel; ch++) {
    cerr << cbInfo.channel[ch] << "\t";
  }
  cerr << endl;
}
コード例 #7
0
ファイル: vrpn_FileConnection.C プロジェクト: BlueBrain/vrpn
//virtual
// Length of the recorded stream: highest user-message timestamp minus the
// earliest one (both computed lazily on first use).
timeval vrpn_File_Connection::get_length()
{
    // Make sure both endpoint timestamps have been computed.
    if( !d_earliest_user_time_valid || !d_highest_user_time_valid )
    {
        this->get_lowest_user_timestamp( );
        this->get_highest_user_timestamp( );
    }

    return vrpn_TimevalDiff( d_highest_user_time, d_earliest_user_time );
}
コード例 #8
0
ファイル: vrpn_FileConnection.C プロジェクト: BlueBrain/vrpn
// Advance the accumulated file time up to now_time: the wall-clock time
// elapsed since the last accumulation, scaled by the replay rate, is added
// to the file time accumulated since the last playback.
void vrpn_File_Connection::FileTime_Accumulator::accumulate_to(
    const timeval & now_time )
{
    // Elapsed wall-clock time since the previous accumulation.
    timeval elapsed_wallclock =
        vrpn_TimevalDiff( now_time, d_time_of_last_accum );

    // Scale by replay rate and fold into the running file-time total.
    d_filetime_accum_since_last_playback =
        vrpn_TimevalSum( d_filetime_accum_since_last_playback,
                         vrpn_TimevalScale( elapsed_wallclock, d_replay_rate ) );

    // Remember the wall-clock time of this accumulation.
    d_time_of_last_accum = now_time;
}
コード例 #9
0
ファイル: vrpn_LUDL.C プロジェクト: vrpn/vrpn
// Command the given axis (1-based, judging by the axis-1 indexing below) to
// move to the given position.  Returns false on bad state or send failure,
// true otherwise — including the timeout path where the stage never starts
// moving (e.g. already at a limit).
bool vrpn_LUDL_USBMAC6000::move_axis_to_position(int axis, int position)
{
  if (!_device_handle) { return false; }
  if (!_axis_destination || !_axis_moving) { return false; }

  // If we're already at the place we're being asked to move to,
  // then we just go ahead and return.  Otherwise, the code below
  // that waits for us to start moving hangs.
  if (_axis_destination[axis-1] == position) {
	return true;
  }

  // Send the command to the device asking it to move.
  if (!send_usbmac_command(axis, LUDL_MOTOR_ACTION, LUDL_START_MOTOR_TARGET, position)) {
    REPORT_ERROR("vrpn_LUDL_USBMAC6000::move_axis_to_position(): Could not send command");
    return false;
  }

  // Wait until that axis starts to move.  If we don't do this, then
  // sometimes we hear back that there are no axes moving even though
  // we told them to.  Just waiting a while after we told them to move
  // does not help; there is still a report saying that they are not moving.
  // If the stage is at its limits or if we asked it to go where it already
  // is, then we'll wait forever here because it will not move.  So this
  // needs to time out and not set the axis to moving if we never see
  // it start to move.
  struct timeval start, now;
  vrpn_gettimeofday(&start, NULL);
  while (!ludl_axis_moving(axis)) {
	vrpn_gettimeofday(&now, NULL);
	struct timeval diff = vrpn_TimevalDiff(now, start);
	if (diff.tv_sec > 1) {
	  // Say that we moved there, but don't say that the axis is
	  // moving.
	  _axis_destination[axis-1] = position;
	  return true;
	}
  };

  // Indicate that we're expecting this axis to be moving and where we think it is
  // going, so that when the axis becomes no longer busy we know that we have gotten
  // there.

  _axis_destination[axis-1] = position;
  _axis_moving[axis-1] = true;
  return true;
}
コード例 #10
0
ファイル: argonot.cpp プロジェクト: CISMM/video
// GLUT display callback: draw the current image into the back buffer, swap,
// and keep a running frames-per-second report (printed every 5 seconds).
void myDisplayFunc(void)
{
  // Clear the window and prepare to draw in the back buffer
  glDrawBuffer(GL_BACK);
  glClearColor(0.0, 0.0, 0.0, 0.0);
  glClear(GL_COLOR_BUFFER_BIT);

  // Store the pixels from the image into the frame buffer
  // so that they cover the entire image (starting from lower-left
  // corner, which is at (-1,-1)).
  glRasterPos2f(-1, -1);
  glDrawPixels(g_ti->nCols(),g_ti->nRows(), GL_RGB, GL_UNSIGNED_BYTE, g_image);

  // Swap buffers so we can see it.
  glutSwapBuffers();

  // Capture timing information and print out how many frames per second
  // are being drawn.

  { static struct timeval last_print_time;
    struct timeval now;
    static bool first_time = true;
    static int frame_count = 0;

    if (first_time) {
      // First frame: just start the clock; nothing to report yet.
      gettimeofday(&last_print_time, NULL);
      first_time = false;
    } else {
      frame_count++;
      gettimeofday(&now, NULL);
      // vrpn_TimevalMsecs() yields milliseconds; * 0.001 converts to seconds.
      double timesecs = 0.001 * vrpn_TimevalMsecs(vrpn_TimevalDiff(now, last_print_time));
      if (timesecs >= 5) {
	double frames_per_sec = frame_count / timesecs;
	frame_count = 0;
	printf("Displayed frames per second = %lg\n", frames_per_sec);
	last_print_time = now;
      }
    }
  }

  // Have no longer posted redisplay since the last display.
  g_already_posted = false;
}
コード例 #11
0
ファイル: time_test.cpp プロジェクト: Lab411-HUST/Github
// Stress-test vrpn_gettimeofday() for monotonicity and step size: loop
// forever, reporting any backward step, repeated identical reading, or a
// forward jump larger than 50 ms.  Prints nothing while the clock behaves.
int main(int argc, char *argv[]) {

    /* XXX Checking how well the two clocks track each other
    unsigned long    lsec, lusec;
    unsigned long    fsec, fusec;
    long    dsec, dusec;
    int	    i;
    for (i = 0; i < 10; i++) {
	get_time_using_GetLocalTime(lsec, lusec);
	get_time_using_ftime(fsec, fusec);
	dsec = lsec - fsec;
	dusec = lusec - fusec;

	printf("L: %u:%u, F: %u:%u, Difference: %u:%ld\n",
	    lsec, lusec, fsec, fusec, dsec, dusec);

	Sleep(1000);
    }
    */

    /* Checking the vrpn_gettimeofday() function for monotonicity and step size */
    struct timeval last_time, this_time;
    double skip;    // Time between successive readings, in MILLIseconds
    vrpn_gettimeofday(&last_time, NULL);
    printf("Should be no further output if things are working\n");
    while (true) {
      vrpn_gettimeofday(&this_time, NULL);
      // vrpn_TimevalMsecs() returns milliseconds, so skip is in msec.
      // BUGFIX: the messages previously mislabeled the units as microseconds.
      skip = vrpn_TimevalMsecs(vrpn_TimevalDiff(this_time, last_time));
      if (skip > 50) {
	printf("Skipped forward %lg milliseconds\n", skip);
      }
      if (skip < 0) {
	printf("** Backwards %lg milliseconds\n", skip);
      }
      if (skip == 0) {
	printf("Twice the same time\n");
      }
      last_time = this_time;
    }

    return 0;
}
コード例 #12
0
// Stop the logging thread function, cleanly if possible.  Returns true if
// the function stopped cleanly, false if it had to be killed.
bool vrpn_Imager_Stream_Buffer::stop_logging_thread(void)
{
    // Ask the logging thread to shut itself down.
    d_shared_state.time_to_exit(true);

    // Give it up to three seconds to exit on its own, checking once per
    // millisecond.
    struct timeval start, now;
    vrpn_gettimeofday(&start, NULL);
    vrpn_gettimeofday(&now, NULL);
    while (vrpn_TimevalDiff(now, start).tv_sec < 3) {
        if (!d_logging_thread->running()) {
            return true;    // Clean shutdown.
        }
        vrpn_SleepMsecs(1);
        vrpn_gettimeofday(&now, NULL);
    }

    // It did not stop in time; kill it forcibly.
    d_logging_thread->kill();
    return false;
}
コード例 #13
0
ファイル: testimager_client.C プロジェクト: gtcoder2012/vrpn
//----------------------------------------------------------------------------
// Capture timing information and print out how many frames per second
// are being drawn.  Remove this function if you don't want timing info.
void print_timing_info(void)
{
    static struct timeval last_print_time;  // When we last reported a rate
    static bool first_time = true;          // Clock not yet started?
    static int frame_count = 0;             // Frames since the last report
    struct timeval now;

    if (first_time) {
        // First call: just start the clock; there is nothing to report yet.
        vrpn_gettimeofday(&last_print_time, NULL);
        first_time = false;
        return;
    }

    frame_count++;
    vrpn_gettimeofday(&now, NULL);
    // vrpn_TimevalMsecs() yields milliseconds; * 0.001 converts to seconds.
    double timesecs = 0.001 * vrpn_TimevalMsecs(vrpn_TimevalDiff(now, last_print_time));
    if (timesecs >= 5) {
        double frames_per_sec = frame_count / timesecs;
        frame_count = 0;
        printf("Displayed frames per second = %lg\n", frames_per_sec);
        last_print_time = now;
    }
}
コード例 #14
0
ファイル: vrpn_ping.C プロジェクト: alexmarie93/vrpn_master
// Pong-message handler: measure the round trip since the last ping was sent,
// maintain min/max/mean statistics (reported every 500 pongs), and launch
// the next ping.  Uses the file-level globals now, last_ping, c, sender and
// ping_message_id.
int VRPN_CALLBACK my_pong_handler(void *userdata, vrpn_HANDLERPARAM p)
{
    // Running statistics over the current batch of round trips.
    static int count = 0;
    static double min = 10000, max = 0, sum = 0;

    // Round-trip latency: time between the ping request and this pong.
    vrpn_gettimeofday(&now, NULL);
    struct timeval diff = vrpn_TimevalDiff(now, last_ping);
    double msecs = vrpn_TimevalMsecs(vrpn_TimevalNormalize(diff));

    // Fold this sample into the statistics.
    if (msecs < min) { min = msecs; }
    if (msecs > max) { max = msecs; }
    sum += msecs;

    // Every 500 pongs, report the batch and start a fresh one.
    if (++count == 500) {
        printf("Min = %4.2g, Max = %4.2g, Mean = %4.2g\n", min, max,
               sum / count);
        count = 0;
        min = 10000;
        max = sum = 0.0;
    }

    // Fire off the next ping request, recording when it left.
    // REMEMBER not to call mainloop() within the handler.
    vrpn_gettimeofday(&last_ping, NULL);
    c->pack_message(0, last_ping, ping_message_id, sender, NULL,
                    vrpn_CONNECTION_RELIABLE);
    c->send_pending_reports();

    return 0;
}
コード例 #15
0
ファイル: vrpn_ping.C プロジェクト: Progga1/vrpn
// Ping/pong latency tester: connects a text receiver to the named device,
// registers a pong handler, waits two seconds for stale pongs to clear,
// then starts the ping cycle and loops forever servicing the connection.
int main(int argc, char* argv[])
{
	// BUGFIX: argv[1] was dereferenced without checking that it exists;
	// running with no arguments crashed.  Fail with a usage message instead.
	if (argc < 2) {
		fprintf(stderr, "Usage: %s device_name\n", argv[0]);
		return -1;
	}

	// Declare a new text receiver (all objects are text senders)
	// and find out what connection it is using.
	r = new vrpn_Text_Receiver (argv[1]);
	c = r->connectionPtr();

	// Declare the same sender and message types that the BaseClass
	// will use for doing the ping/pong, so we can use the same
	// mechanism.  Register a handler for the pong message, so we
	// can deal with them.
	ping_message_id = c->register_message_type("Server, are you there?");
	pong_message_id = c->register_message_type("Server is here!");
	sender = c->register_sender(vrpn_copy_service_name(argv[1]));
	c->register_handler(pong_message_id, my_pong_handler, NULL, sender);

	// Let the user kill the program "nicely."
	signal(SIGINT, handle_cntl_c);

	// Wait a few seconds (spinning while we do) in order to allow the
	// real pong message to clear from the system.
	struct timeval	then, diff;
	vrpn_gettimeofday(&then, NULL);
	do {
		vrpn_gettimeofday(&now, NULL);
		r->mainloop();
		diff = vrpn_TimevalDiff(now, then);
	} while ( vrpn_TimevalMsecs(vrpn_TimevalNormalize(diff)) < 2000);

	// Send a new ping request to the server, and start counting how
	// long it takes to respond.
	vrpn_gettimeofday(&last_ping, NULL);
	c->pack_message(0, last_ping, ping_message_id, sender, NULL,
		vrpn_CONNECTION_RELIABLE);

	// Loop forever.
	while (1) {
		r->mainloop();
	}
}
コード例 #16
0
// Copy into *cpy the acceleration report for sensor in_sensor.  With a NULL
// timestamp, the first matching report in the list is returned.  Otherwise
// the list is scanned until a report older than *ts is found; if a newer one
// (last) was seen before it, the two are interpolated linearly (slerp for
// the quaternion) at *ts.  If no report matches, *cpy is left untouched.
// NOTE(review): the interpolation assumes reports for a sensor appear in
// newest-to-oldest order in `acc` — confirm against the code that fills it.
void VRPNTrackerInstance::getAccReport(vrpn_TRACKERACCCB* cpy, timeval* ts, int in_sensor)
{
	TrackerAccList::iterator it;
	vrpn_TRACKERACCCB* last = NULL;   // Most recent report newer than *ts
	for ( it = acc.begin(); it != acc.end(); it++ )
	{
		vrpn_TRACKERACCCB* curr = *it;
		if (curr->sensor == in_sensor)
		{
			if (ts == NULL)
			{
				// No timestamp requested: return the first match.
				*cpy = *curr;
				return;
			}
			else if (vrpn_TimevalGreater(*ts,curr->msg_time))
			{
				if (last)
				{
					// Interpolation weight: fraction of the way from
					// curr's time to last's time at which *ts falls.
					double val = vrpn_TimevalMsecs(vrpn_TimevalDiff(*ts, curr->msg_time))/vrpn_TimevalMsecs(vrpn_TimevalDiff(last->msg_time, curr->msg_time));
					cpy->acc[0] = curr->acc[0] + val*(last->acc[0] - curr->acc[0]);
					cpy->acc[1] = curr->acc[1] + val*(last->acc[1] - curr->acc[1]);
					cpy->acc[2] = curr->acc[2] + val*(last->acc[2] - curr->acc[2]);
					q_slerp(cpy->acc_quat, curr->acc_quat, last->acc_quat, val); 
					cpy->acc_quat_dt = curr->acc_quat_dt + val*(last->acc_quat_dt - curr->acc_quat_dt);
					return;
				}
				else
				{
					// *ts is newer than every report seen so far:
					// return this (newest) one unmodified.
					*cpy = *curr;
					return;
				}
			}
			else
				last = curr;
		}
	}
}
コード例 #17
0
ファイル: argonot.cpp プロジェクト: CISMM/video
// New pixels coming; fill them into the image and tell Glut to redraw.
// Handles optional background subtraction/averaging/clearing (toggled via
// the g_* globals, latched at the first region of a frame), applies the
// clip-range gain/offset, and tracks received frames per second.
void  VRPN_CALLBACK handle_region_change(void *, const vrpn_IMAGERREGIONCB info)
{
    int r,c;	//< Row, Column
    int ir;	//< Inverted Row
    int offset,RegionOffset;
    const vrpn_Imager_Region* region=info.region;
    double  intensity_gain;
    double  intensity_offset;

    // Compute gain and offset so that pixels at or below the low-clip value are
    // set to zero and pixels at or above the high-clip value are set to 255.
    // First, make sure we've got legal settings.
    if (g_clip_high <= g_clip_low) {
      g_clip_high = g_clip_low + 1;
    }
    intensity_gain = 255.0/(g_clip_high-g_clip_low);
    intensity_offset = g_clip_low;

    int infoLineSize=region->d_cMax-region->d_cMin+1;
    vrpn_int32 nCols=g_ti->nCols();
    vrpn_int32 nRows=g_ti->nRows();
    unsigned char uns_pix;
    static  bool  subtracting_background = false;
    static  bool  averaging_background = false;
    // NOTE(review): clearing_background is set below but never read here.
    static  bool  clearing_background = false;

    if (!g_ready_for_region) { return; }

    // If this is the first region and we have been asked to subtract the
    // background, then start copying regions into the background buffer,
    // with a negative sign for each pixel (so as to subtract this image
    // from future images).
    // If this is the first region and we used to be grabbing into the
    // background buffer, stop copying into the background buffer.
    if (info.region->d_rMin == 0) {
      if (g_subtract_background) {
	g_subtract_background = 0;
	subtracting_background = true;
	printf("Subtracting background\n");
      } else if (subtracting_background) {
	subtracting_background = false;
      }
    }
    if (subtracting_background) {
      for (r = info.region->d_rMin,RegionOffset=(r-region->d_rMin)*infoLineSize; r <= region->d_rMax; r++,RegionOffset+=infoLineSize) {
	ir = nRows - r - 1;
	int row_offset = ir*nCols;
	for (c = info.region->d_cMin; c <= info.region->d_cMax; c++) {
	  vrpn_uint8 val;
	  info.region->read_unscaled_pixel(c,r,val);
	  g_background[c + row_offset] = -val;
	}
      }
    }

    // If this is the first region and we have been asked to average the
    // background, then start copying pixel offsets from 128 into the background buffer.
    // This will make it so that if the same image appears in the future, it will have
    // an intensity value of 128 everywhere; other values will appear as image.
    // If this is the first region and we used to be averaging into the
    // background buffer, stop averaging into the background buffer.
    if (info.region->d_rMin == 0) {
      if (g_average_background) {
	g_average_background = 0;
	averaging_background = true;
	printf("Averaging background\n");
      } else if (averaging_background) {
	averaging_background = false;
      }
    }
    if (averaging_background) {
      for (r = info.region->d_rMin,RegionOffset=(r-region->d_rMin)*infoLineSize; r <= region->d_rMax; r++,RegionOffset+=infoLineSize) {
	ir = nRows - r - 1;
	int row_offset = ir*nCols;
	for (c = info.region->d_cMin; c <= info.region->d_cMax; c++) {
	  vrpn_uint8 val;
	  info.region->read_unscaled_pixel(c,r,val);
	  g_background[c + row_offset] = 128 - val;
	}
      }
    }

    // If we have been asked to clear the background, then start do it.
    if (g_clear_background) {
      printf("Clearing background\n");
      g_clear_background = 0;
      for (r = 0,RegionOffset=r*nCols; r < nRows; r++,RegionOffset+=nCols) {
	ir = nRows - r - 1;
	int row_offset = ir*nCols;
	for (c = 0; c < nCols; c++) {
	  g_background[c + row_offset] = 0;
	}
      }
    }

    // Copy pixels into the image buffer.  Flip the image over in
    // Y so that the image coordinates display correctly in OpenGL.
    // Each output pixel is (value + background adjustment - offset) * gain,
    // clamped to [0,255] and replicated into R, G and B.
    for (r = info.region->d_rMin,RegionOffset=(r-region->d_rMin)*infoLineSize; r <= region->d_rMax; r++,RegionOffset+=infoLineSize) {
      ir = g_ti->nRows() - r - 1;
      int row_offset = ir*nCols;
      for (c = info.region->d_cMin; c <= region->d_cMax; c++) {
	int per_pixel_adjustment = g_background[c + row_offset];
	int temp;
	vrpn_uint8 val;
	info.region->read_unscaled_pixel(c,r,val);

	offset = 3 * (c + row_offset);
	temp = (int)(( (val + per_pixel_adjustment) - intensity_offset) * intensity_gain);
	if (temp < 0) { temp = 0; }
	if (temp > 255) { temp = 255; }
	uns_pix = (unsigned char)temp;
		  
	g_image[0 + offset] = uns_pix;
	g_image[1 + offset] = uns_pix;
	g_image[2 + offset] = uns_pix;
      }
    }

    // Capture timing information and print out how many frames per second
    // are being received.

    { static struct timeval last_print_time;
      struct timeval now;
      static bool first_time = true;
      static int frame_count = 0;

      if (first_time) {
	gettimeofday(&last_print_time, NULL);
	first_time = false;
      } else {
	// A new frame has started when the row minimum wraps back around.
	static	unsigned  last_r = 10000;
	if (last_r > info.region->d_rMin) {
	  frame_count++;
	}
	last_r = info.region->d_rMin;
	gettimeofday(&now, NULL);
	double timesecs = 0.001 * vrpn_TimevalMsecs(vrpn_TimevalDiff(now, last_print_time));
	if (timesecs >= 5) {
	  double frames_per_sec = frame_count / timesecs;
	  frame_count = 0;
	  printf("Received frames per second = %lg\n", frames_per_sec);
	  last_print_time = now;
	}
      }
    }

    // Tell Glut it is time to draw.  Make sure that we don't post the redisplay
    // operation more than once by checking to make sure that it has been handled
    // since the last time we posted it.  If we don't do this check, it gums
    // up the works with tons of redisplay requests and the program won't
    // even handle windows events.
    if (!g_already_posted) {
      glutPostRedisplay();
      g_already_posted = true;
    }
}
コード例 #18
0
// Swap the logging thread onto a fresh connection that logs to the given
// files, keeping at least one server connection open throughout so the
// server never sees its last connection drop.  Returns false (restoring the
// old connection's handlers) on any failure or on connection timeout.
bool vrpn_Imager_Stream_Buffer::make_new_logging_connection(
    const char *local_in_logfile_name, const char *local_out_logfile_name,
    const char *remote_in_logfile_name, const char *remote_out_logfile_name)
{
    // Open a new connection to do logging on before deleting the old one so
    // that we keep at least one connection open to the server at all time.
    // This will prevent it from doing its "dropped last connection" things
    // which will include resetting the imager server.
    vrpn_Connection *new_log_connection = open_new_log_connection(
        local_in_logfile_name, local_out_logfile_name, remote_in_logfile_name,
        remote_out_logfile_name);
    if (new_log_connection == NULL) {
        fprintf(stderr, "vrpn_Imager_Stream_Buffer::make_new_logging_"
                        "connection(): Cannot open connection\n");
        return false;
    }

    // Unhook the callbacks from the existing logging connection so that
    // we don't end up with two callbacks for each message.
    if (!teardown_handlers_for_logging_connection(d_log_connection)) {
        fprintf(stderr, "vrpn_Imager_Stream_Buffer::make_new_logging_"
                        "connection(): Cannot teardown connection\n");
        return false;
    }

    // Hook the callbacks up to the new connection so that we will get reports
    // from the server.
    if (!setup_handlers_for_logging_connection(new_log_connection)) {
        fprintf(stderr, "vrpn_Imager_Stream_Buffer::make_new_logging_"
                        "connection(): Cannot setup connection\n");
        return false;
    }

    // Mainloop the new connection object until it becomes connected or we
    // time out.  If we time out, then put things back on the old connection
    // and tell the thread it is time to self-destruct.  The way we check
    // for connected cannot be just that the connection's connected() method
    // returns true (because our end can be marked connected before the other
    // end decides it has complete the connection.  Rather, we check to see
    // that we've got a new description report from the server -- indicating
    // that it has seen the new report.  This also lets us know that the old
    // log file will have accumulated all images up to the new report, so we
    // can shut it off without losing any images in the switch to the new
    // log file (there may be duplicates, but not losses).
    struct timeval start, now;
    vrpn_gettimeofday(&start, NULL);
    now = start;
    d_ready_to_drop_old_connection = false;
    // Three-second timeout; d_ready_to_drop_old_connection is set by the
    // description-report handler hooked up above.
    while (!d_ready_to_drop_old_connection &&
           (vrpn_TimevalDiff(now, start).tv_sec < 3)) {
        new_log_connection->mainloop(); // Enable connection set-up to occur
        new_log_connection->save_log_so_far();
        d_log_connection->mainloop(); // Eat up (and log) any incoming messages
        d_log_connection->save_log_so_far();
        vrpn_gettimeofday(&now, NULL);
        vrpn_SleepMsecs(1);
    };
    if (!d_ready_to_drop_old_connection) {
        fprintf(stderr, "vrpn_Imager_Stream_Buffer::make_new_logging_"
                        "connection(): Could not connect new logging "
                        "connection\n");
        teardown_handlers_for_logging_connection(new_log_connection);
        setup_handlers_for_logging_connection(d_log_connection);
        new_log_connection->removeReference();
        d_shared_state.time_to_exit(true);
        return false;
    }

    // Delete the old connection object by reducing its reference count.
    d_log_connection->removeReference();

    // Set up to use the new connection
    d_log_connection = new_log_connection;
    return true;
}
コード例 #19
0
ファイル: vrpn_Shared.C プロジェクト: lpberg/vrpn
// Self-test of the vrpn semaphore and thread primitives.  Exercises a
// counted semaphore single-threaded (drain, refill, drain again), then
// starts a test thread and verifies it releases a semaphore within three
// seconds.  Returns true on success (or when threading is unavailable),
// false with a message on stderr on any failure.
bool vrpn_test_threads_and_semaphores(void)
{
  //------------------------------------------------------------
  // Make a semaphore to test in single-threaded mode.  First run its count all the way
  // down to zero, then bring it back to the full complement and then bring it down
  // again.  Check that all of the semaphores are available and also that there are no
  // more than expected available.
  const unsigned sem_count = 5;
  vrpn_Semaphore s(sem_count);
  unsigned i;
  for (i = 0; i < sem_count; i++) {
    if (s.condP() != 1) {
      fprintf(stderr, "vrpn_test_threads_and_semaphores(): Semaphore ran out of counts\n");
      return false;
    }
  }
  if (s.condP() != 0) {
    fprintf(stderr, "vrpn_test_threads_and_semaphores(): Semaphore had too many counts\n");
    return false;
  }
  for (i = 0; i < sem_count; i++) {
    if (s.v() != 0) {
      fprintf(stderr, "vrpn_test_threads_and_semaphores(): Could not release Semaphore\n");
      return false;
    }
  }
  for (i = 0; i < sem_count; i++) {
    if (s.condP() != 1) {
      fprintf(stderr, "vrpn_test_threads_and_semaphores(): Semaphore ran out of counts, round 2\n");
      return false;
    }
  }
  if (s.condP() != 0) {
    fprintf(stderr, "vrpn_test_threads_and_semaphores(): Semaphore had too many counts, round 2\n");
    return false;
  }

  //------------------------------------------------------------
  // Get a semaphore and use it to construct a thread data structure and then
  // a thread.  Use that thread to test whether threading is enabled (if not, then
  // this completes our testing) and to find out how many processors there are.
  vrpn_ThreadData	td;
  td.pvUD = NULL;
  vrpn_Thread	t(vrpn_test_thread_body, td);

  // If threading is not enabled, then we're done.
  if (!t.available()) {
    return true;
  }

  // Find out how many processors we have.
  unsigned num_procs = t.number_of_processors();
  if (num_procs == 0) {
    fprintf(stderr, "vrpn_test_threads_and_semaphores(): vrpn_Thread::number_of_processors() returned zero\n");
    return false;
  }

  //------------------------------------------------------------
  // Now make sure that we can actually run a thread.  Do this by
  // creating a semaphore with one entry and calling p() on it.
  // Then make sure we can't p() it again and then run a thread
  // that will call v() on it when it runs.
  vrpn_Semaphore	sem;
  if (sem.p() != 1) {
    fprintf(stderr, "vrpn_test_threads_and_semaphores(): thread-test Semaphore had no count\n");
    return false;
  }
  if (sem.condP() != 0) {
    fprintf(stderr, "vrpn_test_threads_and_semaphores(): thread-test Semaphore had too many counts\n");
    return false;
  }
  t.userData(&sem);
  if (!t.go()) {
    fprintf(stderr, "vrpn_test_threads_and_semaphores(): Could not start thread\n");
    return false;
  }
  // Poll (1 ms at a time) for the thread to release the semaphore.
  struct timeval start;
  struct timeval now;
  vrpn_gettimeofday(&start, NULL);
  while (true) {
    if (sem.condP() == 1) {
      // The thread must have run; we got the semaphore!
      break;
    }

    // Time out after three seconds if we haven't had the thread run to reset
    // the semaphore.
    vrpn_gettimeofday(&now, NULL);
    struct timeval diff = vrpn_TimevalDiff( now, start );
    if (diff.tv_sec >= 3) {
      fprintf(stderr, "vrpn_test_threads_and_semaphores(): Thread didn't run\n");
      return false;
    }

    vrpn_SleepMsecs(1);
  }

  return true;
}
コード例 #20
0
/** The first time we are called, start the filter graph running in continuous
    mode and grab the first image that comes out.  Later times, grab each new
    image as it comes.  The "mode" parameter tells what mode we are in:
      Mode 0 = Run()
      Mode 1 = Pause()
    */
bool  directx_camera_server::read_one_frame(unsigned minX, unsigned maxX,
			      unsigned minY, unsigned maxY,
			      unsigned exposure_millisecs)
{
  HRESULT hr;

  if (!_status) { return false; };

  // If we have not yet started the media graph running, set up the callback
  // handler for the sample grabber filter so that we will hear about each frame
  // as it comes in.  Then set the filter graph running.
  if (!_started_graph) {
    // Set the grabber do not do one-shot mode because that would cause
    // it to stop the filter graph after a single frame is captured.
    _pGrabber->SetOneShot(FALSE);

    // Set the grabber to not do buffering mode, because we've not implemented
    // the handler for buffered callbacks.
    _pGrabber->SetBufferSamples(FALSE);

    // Run the graph and wait until it captures a frame into its buffer
    switch (_mode) {
      case 0: // Case 0 = run
	hr = _pMediaControl->Run();
	break;
      case 1: // Case 1 = paused
	hr = _pMediaControl->Pause();
	break;
      default:
	fprintf(stderr, "directx_camera_server::read_one_frame(): Unknown mode (%d)\n", _mode);
	_status = false;
	return false;
    }
    if ( (hr != S_OK) && (hr != S_FALSE) ){
      fprintf(stderr,"directx_camera_server::read_one_frame(): Can't run filter graph\n");
      _status = false;
      return false;
    }

    _started_graph = true;
  }

  //XXX Should the app set the timeout period?

  // Wait until there is a sample ready in the callback handler.  If there is,
  // copy it into our buffer and then tell it we are done processing the sample.
  // If it takes too long, time out.
  const int TIMEOUT_MSECS = 250;
  BYTE	*imageLocation;
  if (!_pCallback->imageReady) {
    for (int i = 0; i < TIMEOUT_MSECS; i++) {
      vrpn_SleepMsecs(1);
      if (_pCallback->imageReady) { break; }	// Break out of the wait if its ready
    }
    if (!_pCallback->imageReady) {
#ifdef DEBUG
      fprintf(stderr,"directx_camera_server::read_one_frame(): Timeout when reading image\n");
#endif
      return false;
    }
  }

  // If we are in mode 2, then we pause the graph after we captured one image.
  if (_mode == 2) {
    _pMediaControl->Pause();
    _mode = 1;
  }

  if (_pCallback->imageReady) {
    _pCallback->imageReady = false;
    if (FAILED(_pCallback->imageSample->GetPointer(&imageLocation))) {
      fprintf(stderr,"directx_camera_server::read_one_frame(): Can't get buffer\n");
      _status = false;
      _pCallback->imageDone = true;
      return false;
    }
    // Step through each line of the video and copy it into the buffer.  We
    // do one line at a time here because there can be padding at the end of
    // each line on some video formats.
    for (DWORD iRow = 0; iRow < _num_rows; iRow++) {
      memcpy(_buffer+_num_columns*3*iRow, imageLocation+_stride*iRow, _num_columns*3);
    }
    _pCallback->imageDone = true;
  }

  // Capture timing information and print out how many frames per second
  // are being received.

  { static struct timeval last_print_time;
    struct timeval now;
    static bool first_time = true;
    static int frame_count = 0;

    if (first_time) {
      gettimeofday(&last_print_time, NULL);
      first_time = false;
    } else {
      static	unsigned  last_r = 10000;
      frame_count++;
      gettimeofday(&now, NULL);
      double timesecs = 0.001 * vrpn_TimevalMsecs(vrpn_TimevalDiff(now, last_print_time));
      if (timesecs >= 5) {
	double frames_per_sec = frame_count / timesecs;
	frame_count = 0;
	printf("Received frames per second = %lg\n", frames_per_sec);
	last_print_time = now;
      }
    }
  }

  return true;
}
コード例 #21
0
ファイル: vrpn_Nidaq.C プロジェクト: ASPePeX/vrpn
//#define VERBOSE
// Constructor for the National Instruments DAQ analog server.
//
// pchName           - VRPN device name to report as.
// pConnection       - connection over which analog reports are sent.
// dSamplingRate     - rate at which each full set of channels is sampled.
// dInterChannelRate - rate at which the board steps between channels
//                     within one sample set.
// sDeviceNumber     - which NI-DAQ device number to open.
// cChannels         - number of channels to read; must not exceed
//                     vrpn_CHANNEL_MAX or the DAQ is left uninitialized.
// rgsChan, rgsGain  - per-channel board channel numbers and gains.
// sInputMode        - differential or single-ended input selection.
// sPolarity         - bipolar (+/-) or unipolar (+) input range.
// fNice             - if nonzero, request 1 ms Windows multimedia timer
//                     resolution so the sampling thread can sleep politely.
//
// After computing the offset between the DAQ's UpTime clock and
// vrpn_gettimeofday(), this spawns a dedicated Windows thread (runThread)
// that reads the DAQ; the thread shares data through csAnalogBuffer.
vrpn_Nidaq::vrpn_Nidaq(char *pchName, vrpn_Connection *pConnection,
		       double dSamplingRate, double dInterChannelRate, 
		       short sDeviceNumber, int cChannels, 
		       short rgsChan[], short rgsGain[],
		       short sInputMode, short sPolarity, int fNice)
  : vrpn_Analog(pchName, pConnection), pDAQ(NULL), fNice(fNice), fStop(0),
  fNewData(0), dSampleTime(0) {

	// Refuse to configure more channels than vrpn_Analog can report;
	// pDAQ stays NULL so the rest of the object is inert.
	if (cChannels>vrpn_CHANNEL_MAX) {
      cerr << "vrpn_Nidaq::vrpn_Nidaq: vrpn_Analog allows only " 
	   << vrpn_CHANNEL_MAX << " channels (" << cChannels 
	   << " requested). DAQ not initialized." << endl;
      return;
    }
    
	// Ask Windows for 1 ms timer granularity so short sleeps in the DAQ
	// thread wake up on time.  NOTE(review): timeBeginPeriod() should be
	// balanced by timeEndPeriod() at shutdown -- confirm the destructor
	// does so.
	if (fNice) {
		MMRESULT res = timeBeginPeriod(1);
		if (res != TIMERR_NOERROR) {
			cerr << "NidaqServer: timeBeginPeriod() failed!!!\n";
		}                       
	}

    num_channel = cChannels;
    daqSample.resize(cChannels);
    
    // calc the approximate offset between the clock the daq class uses
    // and the clock vrpn uses.
    
    // call each to get them in cache
    // (a cold first call could be slow and would skew the measurement below)
    struct timeval tv, tvUpTime;
    double dTime1, dTime2;
    vrpn_gettimeofday(&tv, NULL);
    vrpn_gettimeofday(&tv, NULL);
    UpTime::Now();
    UpTime::Now();
    
    // Now calc offset
    // Bracket one vrpn_gettimeofday() between two UpTime::Now() readings and
    // average them, so both clocks are read at (approximately) the same
    // instant.  The statement order here matters; don't rearrange.
    dTime1=UpTime::Now();
    vrpn_gettimeofday(&tv, NULL);
    dTime2=UpTime::Now();
    
    dTime1 = (dTime1 + dTime2)/2.0;
    tvUpTime = vrpn_MsecsTimeval(dTime1*1000.0);
    tvOffset = vrpn_TimevalDiff(tv, tvUpTime);

    // later, add this to tvUpTime to get into vrpn_gettimeofday time frame

    // alloc the daq (which also starts it up)
    // args are:
    // rate for set of channels
    // rate between channels
    // which device num the daq has been set up as
    // the number of channels to read off of it
    // the array of channels to read
    // the gain to apply to each
    // differential or single ended
    // bipolar (+/-) or just unipolar (+)
    pDAQ = new DAQ(dSamplingRate, dInterChannelRate, sDeviceNumber, cChannels,
		   rgsChan, rgsGain, sInputMode, sPolarity );
	
	// start the DAQ-only thread
	InitializeCriticalSection(&csAnalogBuffer);
	hDAQThread = (HANDLE) _beginthreadex(NULL, 0, runThread, this, 0, NULL);
}
コード例 #22
0
ファイル: edt_server.cpp プロジェクト: CISMM/video
/** Read the next frame from the EDT frame grabber into a memory buffer.

    On success, d_buffer points at the image data and d_timestamp holds the
    capture time.  The min/max parameters select a region in binned pixel
    coordinates; passing a max smaller than its min selects the full frame.

    @param minX,maxX,minY,maxY  Requested region (binned pixel units).
    @param exposure_time_millisecs  Requested exposure (XXX not yet applied).
    @return true on success; false on read failure, in which case the device
            is closed and _status is cleared so later calls fail fast.
*/
bool  edt_server::read_image_to_memory(unsigned minX, unsigned maxX,
							unsigned minY, unsigned maxY,
							double exposure_time_millisecs)
{
  u_char *image_p;
  struct timeval now;

  if (!_status) { return false; }

  //---------------------------------------------------------------------
  // XXX Set the exposure time.

  //---------------------------------------------------------------------
  // Set the size of the window to include all pixels if there were not
  // any binning.  This means adding all but 1 of the binning back at
  // the end to cover the pixels that are within that bin.
  _minX = minX * _binning;
  _maxX = maxX * _binning + (_binning-1);
  _minY = minY * _binning;
  _maxY = maxY * _binning + (_binning-1);

  //---------------------------------------------------------------------
  // If a max is less than its min, the caller wants the whole frame:
  // set the range to the full size of the image.
  if (_maxX < _minX) {
    _minX = 0; _maxX = _num_columns - 1;
  }
  if (_maxY < _minY) {
    _minY = 0; _maxY = _num_rows - 1;
  }

  //---------------------------------------------------------------------
  // Clip collection range to the size of the sensor on the camera.
  if (_minX < 0) { _minX = 0; };
  if (_minY < 0) { _minY = 0; };
  if (_maxX >= _num_columns) { _maxX = _num_columns - 1; };
  if (_maxY >= _num_rows) { _maxY = _num_rows - 1; };

  // If the in-memory buffers have all been filled up, assume
  // that we have missed some unknown number of images.  Save this
  // so that it can be reported if we're sending VRPN messages.
  // Actually, we report this if we drop down to 1 buffer because
  // this was where the version running on one of our computers
  // hit the floor at and started losing frames.
  unsigned outstanding = edt_get_todo((PdvDev*)d_pdv_p) -
                         edt_done_count((PdvDev*)d_pdv_p);
  if ( outstanding <= 1 ) {
    d_missed_some_images = true;
  }

  // Once a second, tell how many buffers have been filled and are waiting
  // for us to process.
  static struct timeval last = { 0, 0 };
  vrpn_gettimeofday(&now, NULL);
  if (now.tv_sec > last.tv_sec) {
    last = now;
    //printf("XXX EDT: %d outstanding buffers\n", outstanding );
  }

  /*
   * get the image and immediately start the next one. Processing
   * can then occur in parallel with the next acquisition
   */
  unsigned int sec_usec[2];
  image_p = pdv_wait_image_timed((PdvDev*)d_pdv_p, sec_usec);
  if (image_p == NULL) {
    fprintf(stderr,"edt_server::read_image_to_memory(): Failed to read image\n");
    pdv_close((PdvDev*)d_pdv_p);
    d_pdv_p = NULL;
    _status = false;
    return false;
  }
  pdv_start_image((PdvDev*)d_pdv_p);
  /*XXX
  if (edt_reg_read((PdvDev*)d_pdv_p, PDV_STAT) & PDV_OVERRUN) {
    XXX_overrun_check; // http://www.edt.com/api/simple__take_8c-source.html
  }
  */

  // XXX We might think that we wanted to use the wait_images functions
  // rather than wait_image, but we do seem to fill up all of the buffers
  // when we can't send data out fast enough.  But it may be worth trying
  // to use it and ask for one at a time, then only start more reads when
  // half of them have been read or something?  And do this using a commend
  // to start multiple reads?  Does it re-start every time we call start?
  // If so, what's the point of multiple buffers in the simple_take.c file?
  // Google search for pdv_wait_image_timed turned up pointers to their docs.

  //---------------------------------------------------------------------
  // Time handling: We let the EDT board tell us what time each image
  // was put into the DMA buffer.  We don't know how this compares to
  // the absolute PC clock time, so we record the offset from the first
  // time we got an image (clock reading and EDT reading), so that the
  // time will be reported relative to the computer's clock.
  // If we don't have a nonzero PC time, this is the first time through,
  // so get both initial times.
  struct timeval edt_now = { sec_usec[0], sec_usec[1] };
  if ( d_pc_time_first_image.tv_sec == 0 ) {
    vrpn_gettimeofday(&d_pc_time_first_image, NULL);
    d_edt_time_first_image = edt_now;
  }
  struct timeval time_offset = vrpn_TimevalDiff(d_pc_time_first_image, d_edt_time_first_image);
  d_timestamp = vrpn_TimevalSum( edt_now, time_offset );

  // XXX The EDT-board time handling seems to be broken, so for now we just
  // put on the current time for the frame.
  vrpn_gettimeofday(&now, NULL);
  d_timestamp = now;

  // Check for timeouts in image transer from the camera into a memory
  // buffer.  This does NOT tell us when we ask for more images than will
  // fit into buffers, but rather when there was a communication error or
  // the system bus was too slow to pull the image off the camera.  We'll
  // need to look at timestamps to determine whether we missed any images
  // due to not having room in the buffers we allocated.

  unsigned timeouts = pdv_timeouts((PdvDev*)d_pdv_p);
  if (timeouts > d_last_timeouts)
  {
      /*
       * pdv_timeout_cleanup helps recover gracefully after a timeout,
       * particularly if multiple buffers were prestarted
       */
      if (d_num_buffers > 1)
      {
	  int     pending = pdv_timeout_cleanup((PdvDev*)d_pdv_p);
	  pdv_start_images((PdvDev*)d_pdv_p, pending);
      }
      d_unreported_timeouts += (timeouts - d_last_timeouts);
      d_last_timeouts = timeouts;
  }

  // If we're supposed to swap every other line, do that here.
  // The image lines are swapped in place: each even line j trades places
  // with line j+1, using d_swap_buffer as the temporary.
  // (The previous code copied line j into line j+1 twice, duplicating
  // line j instead of swapping, and could read past the last row when
  // _num_rows was odd; both are fixed here.)
  if (d_swap_lines) {
    unsigned j;    // Index of the even line in each pair, skipping every other.

    // Stop at _num_rows - 1 so an odd row count cannot read past the image.
    for (j = 0; j + 1 < _num_rows; j += 2) {
      memcpy(d_swap_buffer, image_p + j*_num_columns, _num_columns);
      memcpy(image_p + j*_num_columns, image_p + (j+1)*_num_columns, _num_columns);
      memcpy(image_p + (j+1)*_num_columns, d_swap_buffer, _num_columns);
    }
  }

  // Point to the image in memory.
  d_buffer = image_p;

  return true;
}
コード例 #23
0
ファイル: test_analogfly.C プロジェクト: ASPePeX/vrpn
// Test driver for vrpn_Tracker_AnalogFly: creates a local server connection,
// an analog server that always reports 1.0 on channel 0, and two AnalogFly
// trackers driven by that analog -- one differential (positions accumulate)
// and one absolute (positions stay at the analog value).  Runs all the
// mainloops for ten seconds, warning if analog reports stop arriving,
// then cleans up.  Takes no command-line arguments.
int main (int argc, char * argv [])
{
	if (argc != 1) {
		fprintf(stderr, "Usage: %s\n", argv[0]);
		exit(-1);
	}

	//---------------------------------------------------------------------
	// explicitly open the connection
	connection = vrpn_create_server_connection(CONNECTION_PORT);

	//---------------------------------------------------------------------
	// Open the analog server, using this connection.
	sana = new vrpn_Analog_Server(ANALOG_NAME, connection);
	sana->setNumChannels(1);
	sana->channels()[0] = 1.0;	//< Set the value to 1
	printf("Analog's name is %s.\n", ANALOG_NAME);
	create_and_link_analog_remote();

	//---------------------------------------------------------------------
	// Open the tracker remotes, using this connection.
	printf("Tracker 1's name is %s.\n", TRACKER1_NAME);
	printf("Tracker 2's name is %s.\n", TRACKER2_NAME);
	create_and_link_tracker_remotes();

	//---------------------------------------------------------------------
	// Create the differential and absolute trackers.  Both should send one
	// report every two seconds.  First, create the axis descriptor (used
	// for all axes).  Then pack this into the parameter descriptor and start
	// the relative tracker and absolute tracker.
	
	vrpn_TAF_axis	taf_axis1;  // Axis that returns values from the analog
	char	name[500];
	// Use snprintf rather than sprintf so a long ANALOG_NAME cannot
	// overflow the buffer.
	snprintf(name, sizeof(name), "*%s", ANALOG_NAME);
	taf_axis1.name = name;
	taf_axis1.channel = 0;
	taf_axis1.offset = 0.0;
	taf_axis1.thresh = 0.0;
	taf_axis1.scale = 1.0;
	taf_axis1.power = 1.0;

	vrpn_TAF_axis	taf_axisNULL;	// Axis that doesn't return anything
	taf_axisNULL.name = NULL;
	taf_axisNULL.channel = 0;
	taf_axisNULL.offset = 0.0;
	taf_axisNULL.thresh = 0.0;
	taf_axisNULL.scale = 1.0;
	taf_axisNULL.power = 1.0;

	// Translation comes from the analog axis; rotation is disabled by
	// using the NULL axis for all three rotation components.
	vrpn_Tracker_AnalogFlyParam p;
	p.reset_name = NULL;
	p.reset_which = 0;
	p.x = taf_axis1;
	p.y = taf_axis1;
	p.z = taf_axis1;
	p.sx = taf_axisNULL;	// Don't want any rotation!
	p.sy = taf_axisNULL;
	p.sz = taf_axisNULL;

	// Tracker 1 is differential (vrpn_false), tracker 2 absolute (vrpn_true).
	stkr1 = new vrpn_Tracker_AnalogFly(TRACKER1_NAME, connection, &p, 0.5, vrpn_false);
	stkr2 = new vrpn_Tracker_AnalogFly(TRACKER2_NAME, connection, &p, 0.5, vrpn_true);

	/* 
	 * main interactive loop
	 */

	printf("You should see tracker1 positions increasing by 2 per 2 seconds\n");
	printf("You should see tracker2 positions remaining at 1\n");

	struct timeval start, now, diff;
	vrpn_gettimeofday(&start, NULL);
	vrpn_gettimeofday(&now, NULL);
	diff = vrpn_TimevalDiff(now, start);
	while ( diff.tv_sec <= 10 ) {

	    // Make sure that we are getting analog values; complain about once
	    // a second if we are not.  (Named tvCheck to avoid shadowing the
	    // outer "now".)
	    {	static	struct	timeval	last_report;
		static	int	first = 1;
		struct timeval tvCheck;

		if (first) {
		    vrpn_gettimeofday(&last_report, NULL);
		    first = 0;
		}
		vrpn_gettimeofday(&tvCheck, NULL);
		if (tvCheck.tv_sec - last_report.tv_sec > 1) {
		    if (!getting_analog_values) {
			fprintf(stderr, "Error - not getting analog values!\n");
		    }
		    vrpn_gettimeofday(&last_report, NULL);
		    getting_analog_values = 0; // Make sure we get more next time
		}
	    }

	    // Let the servers, clients and connection do their things
	    sana->report(); sana->mainloop();
	    stkr1->mainloop();
	    stkr2->mainloop();
	    rana->mainloop();
	    rtkr1->mainloop();
	    rtkr2->mainloop();
	    connection->mainloop();

	    // Sleep for 1ms each iteration so we don't eat the CPU
	    vrpn_SleepMsecs(1);
	    vrpn_gettimeofday(&now, NULL);
	    diff = vrpn_TimevalDiff(now, start);
	}

	delete sana;
	delete rtkr1;
	delete rtkr2;
	delete stkr1;
	delete stkr2;
	delete connection;

	return 0;
}   /* main */