예제 #1
0
void *freenect_threadfunc(void *arg)
{
	// Worker thread: configure the Kinect, then pump libfreenect events
	// until `die` is set or event processing reports an error.
	int throttle = 0;

	freenect_set_tilt_degs(f_dev,freenect_angle);
	freenect_set_led(f_dev,LED_RED);
	freenect_set_depth_callback(f_dev, depth_cb);
	freenect_set_video_callback(f_dev, rgb_cb);
	freenect_set_video_mode(f_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, current_format));
	freenect_set_depth_mode(f_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	freenect_set_video_buffer(f_dev, rgb_back);

	freenect_start_depth(f_dev);
	freenect_start_video(f_dev);

	printf("'w' - tilt up, 's' - level, 'x' - tilt down, '0'-'6' - select LED mode, '+' & '-' - change IR intensity \n");
	printf("'f' - change video format, 'm' - mirror video, 'o' - rotate video with accelerometer \n");
	printf("'e' - auto exposure, 'b' - white balance, 'r' - raw color, 'n' - near mode (K4W only) \n");

	while (!die && freenect_process_events(f_ctx) >= 0) {
		// Only report accelerometer data once every 2000 processed events.
		++throttle;
		if (throttle > 2000) {
			throttle = 0;
			freenect_update_tilt_state(f_dev);
			freenect_raw_tilt_state *state = freenect_get_tilt_state(f_dev);
			double mks[3];
			freenect_get_mks_accel(state, &mks[0], &mks[1], &mks[2]);
			printf("\r raw acceleration: %4d %4d %4d  mks acceleration: %4f %4f %4f", state->accelerometer_x, state->accelerometer_y, state->accelerometer_z, mks[0], mks[1], mks[2]);
			fflush(stdout);
		}

		// Apply a video-format change requested elsewhere (e.g. keyboard handler).
		if (current_format != requested_format) {
			freenect_stop_video(f_dev);
			freenect_set_video_mode(f_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, requested_format));
			freenect_start_video(f_dev);
			current_format = requested_format;
		}
	}

	printf("\nshutting down streams...\n");

	freenect_stop_depth(f_dev);
	freenect_stop_video(f_dev);

	freenect_close_device(f_dev);
	freenect_shutdown(f_ctx);

	printf("-- done!\n");
	return NULL;
}
예제 #2
0
// Worker thread: configures the device, starts the depth/video streams and
// pumps libfreenect events until `die` is set or event processing fails.
void *freenect_threadfunc(void *arg)
{
	printf("Freenect thread\n");
	int accelCount = 0;

	//freenect_set_tilt_degs(f_dev,freenect_angle);
	//freenect_set_led(f_dev,LED_RED);
	freenect_set_depth_callback(f_dev, depth_cb);
	freenect_set_video_callback(f_dev, rgb_cb);
	freenect_set_video_mode(f_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, current_format));
	freenect_set_depth_mode(f_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	freenect_set_video_buffer(f_dev, rgb_back);

	freenect_start_depth(f_dev);
	freenect_start_video(f_dev);

	printf("'w'-tilt up, 's'-level, 'x'-tilt down, '0'-'6'-select LED mode, 'f'-video format\n");

	while (!die && freenect_process_events(f_ctx) >= 0) {
		//Throttle the text output
		if (accelCount++ >= 2000)
		{
			accelCount = 0;
			// Accelerometer reporting is disabled below; the dead
			// `double dx,dy,dz;` locals that remained from the
			// commented-out code were removed (unused-variable warning).
			//freenect_raw_tilt_state* state;
			//freenect_update_tilt_state(f_dev);
			//state = freenect_get_tilt_state(f_dev);
			//freenect_get_mks_accel(state, &dx, &dy, &dz);
			//printf("\r raw acceleration: %4d %4d %4d  mks acceleration: %4f %4f %4f", state->accelerometer_x, state->accelerometer_y, state->accelerometer_z, dx, dy, dz);
			fflush(stdout);
		}

		// Apply a video-format change requested from outside this thread.
		if (requested_format != current_format) {
			freenect_stop_video(f_dev);
			freenect_set_video_mode(f_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, requested_format));
			freenect_start_video(f_dev);
			current_format = requested_format;
		}
	}

	printf("\nshutting down streams...\n");

	freenect_stop_depth(f_dev);
	freenect_stop_video(f_dev);

	freenect_close_device(f_dev);
	freenect_shutdown(f_ctx);

	printf("-- done!\n");
	return NULL;
}
//----------------------------------------------------------------------------------------------------------------------
/// Select a new video format for the device and restart the video stream.
/// @param _mode 0 = RGB, 1 = YUV-RGB, 2 = IR 8-bit; any other value falls
///              back to RGB with a warning.
void KinectInterface::setVideoMode(
                                                     int _mode
                                                    )
{
    freenect_video_format vm=FREENECT_VIDEO_RGB;
    switch(_mode)
    {
        case 0 : { vm=FREENECT_VIDEO_RGB; break;}
        case 1 : { vm=FREENECT_VIDEO_YUV_RGB; break;}
        case 2 : { vm=FREENECT_VIDEO_IR_8BIT; break;}
        /*
        /// had issues with these modes so sticking to the 3 that work
        case 1 : { vm=FREENECT_VIDEO_BAYER; break;}
        case 2 : { vm=FREENECT_VIDEO_IR_8BIT; break;}
        case 3 : { vm=FREENECT_VIDEO_IR_10BIT; break;}
        case 4 : { vm=FREENECT_VIDEO_IR_10BIT_PACKED; break;}
        case 5 : { vm=FREENECT_VIDEO_YUV_RGB; break;}
        case 6 : { vm=FREENECT_VIDEO_YUV_RAW; break;}
        */
        default : qDebug()<<"index out of bounds for video mode\n";
                            vm=FREENECT_VIDEO_RGB;
        break;
    }
    /// stop the video and set to new mode
    freenect_stop_video(m_dev);
    // Fix: this previously passed FREENECT_VIDEO_RGB unconditionally, so the
    // format selected above (vm) was silently ignored and the mode never changed.
    freenect_set_video_mode(m_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, vm));
    //deprecated
//	freenect_set_video_format(m_dev, vm);
    freenect_start_video(m_dev);
}
예제 #4
0
//---------------------------------------------------------------------------
// Capture thread: configures the device, starts the streams, then pumps
// libfreenect events while polling tilt/LED requests and accelerometer state.
void ofxKinect::threadedFunction(){

	// Restore the default LED only when the user has not picked one
	// (negative currentLed values appear to mean "no explicit choice" —
	// NOTE(review): confirm against the ofxKinect LED enum).
	if(currentLed < 0) { 
        freenect_set_led(kinectDevice, (freenect_led_options)ofxKinect::LED_GREEN); 
    }
	
	// Medium-resolution modes; IR vs RGB and registered-vs-mm depth are
	// chosen by the corresponding flags.
	freenect_frame_mode videoMode = freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, bIsVideoInfrared?FREENECT_VIDEO_IR_8BIT:FREENECT_VIDEO_RGB);
	freenect_set_video_mode(kinectDevice, videoMode);
	freenect_frame_mode depthMode = freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, bUseRegistration?FREENECT_DEPTH_REGISTERED:FREENECT_DEPTH_MM);
	freenect_set_depth_mode(kinectDevice, depthMode);

	ofLogVerbose("ofxKinect") << "device " << deviceId << " " << serial << " connection opened";

	freenect_start_depth(kinectDevice);
	if(bGrabVideo) {
		freenect_start_video(kinectDevice);
	}

	// Main pump: one pass per processed libfreenect event batch.
	while(isThreadRunning() && freenect_process_events(kinectContext.getContext()) >= 0) {        
		// Apply a tilt request posted from another thread.
		if(bTiltNeedsApplying) {
			freenect_set_tilt_degs(kinectDevice, targetTiltAngleDeg);
			bTiltNeedsApplying = false;
		}
		
		// Apply an LED request; LED_DEFAULT maps back to green.
		if(bLedNeedsApplying) {
			if(currentLed == ofxKinect::LED_DEFAULT) {
				freenect_set_led(kinectDevice, (freenect_led_options)ofxKinect::LED_GREEN);
			}
			else {
				freenect_set_led(kinectDevice, (freenect_led_options)currentLed);
			}
			bLedNeedsApplying = false;
		}

		// Refresh tilt + accelerometer readings every iteration.
		freenect_update_tilt_state(kinectDevice);
		freenect_raw_tilt_state * tilt = freenect_get_tilt_state(kinectDevice);
		currentTiltAngleDeg = freenect_get_tilt_degs(tilt);

		rawAccel.set(tilt->accelerometer_x, tilt->accelerometer_y, tilt->accelerometer_z);

		double dx,dy,dz;
		freenect_get_mks_accel(tilt, &dx, &dy, &dz);
		mksAccel.set(dx, dy, dz);
	}
    
	// finish up a tilt on exit
	if(bTiltNeedsApplying) {
		freenect_set_tilt_degs(kinectDevice, targetTiltAngleDeg);
		bTiltNeedsApplying = false;
	}
    
	freenect_stop_depth(kinectDevice);
	freenect_stop_video(kinectDevice);
	// Red LED on close, again only when the user made no explicit choice.
	if(currentLed < 0) {
        freenect_set_led(kinectDevice, (freenect_led_options)ofxKinect::LED_RED);
    }
    
	kinectContext.close(*this);
	ofLogVerbose("ofxKinect") << "device " << deviceId << " connection closed";
}
예제 #5
0
File: Device.c — Project: jarney/snackbot
/*
 * Class:     org_ensor_robots_sensors_kinect_Device
 * Method:    nativeStartVideo
 * Signature: (I)V
 */
// Configure and start the RGB video stream for device aDeviceId.
// All shared f_devices[] state is mutated while holding `mutex`.
JNIEXPORT void JNICALL Java_org_ensor_robots_sensors_kinect_Device_nativeStartVideo
  (JNIEnv *aJNIEnv, jobject aThisObject, jint aDeviceId)
{
    pthread_mutex_lock(&mutex);
    freenect_set_video_mode(f_devices[aDeviceId].f_dev,
            freenect_find_video_mode(
                FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB
            )
    );
    /* 640x480 RGB, 3 bytes per pixel. */
    f_devices[aDeviceId].videobuffersize = 640 * 480 * 3;
    /* NOTE(review): malloc result is not checked; a NULL buffer would be
       handed straight to libfreenect. Consider an error path. */
    f_devices[aDeviceId].f_video_buffer = malloc(f_devices[aDeviceId].videobuffersize);
    freenect_set_video_buffer(
            f_devices[aDeviceId].f_dev,
            f_devices[aDeviceId].f_video_buffer
    );

    /* Create a Java boolean array sized like the native buffer, then promote
       the local reference to a global one so it outlives this native call.
       (The intermediate local ref is reclaimed when this function returns.) */
    f_devices[aDeviceId].videobuffervalue = 
        (*aJNIEnv)->NewBooleanArray(aJNIEnv, f_devices[aDeviceId].videobuffersize);
    
    f_devices[aDeviceId].videobuffervalue = (*aJNIEnv)->NewGlobalRef(aJNIEnv, 
                f_devices[aDeviceId].videobuffervalue
            );

    freenect_set_video_callback(f_devices[aDeviceId].f_dev, video_cb);
    
    pthread_mutex_unlock(&mutex);
    
    /* NOTE(review): the stream is started outside the critical section —
       presumably to avoid holding the lock during USB I/O; confirm. */
    freenect_start_video(f_devices[aDeviceId].f_dev);

    
}
예제 #6
0
		// Construct the wrapper: `depth` is sized (in bytes) for a
		// medium-resolution REGISTERED depth frame and m_buffer_video for a
		// medium-resolution RGB frame; no frame has arrived yet.
		MyFreenectDevice(freenect_context *_ctx, int _index):
			Freenect::FreenectDevice(_ctx, _index),
			depth(freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM,FREENECT_DEPTH_REGISTERED).bytes),
			m_buffer_video(freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM,FREENECT_VIDEO_RGB).bytes),
			m_new_rgb_frame(false), m_new_depth_frame(false)
			{
			}
예제 #7
0
/*!
 * \brief Constructor: reserve frame buffers and clear the new-frame flags.
 * \param [in,out] _ctx USB context used to communicate with the Kinect.
 * \param [in] _index index of the Kinect to open.
 *
 * m_buffer_video is sized in bytes for a medium-resolution RGB frame;
 * m_buffer_depth uses the REGISTERED depth frame byte count divided by 2 —
 * presumably because it stores 16-bit depth samples (TODO confirm the
 * element type of m_buffer_depth).
 */
Apikinect::Apikinect(freenect_context *_ctx, int _index)
    : Freenect::FreenectDevice(_ctx, _index),
      m_buffer_video(freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB).bytes),
      m_buffer_depth(freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_REGISTERED).bytes / 2),
      m_new_rgb_frame(false), m_new_depth_frame(false)
{
    // Cache a non-const handle to the underlying device ("the twists and
    // turns life takes" — translated from the original Spanish comment).
    myself = const_cast<freenect_device*>(this->getDevice());
    setDepthFormat(FREENECT_DEPTH_REGISTERED);
}
예제 #8
0
 // Construct the wrapper and precompute a gamma lookup table for depth
 // visualization. NOTE(review): m_buffer_depth is sized with the RGB *video*
 // mode byte count — likely intentional if depth is colorized into an RGB
 // buffer (as in the libfreenect cppview example), but confirm before reuse.
 MyFreenectDevice(freenect_context *_ctx, int _index)
   : Freenect::FreenectDevice(_ctx, _index), m_buffer_depth(freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB).bytes),m_buffer_video(freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB).bytes), m_gamma(2048), m_new_rgb_frame(false), m_new_depth_frame(false)
 {
   // Map each 11-bit depth value i to a gamma-corrected intensity.
   for( unsigned int i = 0 ; i < 2048 ; i++) {
     float v = i/2048.0;
     v = std::pow(v, 3)* 6;
     m_gamma[i] = v*6*256;
   }
 }
		// Open device _index on the given context and wire up the static
		// depth/video trampolines; `this` is stashed as libfreenect user
		// data so the trampolines can recover the C++ object.
		FreenectDevice(freenect_context *_ctx, int _index)
			: m_video_resolution(FREENECT_RESOLUTION_MEDIUM), m_depth_resolution(FREENECT_RESOLUTION_MEDIUM)
		{
			if(freenect_open_device(_ctx, &m_dev, _index) < 0) throw std::runtime_error("Cannot open Kinect");
			freenect_set_user(m_dev, this);
			freenect_set_video_mode(m_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
			freenect_set_depth_mode(m_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
			freenect_set_depth_callback(m_dev, freenect_depth_callback);
			freenect_set_video_callback(m_dev, freenect_video_callback);
		}
예제 #10
0
//---------------------------------------------------------------------------
// Capture thread: configures modes, starts the streams, then polls tilt and
// accelerometer state at ~100 Hz until the thread is asked to stop.
void ofxKinect::threadedFunction(){

	freenect_set_led(kinectDevice, LED_GREEN);
	// Medium-resolution modes; IR vs RGB and registered-vs-mm depth are
	// chosen by the corresponding flags.
	freenect_frame_mode videoMode = freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, bIsVideoInfrared?FREENECT_VIDEO_IR_8BIT:FREENECT_VIDEO_RGB);
	freenect_set_video_mode(kinectDevice, videoMode);
	freenect_frame_mode depthMode = freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, bUseRegistration?FREENECT_DEPTH_REGISTERED:FREENECT_DEPTH_MM);
	freenect_set_depth_mode(kinectDevice, depthMode);

	ofLog(OF_LOG_VERBOSE, "ofxKinect: Device %d %s connection opened", deviceId, serial.c_str());

	freenect_start_depth(kinectDevice);
	if(bGrabVideo) {
		freenect_start_video(kinectDevice);
	}

	// call platform specific processors (needed for Win)
	// NOTE(review): this treats any non-zero return — including positive
	// values — as failure; confirm against the libfreenect return contract.
	if(freenect_process_events(kinectContext.getContext()) != 0) {
		ofLog(OF_LOG_ERROR, "ofxKinect: Device %d freenect_process_events failed!", deviceId);
		return;
	}

	while(isThreadRunning()) {
		// Apply a tilt request posted from another thread.
		if(bTiltNeedsApplying) {
			freenect_set_tilt_degs(kinectDevice, targetTiltAngleDeg);
			bTiltNeedsApplying = false;
		}

		// Refresh tilt + accelerometer readings every iteration.
		freenect_update_tilt_state(kinectDevice);
		freenect_raw_tilt_state * tilt = freenect_get_tilt_state(kinectDevice);

		currentTiltAngleDeg = freenect_get_tilt_degs(tilt);

		rawAccel.set(tilt->accelerometer_x, tilt->accelerometer_y, tilt->accelerometer_z);

		double dx,dy,dz;
		freenect_get_mks_accel(tilt, &dx, &dy, &dz);
		mksAccel.set(dx, dy, dz);

		// ... and $0.02 for the scheduler
		ofSleepMillis(10);
	}

	// finish up a tilt on exit
	if(bTiltNeedsApplying) {
		freenect_set_tilt_degs(kinectDevice, targetTiltAngleDeg);
		bTiltNeedsApplying = false;
	}

	freenect_stop_depth(kinectDevice);
	freenect_stop_video(kinectDevice);
	freenect_set_led(kinectDevice, LED_YELLOW);

	kinectContext.close(*this);
	ofLog(OF_LOG_VERBOSE, "ofxKinect: Device %d connection closed", deviceId);
}
예제 #11
0
// the freenect thread. 
void *freenect_threadfunc(void *arg) {
	
	int accelCount = 0;
	
	freenect_set_tilt_degs(f_dev,freenect_angle);
	freenect_set_led(f_dev,LED_RED);
	freenect_set_depth_callback(f_dev, depth_cb);
	freenect_set_video_callback(f_dev, rgb_cb);
	freenect_set_video_mode(f_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, current_format));
	freenect_set_depth_mode(f_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	freenect_set_video_buffer(f_dev, rgb_back);
	
	freenect_start_depth(f_dev);
	freenect_start_video(f_dev);
	
	while (!die && freenect_process_events(f_ctx) >= 0) {
		if (accelCount++ >= 2000)
		{
			accelCount = 0;
			freenect_raw_tilt_state* state;
			freenect_update_tilt_state(f_dev);
			state = freenect_get_tilt_state(f_dev);
			double dx,dy,dz;
			freenect_get_mks_accel(state, &dx, &dy, &dz);
		}
		
		if (requested_format != current_format) {
			freenect_stop_video(f_dev);
			freenect_set_video_mode(f_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, requested_format));
			freenect_start_video(f_dev);
			current_format = requested_format;
		}
	}
	
	freenect_stop_depth(f_dev);
	freenect_stop_video(f_dev);
	
	freenect_close_device(f_dev);
	freenect_shutdown(f_ctx);

	return NULL;
}
예제 #12
0
        // Open a device by camera serial number (rather than index) and wire
        // up the static depth/video trampolines; `this` is stashed as
        // libfreenect user data so the trampolines can recover the object.
        FreenectDevice(freenect_context *_ctx, char *_serial)
			: m_video_resolution(FREENECT_RESOLUTION_MEDIUM), m_depth_resolution(FREENECT_RESOLUTION_MEDIUM)
		{
            if(freenect_open_device_by_camera_serial(_ctx, &m_dev, _serial)) throw std::runtime_error("Cannot open Kinect")
;
            ///if(freenect_open_device(_ctx, &m_dev, _index) < 0) throw std::runtime_error("Cannot open Kinect");
			freenect_set_user(m_dev, this);
			freenect_set_video_mode(m_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
            freenect_set_depth_mode(m_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, /*FREENECT_DEPTH_11BIT*/FREENECT_DEPTH_REGISTERED));//FREENECT_DEPTH_REGISTERED
			freenect_set_depth_callback(m_dev, freenect_depth_callback);
			freenect_set_video_callback(m_dev, freenect_video_callback);
		}
예제 #13
0
/* Switch the synchronous-API video stream to a new format: stop the stream,
 * rebuild the triple-buffer ring for the new frame size, then restart.
 * Returns 0 on success, -1 if the new ring cannot be allocated (in which
 * case the stream is left stopped with its buffers freed — caller beware). */
static int change_video_format(sync_kinect_t *kinect, freenect_video_format fmt)
{
	freenect_stop_video(kinect->dev);
	free_buffer_ring(&kinect->video);
	if (alloc_buffer_ring_video(fmt, &kinect->video))
		return -1;
	freenect_set_video_mode(kinect->dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, fmt));
	/* bufs[2] is the back buffer libfreenect writes into. */
	freenect_set_video_buffer(kinect->dev, kinect->video.bufs[2]);
	freenect_start_video(kinect->dev);
	return 0;
}
예제 #14
0
		// Change the video format/resolution, restarting the stream only when
		// something actually differs. Throws std::runtime_error if the
		// requested combination is invalid or cannot be applied.
		void setVideoFormat(freenect_video_format requested_format, freenect_resolution requested_resolution = FREENECT_RESOLUTION_MEDIUM) {
			// Guard clause: nothing to do when both already match.
			if (requested_format == m_video_format && requested_resolution == m_video_resolution) return;
			freenect_stop_video(m_dev);
			const freenect_frame_mode mode = freenect_find_video_mode(requested_resolution, requested_format);
			if (!mode.is_valid) throw std::runtime_error("Cannot set video format: invalid mode");
			if (freenect_set_video_mode(m_dev, mode) < 0) throw std::runtime_error("Cannot set video format");
			freenect_start_video(m_dev);
			m_video_format = requested_format;
			m_video_resolution = requested_resolution;
		}
예제 #15
0
		// Byte size of one frame in the current video mode, or 0 for an
		// unrecognised format.
		int getVideoBufferSize(){
			switch(m_video_format) {
				case FREENECT_VIDEO_RGB:
				case FREENECT_VIDEO_BAYER:
				case FREENECT_VIDEO_IR_8BIT:
				case FREENECT_VIDEO_IR_10BIT:
				case FREENECT_VIDEO_IR_10BIT_PACKED:
				case FREENECT_VIDEO_YUV_RGB:
				case FREENECT_VIDEO_YUV_RAW:
					break;  // known format — fall through to the lookup below
				default:
					return 0;
			}
			return freenect_find_video_mode(m_video_resolution, m_video_format).bytes;
		}
예제 #16
0
File: record.c — Project: ABMNYZ/libfreenect
/* Open device 0, print the chosen modes, record depth/video/accelerometer
 * data until `running` is cleared, then tear everything down. */
void init()
{
	freenect_context *ctx;
	freenect_device *dev;
	if (freenect_init(&ctx, 0)) {
		printf("Error: Cannot get context\n");
		return;
	}

	// fakenect doesn't support audio yet, so don't bother claiming the device
	freenect_select_subdevices(ctx, (freenect_device_flags)(FREENECT_DEVICE_MOTOR | FREENECT_DEVICE_CAMERA));

	if (freenect_open_device(ctx, &dev, 0)) {
		printf("Error: Cannot get device\n");
		/* Fix: the context was previously leaked on this error path. */
		freenect_shutdown(ctx);
		return;
	}
	print_mode("Depth", freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	print_mode("Video", freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
	freenect_set_depth_mode(dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	freenect_start_depth(dev);
	freenect_set_video_mode(dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
	freenect_start_video(dev);
	/* Route frames either through ffmpeg encoders or the raw file writers. */
	if (use_ffmpeg) {
		init_ffmpeg_streams();
		freenect_set_depth_callback(dev, depth_cb_ffmpeg);
		freenect_set_video_callback(dev, rgb_cb_ffmpeg);
	} else {
		freenect_set_depth_callback(dev, depth_cb);
		freenect_set_video_callback(dev, rgb_cb);
	}
	/* Snapshot the accelerometer once per processed event batch. */
	while (running && freenect_process_events(ctx) >= 0)
		snapshot_accel(dev);
	freenect_stop_depth(dev);
	freenect_stop_video(dev);
	freenect_close_device(dev);
	freenect_shutdown(ctx);
}
예제 #17
0
/* Freenect worker thread: pick the video resolution from the globals W/H,
 * start both streams and pump events until `shutdown` is set. Supports only
 * 640x480 (medium) and 1280x1024 (high); anything else aborts. */
void*  f_threadfunc( void *arg )
{
	freenect_set_tilt_degs( f_device, f_angle );
	freenect_set_led( f_device, LED_RED );
	freenect_set_depth_callback( f_device, depth_cb );
	freenect_set_video_callback(f_device, rgb_cb);
	if( (W == 640) && (H == 480) )
	{
		freenect_set_video_mode( f_device, freenect_find_video_mode( FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB ) );
		printf("Resolution:%d:%d\n",W,H);
		fflush(stdout);
	}
	else if( (W == 1280) && (H == 1024) )
	{
		freenect_set_video_mode( f_device, freenect_find_video_mode( FREENECT_RESOLUTION_HIGH, FREENECT_VIDEO_RGB ) );
		printf("Resolution:%d:%d\n",W,H);
		fflush(stdout);
	}
	else
	{
		printf("Unknown resolution. Shutting down...\n");
		CloseAll();
		/* Fix: previously this fell through and still configured the depth
		 * mode and started both streams on a closed/unknown setup. */
		return NULL;
	}
	freenect_set_depth_mode( f_device, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT) );
	freenect_start_video( f_device );
	freenect_start_depth( f_device );
	while ( !shutdown && freenect_process_events( f_context ) >= 0 )
	{
	
	}
	CloseAll();
	printf( "Done. Shutting down...\n" );
	fflush(stdout);
	return NULL;
}
예제 #18
0
//send video ARGB to client
void sendVideo(){
	int n;
	uint32_t ts,x, y, i, j;
	freenect_sync_get_video(&buf_rgb_temp, &ts, 0, FREENECT_VIDEO_RGB);
	uint8_t *rgb = (uint8_t*)buf_rgb_temp;
	freenect_frame_mode video_mode = freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB);

	//MIRROR RGB DATA AND ADD ALPHA
	for(x = 0; x < video_mode.width; x++){
		for(y = 0; y < video_mode.height; y++){
			i = x + (y  * video_mode.width);
			if(!_video_mirrored)
				j = i;
			else
				j = (video_mode.width - x - 1) + (y  * video_mode.width);
			if(_video_compression != 0) {
				buf_rgb[3 * i + 2] = rgb[3 * j + 2];
				buf_rgb[3 * i + 1] = rgb[3 * j + 1];
				buf_rgb[3 * i + 0] = rgb[3 * j + 0];
			} else {
				buf_rgb[4 * i + 0] = rgb[3 * j + 2];
				buf_rgb[4 * i + 1] = rgb[3 * j + 1];
				buf_rgb[4 * i + 2] = rgb[3 * j + 0];
				buf_rgb[4 * i + 3] = 0x00;	
			}
		}
	}
	if(_video_compression != 0) {
		unsigned char *compressed_buff = (unsigned char *)malloc(AS3_BITMAPDATA_LEN);
		unsigned long len = 0;
		RGB_2_JPEG(buf_rgb, &compressed_buff, &len, _video_compression);
		n = freenect_network_sendMessage(0, 2, compressed_buff, (int)len);
		free(compressed_buff);
	} else {
		n = freenect_network_sendMessage(0, 2, buf_rgb, AS3_BITMAPDATA_LEN);
	}
	if ( n < 0)
	{
		printf("Error sending Video\n");
		client_connected = 0;
	}
}
예제 #19
0
int KinectFreenect::start() {
    if (freenect_init(&f_ctx, NULL) < 0) {
        printf("freenect_init() failed\n");
        return 1;
    }

    int nr_devices = freenect_num_devices(f_ctx);

    int user_device_number = 0;
    if (freenect_open_device(f_ctx, &f_dev, user_device_number) < 0) {
        printf("Could not open device.\n");
        return 1;
    }

    freenect_set_led(f_dev, LED_RED);
    freenect_set_depth_callback(f_dev, KinectFreenect::depth_cb);
    freenect_set_video_callback(f_dev, KinectFreenect::rgb_cb);
    freenect_set_video_mode(f_dev, freenect_find_video_mode(current_resolution, current_format));
    freenect_set_depth_mode(f_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));

    freenect_start_depth(f_dev);
    freenect_start_video(f_dev);
    
    int res;
    res = pthread_create(&fnkt_thread, NULL, freenect_threadfunc, NULL);
    if (res) {
        printf("pthread_create failed\n");
        return 1;
    }

    int status = 0;
    while (!die && status >= 0) {
        char k = cvWaitKey(5);
        if( k == 27 ) {
            die = 1;
            break;
        }
    }


    return 0;
}
예제 #20
0
File: sensor.c — Project: samfoo/sentry
/* Capture thread: configure depth + video streams, pump events until `die`
 * is set, then stop both streams and release the device/context. */
void *capture_loop(void *arg) {
    freenect_set_depth_callback(device, depth_captured);
    freenect_set_depth_mode(device, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_MM));
    freenect_start_depth(device);

    freenect_set_video_callback(device, rgb_captured);
    freenect_set_video_mode(device, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
    freenect_set_video_buffer(device, kinect_rgb_buffer);
    freenect_start_video(device);

    while (freenect_process_events(context) >= 0) {
        if (die) break;
    }

    printf("shutting down streams...\n");
    freenect_stop_depth(device);
    /* Fix: the video stream was started above but never stopped here. */
    freenect_stop_video(device);
    freenect_close_device(device);
    freenect_shutdown(context);
    printf("done!\n");

    return NULL;
}
예제 #21
0
// Capture thread entry point. `user` must be the owning Kinect instance;
// the thread configures the device, pumps libfreenect events, and exits
// when Kinect::must_stop is set (read under the instance mutex).
void kinect_thread(void* user) { 

  Kinect* kinect = static_cast<Kinect*>(user);
  if(!kinect) {
    printf("Error: kinect thread didn't receive a reference to the Kinect instance.\n");
    ::exit(EXIT_FAILURE);
  }

  freenect_device* dev = kinect->device;
  
  //freenect_set_tilt_degs(dev, 15);
  freenect_set_led(dev, LED_RED);
  freenect_set_depth_callback(dev, kinect_depth_callback);
  freenect_set_video_callback(dev, kinect_video_callback);
  freenect_set_video_mode(dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
  freenect_set_depth_mode(dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
  freenect_set_video_buffer(dev, kinect->rgb_back);
  freenect_set_depth_buffer(dev, kinect->depth_back);
  freenect_start_depth(dev);
  freenect_start_video(dev);

  bool must_stop = false;

  while(freenect_process_events(kinect->ctx) >= 0) {
        
    // Copy the shared stop flag under the mutex, then test it outside.
    uv_mutex_lock(&kinect->mutex);
      must_stop = kinect->must_stop;
    uv_mutex_unlock(&kinect->mutex);

    if(must_stop) {
      break;
    }

  }
  
  // Green LED signals the streams were shut down cleanly.
  freenect_set_led(dev, LED_GREEN);
  freenect_stop_depth(dev);
  freenect_stop_video(dev);
}
예제 #22
0
/* Allocate the 3-buffer video ring for format `fmt` into `buf`.
 * Returns 0 on success, -1 for an unsupported format or allocation failure
 * (in which case any partially-allocated buffers are released). */
static int alloc_buffer_ring_video(freenect_video_format fmt, buffer_ring_t *buf)
{
	int sz, i;
	switch (fmt) {
		case FREENECT_VIDEO_RGB:
		case FREENECT_VIDEO_BAYER:
		case FREENECT_VIDEO_IR_8BIT:
		case FREENECT_VIDEO_IR_10BIT:
		case FREENECT_VIDEO_IR_10BIT_PACKED:
			sz = freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, fmt).bytes;
			break;
		default:
			printf("Invalid video format %d\n", fmt);
			return -1;
	}
	for (i = 0; i < 3; ++i) {
		buf->bufs[i] = malloc(sz);
		/* Fix: malloc failures were previously ignored, leaving NULL
		 * buffers in the ring; roll back and report instead. */
		if (!buf->bufs[i]) {
			printf("Out of memory allocating video buffer ring\n");
			while (i-- > 0) {
				free(buf->bufs[i]);
				buf->bufs[i] = NULL;
			}
			return -1;
		}
	}
	buf->timestamp = 0;
	buf->valid = 0;
	buf->fmt = fmt;
	return 0;
}
예제 #23
0
/* fakenect playback: replay one recorded index entry per call.
 * Returns 0 after processing one entry, -1 when the index is exhausted or
 * unreadable. */
int freenect_process_events(freenect_context *ctx)
{
	/* This is where the magic happens. We read 1 update from the index
	   per call, so this needs to be called in a loop like usual.  If the
	   index line is a Depth/RGB image the provided callback is called.  If
	   the index line is accelerometer data, then it is used to update our
	   internal state.  If you query for the accelerometer data you get the
	   last sensor reading that we have.  The time delays are compensated as
	   best as we can to match those from the original data and current run
	   conditions (e.g., if it takes longer to run this code then we wait less).
	 */
	if (!index_fp)
		open_index();
	char type;
	double record_cur_time;
	unsigned int timestamp, data_size;
	char *data = NULL;
	if (parse_line(&type, &record_cur_time, &timestamp, &data_size, &data))
		return -1;
	// Sleep an amount that compensates for the original and current delays
	// playback_ is w.r.t. the current time
	// record_ is w.r.t. the original time period during the recording
	if (record_prev_time != 0. && playback_prev_time != 0.)
		sleep_highres((record_cur_time - record_prev_time) - (get_time() - playback_prev_time));
	record_prev_time = record_cur_time;
	// Entry types: 'd' = depth frame, 'r' = RGB frame, 'a' = accelerometer.
	switch (type) {
		case 'd':
			if (cur_depth_cb && depth_running) {
				void *cur_depth = skip_line(data);
				// If the client registered its own buffer, copy into it so
				// the callback sees the buffer it expects.
				if (depth_buffer) {
					memcpy(depth_buffer, cur_depth, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT).bytes);
					cur_depth = depth_buffer;
				}
				cur_depth_cb(fake_dev, cur_depth, timestamp);
			}
			break;
		case 'r':
			if (cur_rgb_cb && rgb_running) {
				void *cur_rgb = skip_line(data);
				if (rgb_buffer) {
					memcpy(rgb_buffer, cur_rgb, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB).bytes);
					cur_rgb = rgb_buffer;
				}
				cur_rgb_cb(fake_dev, cur_rgb, timestamp);
			}
			break;
		case 'a':
			// Only accept accelerometer records whose size matches the
			// current struct layout; older recordings are warned about once.
			if (data_size == sizeof(state)) {
				memcpy(&state, data, sizeof(state));
			} else if (!already_warned) {
				already_warned = 1;
				printf("\n\nWarning: Accelerometer data has an unexpected"
				       " size [%u] instead of [%u].  The acceleration "
				       "and tilt data will be substituted for dummy "
				       "values.  This data was probably made with an "
				       "older version of record (the upstream interface "
				       "changed).\n\n",
				       data_size, (unsigned int)sizeof state);
			}
			break;
	}
	free(data);
	playback_prev_time = get_time();
	return 0;
}
//----------------------------------------------------------------------------------------------------------------------
// Initialize libfreenect, open device 0, allocate the OpenCV frame buffers,
// build the depth-to-metres lookup table, register callbacks and start the
// capture subsystems. Returns false when no device is present; calls
// exit(EXIT_FAILURE) on hard driver errors.
bool KinectInterface::init()
{
    // first see if we can init the kinect
    if (freenect_init(&m_ctx, NULL) < 0)
    {
        qDebug()<<"freenect_init() failed\n";
        exit(EXIT_FAILURE);
    }
    /// set logging level; make this programmable at some stage
    freenect_set_log_level(m_ctx, FREENECT_LOG_DEBUG);
    /// see how many devices we have
    int nr_devices = freenect_num_devices (m_ctx);
    qDebug()<<"Number of devices found: "<<nr_devices<<"\n";

    if(nr_devices < 1)
    {
        //delete s_instance;
        //s_instance = 0;
        return false;
    }
    /// now allocate the buffers so we can fill them
    m_userDeviceNumber = 0;
    // grab the buffer size and store for later use
    m_resolutionRGBBytes=freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM,FREENECT_VIDEO_RGB).bytes;
    m_bufferDepth=cvCreateMat(480,640,CV_8UC3);

    //m_bufferVideo.resize(m_resolutionRGBBytes);

    m_bufferVideo = cvCreateMat(480,640,CV_8UC3);

//    m_nextBuffer = cvCreateMat(480,640,CV_8UC1);
//    m_prevBuffer = cvCreateMat(480,640,CV_8UC1);
//    m_diffBuffer = cvCreateMat(480,640,CV_8UC1);

    m_resolutionDepthBytes=freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM,FREENECT_DEPTH_11BIT).bytes;
    //m_bufferDepthRaw.resize(m_resolutionDepthBytes);
    // NOTE(review): despite the "16" in the name this is created as 8UC1
    // (one byte per pixel) — confirm whether 16UC1 was intended.
    m_bufferDepthRaw16=cvCreateMat(480,640,CV_8UC1);

    m_bufferDepthRaw=cvCreateMat(480,640,CV_8UC1);



   // m_originalFrameDepth=NULL;



    m_gamma.resize(2048);
    /// open the device at present hard coded to device 0 as I only
    /// have 1 kinect
    /// \todo make this support multiple devices at some stage
    if (freenect_open_device(m_ctx, &m_dev, m_userDeviceNumber) < 0)
    {
        qDebug()<<"Could not open device\n";
        exit(EXIT_FAILURE);
    }


    /// build the gamma table used for the depth to rgb conversion
    /// taken from the demo programs
//    for (int i=0; i<2048; ++i)
//    {
//        float v = i/2048.0;
//        v = std::pow(v, 3)* 6;
//        m_gamma[i] = v*6*256;
//    }


    // from opencv imaging imformation wiki page http://openkinect.org/wiki/Imaging_Information
    // Maps each raw 11-bit depth value to an approximate distance in metres.
    const float k1 = 1.1863;
    const float k2 = 2842.5;
    const float k3 = 0.1236;
    const float offset = 0.037;
    float depth = 0;
    for (size_t i=0; i<2048; i++)
    {
        depth = k3 * tanf(i/k2 + k1) - offset;
        m_gamma[i] = depth;
    }


    /// init our flags
    m_newRgbFrame=false;
    m_newDepthFrame=false;
    m_deviceActive=true;

    m_threshValue = 100;


    // set our video formats to RGB by default
    /// @todo make this more flexible at some stage
    freenect_set_video_mode(m_dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
    freenect_set_depth_mode(m_dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
    // deprecated
    //freenect_set_video_format(m_dev, FREENECT_VIDEO_RGB);
    //freenect_set_depth_format(m_dev, FREENECT_DEPTH_11BIT);
    /// hook in the callbacks
    freenect_set_depth_callback(m_dev, depthCallback);
    freenect_set_video_callback(m_dev, videoCallback);
    // start the video and depth sub systems
    startVideo();
    startDepth();
    // set the thread to be active and start
    m_process = new QKinectProcessEvents(m_ctx);
    m_process->setActive();
    m_process->start();

    m_depthLower = 0.02;
    m_depthHigher = 1.02; // has to be just above the table (in meteres)

    //m_selectedBoxCoords = NULL;

    m_selectedBoxCoords = cv::Rect(0,0,0,0);

    m_toggleTracking = false;
    m_setBounds = false;

    return true;
}
예제 #25
0
// Open a Kinect device for this Max/Jitter object: (re)start the capture
// thread if needed, pick a free device index (1-based; 0 = "first free"),
// open it, configure video/depth modes from the object's attributes, and
// start both streams. On any failure the object is left closed.
void jit_freenect_grab_open(t_jit_freenect_grab *x,  t_symbol *s, long argc, t_atom *argv)
{
	int ndevices, devices_left, dev_ndx;
	t_jit_freenect_grab *y;
	freenect_device *dev;
	
	postNesa("opening device...\n");//TODO: remove
	
	if(x->device){
		error("A device is already open.");
		return;
	}
	x->is_open = FALSE;
	if(!f_ctx){
		
		postNesa("!f_ctx is null, opening a new device\n");//TODO: remove
		
		if (jit_freenect_restart_thread(x)!=MAX_ERR_NONE) {
			
		//if (pthread_create(&capture_thread, NULL, capture_threadfunc, NULL)) {
			error("Failed to create capture thread.");
			return;
		}
		// Busy-wait (up to 1000 yields) for the capture thread to publish
		// the freenect context.
		int bailout=0;
		while((!f_ctx)&&(++bailout<1000)){
			//systhread_sleep(1);
			sleep(0);
			//post("deadlocking in the sun %i",bailout);//TODO: remove
		}
		if (!f_ctx)
		{
			// TODO: replace with conditionall
			error("Failed to init freenect after %i retries.\n",bailout);
			return;
		}
	}
	
	ndevices = freenect_num_devices(f_ctx);
	
	if(!ndevices){
		error("Could not find any connected Kinect device. Are you sure the power cord is plugged-in?");
		return;
	}
	
	// Count devices not already claimed by walking the context's open list.
	devices_left = ndevices;
	dev = f_ctx->first;
	while(dev){
		dev = dev->next;
		devices_left--;
	}
	
	if(!devices_left){
		error("All Kinect devices are currently in use.");
		return;
	}
	
	if(!argc){
		x->index = 0;	
	}
	else{
		//Is the device already in use?
		x->index = jit_atom_getlong(argv);
		
		dev = f_ctx->first;
		while(dev){
			y = freenect_get_user(dev);
			if(y->index == x->index){
				error("Kinect device %d is already in use.", x->index);
				x->index = 0;
				return;
			}
			dev = dev->next;
		}
	}
	
	if(x->index > ndevices){
		error("Cannot open Kinect device %d, only %d are connected.", x->index, ndevices);
		x->index = 0;
		return;
	}
	
	//Find out which device to open
	// When no index was requested, scan for the lowest 1-based index not
	// claimed by another instance.
	dev_ndx = x->index;
	if(!dev_ndx){
		int found = 0;
		while(!found){
			found = 1;
			dev = f_ctx->first;
			while(dev){
				y = freenect_get_user(dev);
				if(y->index-1 == dev_ndx){
					found = 0;
					break;
				}
				dev = dev->next;
			}
			dev_ndx++;
		}
		x->index = dev_ndx;
	}
		
	if (freenect_open_device(f_ctx, &(x->device), dev_ndx-1) < 0) {
		error("Could not open Kinect device %d", dev_ndx);
		x->index = 0;
		x->device = NULL;
		// Fix: previously execution fell through here and went on to
		// register callbacks and start streams on the NULL device.
		return;
	}
		else {
			postNesa("device open");//TODO: remove
		}

	//freenect_set_depth_buffer(x->device, x->depth_back);
	//freenect_set_video_buffer(x->device, x->rgb_back);
	
	freenect_set_depth_callback(x->device, depth_callback);
	freenect_set_video_callback(x->device, rgb_callback);
	// Video format follows the object's "format" attribute (IR or RGB).
	if(x->format.a_w.w_sym == s_ir){
		freenect_set_video_mode(x->device, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_IR_8BIT));
	}
	else{
		freenect_set_video_mode(x->device, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
	}
	
	//TODO: add FREENECT_DEPTH_REGISTERED mode
	//FREENECT_DEPTH_REGISTERED   = 4, /**< processed depth data in mm, aligned to 640x480 RGB */
	//FREENECT_DEPTH_11BIT
	if (x->aligndepth==1)
	{
	postNesa("Depth is aligned to color");
		freenect_set_depth_mode(x->device, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_REGISTERED));
	}
	else 
	{
		freenect_set_depth_mode(x->device, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	}
		//freenect_set_video_buffer(x->device, rgb_back);
	
	//Store a pointer to this object in the freenect device struct (for use in callbacks)
	freenect_set_user(x->device, x);  
	
	freenect_set_led(x->device,LED_RED);
	
	//freenect_set_tilt_degs(x->device,x->tilt);
	
	freenect_start_depth(x->device);
	freenect_start_video(x->device);
	
	x->is_open = TRUE;
	open_device_count++;
	freenect_active=TRUE;
}
예제 #26
0
File: qKinect.cpp — Project: getov/trunk
void qKinect::doStartGrabbing()
{
	assert(m_app);
	if (!m_app)
		return;

	f_ctx=0;
    f_dev=0;
	s_grabIndex=0;

	if (m_kDlg)
		delete m_kDlg;
	m_kDlg=0;

	s_max_depth_count = 0;
	if (s_depth_data)
		delete[] s_depth_data;
	s_depth_count = 0;
	s_depth_data = 0;
	s_wDepth = s_hDepth = 0;

	if (s_last_rgb_data)
		delete[] s_last_rgb_data;
	s_last_rgb_data = 0;
	s_rgb_count = 0;
	s_wRgb = s_hRgb = 0;

	if (freenect_init(&f_ctx, NULL) < 0)
	{
		m_app->dispToConsole("[qKinect] Failed to initialize kinect driver!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	freenect_set_log_level(f_ctx, FREENECT_LOG_DEBUG);

	int nr_devices = freenect_num_devices(f_ctx);
	m_app->dispToConsole(qPrintable(QString("[qKinect] Number of devices found: %1").arg(nr_devices)));

	if (nr_devices < 1)
		return;

	if (freenect_open_device(f_ctx, &f_dev, 0) < 0)
	{
		m_app->dispToConsole("[qKinect] Failed to initialize kinect device!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	//test: try to init high resolution mode
	//freenect_frame_mode upDepthMode = freenect_find_depth_mode(FREENECT_RESOLUTION_HIGH, FREENECT_DEPTH_11BIT);
	//int success = freenect_set_depth_mode(f_dev,upDepthMode);

	/*** Depth information ***/
	freenect_frame_mode depthMode = freenect_get_current_depth_mode(f_dev);
	if (!depthMode.depth_format == FREENECT_DEPTH_11BIT)
	{
		depthMode = freenect_find_depth_mode(depthMode.resolution, FREENECT_DEPTH_11BIT);
		if (freenect_set_depth_mode(f_dev,depthMode)<0)
		{
			m_app->dispToConsole("[qKinect] Failed to initialiaze depth mode!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			return;
		}
	}
	if (!getResolution(depthMode.resolution,s_wDepth,s_hDepth))
	{
		m_app->dispToConsole("[qKinect] Failed to read depth resolution!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}
	m_app->dispToConsole(qPrintable(QString("[qKinect] Depth resolution: %1 x %2").arg(s_wDepth).arg(s_hDepth)));

	s_depth_data = new uint16_t[s_wDepth*s_hDepth];
	if (!s_depth_data)
	{
		m_app->dispToConsole("[qKinect] Not enough memory!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}
	s_max_depth_count = 1;

	/*** RGB information ***/
	bool grabRGB = true;
	{
		freenect_frame_mode rgbMode = freenect_get_current_video_mode(f_dev);
		if (!rgbMode.video_format == FREENECT_VIDEO_RGB || depthMode.resolution != rgbMode.resolution)
		{
			rgbMode = freenect_find_video_mode(depthMode.resolution, FREENECT_VIDEO_RGB);
			if (freenect_set_video_mode(f_dev,rgbMode)<0)
			{
				m_app->dispToConsole("[qKinect] Can't find a video mode compatible with current depth mode?!");
				grabRGB = false;
			}
		}

		//still want to/can grab RGB info?
		if (grabRGB)
		{
			getResolution(rgbMode.resolution,s_wRgb,s_hRgb);

			s_last_rgb_data = new uint8_t[s_wRgb*s_hRgb*3];
			if (!s_last_rgb_data) //not enough memory for RGB
			{
				m_app->dispToConsole("[qKinect] Not enough memory to grab RGB info!");
				grabRGB = false;
			}
			else
			{
				m_app->dispToConsole(qPrintable(QString("[qKinect] RGB resolution: %1 x %2").arg(s_wRgb).arg(s_hRgb)));
			}
		}
	}

    int freenect_angle = 0;
	freenect_set_tilt_degs(f_dev,freenect_angle);
	freenect_set_led(f_dev,LED_RED);
	freenect_set_depth_callback(f_dev, depth_cb);
	freenect_set_video_callback(f_dev, rgb_cb);
	if (grabRGB)
		freenect_set_video_buffer(f_dev, s_last_rgb_data);

	freenect_start_depth(f_dev);
	if (s_last_rgb_data)
		freenect_start_video(f_dev);

	m_kDlg = new ccKinectDlg(m_app->getMainWindow());
	if (grabRGB)
		m_kDlg->addMode(QString("%1 x %2").arg(s_wDepth).arg(s_hDepth));
	else
		m_kDlg->grabRGBCheckBox->setChecked(false);
	m_kDlg->grabRGBCheckBox->setEnabled(grabRGB);
	m_kDlg->grabPushButton->setEnabled(true);

	connect(m_kDlg->grabPushButton, SIGNAL(clicked()), this, SLOT(grabCloud()));
	connect(m_kDlg, SIGNAL(finished(int)), this, SLOT(dialogClosed(int)));

	//m_kDlg->setModal(false);
	//m_kDlg->setWindowModality(Qt::NonModal);
	m_kDlg->show();

	if (!m_timer)
	{
		m_timer = new QTimer(this);
		connect(m_timer, SIGNAL(timeout()), this, SLOT(updateRTView()));
	}
	m_timer->start(0);
}
예제 #27
0
/* Returns the video frame mode used by this application.
 * Note: only one mode is supported (medium-resolution RGB), so the
 * 'mode_num' argument is deliberately ignored. */
freenect_frame_mode freenect_get_video_mode(int mode_num)
{
    (void)mode_num; /* single fixed mode */
    freenect_frame_mode rgb_medium =
        freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB);
    return rgb_medium;
}
예제 #28
0
/* File-local helper: frees the five staging buffers allocated by kinect_init()
 * and resets the pointers (free(NULL) is a no-op, so partial failures are OK). */
static void kinect_free_buffers(Kinect* kt)
{
	free(kt->depth_mid);   kt->depth_mid   = NULL;
	free(kt->depth_front); kt->depth_front = NULL;
	free(kt->rgb_back);    kt->rgb_back    = NULL;
	free(kt->rgb_mid);     kt->rgb_mid     = NULL;
	free(kt->rgb_front);   kt->rgb_front   = NULL;
}

/* Initializes 'kt': allocates the image staging buffers, precomputes the depth
 * gamma table, opens the first Kinect device and starts the depth/video
 * streams (medium-resolution RGB + 11-bit depth).
 * Returns 0 on success, 1 on failure. On failure everything allocated so far
 * (buffers, mutex, freenect context) is released. */
int kinect_init(Kinect* kt){
	pthread_mutex_init(&kt->mutex, NULL);

	/* staging buffers: 640x480 pixels, 3 bytes per pixel */
	kt->depth_mid   = (uint8_t*)malloc(640*480*3);
	kt->depth_front = (uint8_t*)malloc(640*480*3);
	kt->rgb_back    = (uint8_t*)malloc(640*480*3);
	kt->rgb_mid     = (uint8_t*)malloc(640*480*3);
	kt->rgb_front   = (uint8_t*)malloc(640*480*3);
	/* fix: the malloc results were never checked - a failed allocation would
	 * crash later inside the frame callbacks */
	if (!kt->depth_mid || !kt->depth_front || !kt->rgb_back || !kt->rgb_mid || !kt->rgb_front) {
		fprintf(stderr, "kinect_init: out of memory\n");
		kinect_free_buffers(kt);
		pthread_mutex_destroy(&kt->mutex);
		return 1;
	}

	/* precompute the 11-bit depth -> intensity gamma lookup table */
	int i;
	for (i=0; i<2048; i++) {
		float v = i/2048.0;
		v = powf(v, 3) * 6;
		kt->t_gamma[i] = v*6*256;
	}

	if (freenect_init(&kt->ctx, NULL) < 0) {
		fprintf(stderr, "freenect_init() failed\n");
		kinect_free_buffers(kt);
		pthread_mutex_destroy(&kt->mutex);
		return 1;
	}

	freenect_set_log_level(kt->ctx, FREENECT_LOG_DEBUG);
	freenect_select_subdevices(kt->ctx, (freenect_device_flags)(FREENECT_DEVICE_MOTOR | FREENECT_DEVICE_CAMERA));

	int nr_devices = freenect_num_devices (kt->ctx);
	fprintf(stderr, "Number of devices found: %d\n", nr_devices);

	int user_device_number = 0;

	if (nr_devices < 1) {
		fprintf(stderr, "No devices detected\n");
		freenect_shutdown(kt->ctx);
		kinect_free_buffers(kt); /* fix: buffers were leaked on this path */
		pthread_mutex_destroy(&kt->mutex);
		return 1;
	}

	if (freenect_open_device(kt->ctx, &kt->dev, user_device_number) < 0) {
		fprintf(stderr, "Could not open device\n");
		freenect_shutdown(kt->ctx);
		kinect_free_buffers(kt); /* fix: buffers were leaked on this path */
		pthread_mutex_destroy(&kt->mutex);
		return 1;
	}

	/* camera setup: level tilt, red LED, medium-res RGB + 11-bit depth */
	int freenect_angle = 0;
	freenect_set_tilt_degs(kt->dev,freenect_angle);
	freenect_set_led(kt->dev,LED_RED);
	freenect_set_depth_callback(kt->dev, depth_cb);
	freenect_set_video_callback(kt->dev, rgb_cb);
	freenect_set_video_mode(kt->dev, freenect_find_video_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_VIDEO_RGB));
	freenect_set_depth_mode(kt->dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
	freenect_set_video_buffer(kt->dev, kt->rgb_back);

	freenect_start_depth(kt->dev);
	freenect_start_video(kt->dev);

	return 0;
}
예제 #29
0
  /**
   * Reallocate the video buffer if the video format or resolution changes.
   * Throws std::runtime_error when the format/resolution combination is not
   * supported by libfreenect.
   */
  void allocateBufferVideo(
      ImageBuffer& buffer,
      const freenect_video_format& format,
      const freenect_resolution& resolution,
      const freenect_registration& registration) {

    // Grab the buffer lock. Mostly a debugging aid: reallocation should only
    // ever happen while the freenect thread is not using the buffer.
    boost::lock_guard<boost::mutex> buffer_lock(buffer.mutex);

    // Drop the old allocation first, so the buffer is never left pointing at
    // stale data in case an exception is thrown below.
    buffer.image_buffer.reset();

    // Classify the requested format; anything else is rejected outright.
    const bool color_format =
        format == FREENECT_VIDEO_RGB ||
        format == FREENECT_VIDEO_BAYER ||
        format == FREENECT_VIDEO_YUV_RGB;
    const bool ir_format =
        format == FREENECT_VIDEO_IR_8BIT ||
        format == FREENECT_VIDEO_IR_10BIT ||
        format == FREENECT_VIDEO_IR_10BIT_PACKED;
    if (!color_format && !ir_format) {
      throw std::runtime_error("libfreenect: Invalid video format: " +
          boost::lexical_cast<std::string>(format));
    }

    // Only the medium and high resolutions carry video data.
    if (resolution != FREENECT_RESOLUTION_HIGH &&
        resolution != FREENECT_RESOLUTION_MEDIUM) {
      throw std::runtime_error("libfreenect: Invalid video resolution: " +
          boost::lexical_cast<std::string>(resolution));
    }

    buffer.metadata = freenect_find_video_mode(resolution, format);
    if (!buffer.metadata.is_valid) {
      throw std::runtime_error("libfreenect: Invalid video fmt, res: " + 
          boost::lexical_cast<std::string>(format) + "," +
          boost::lexical_cast<std::string>(resolution));
    }

    // All is good: allocate the frame storage and derive the focal length
    // from the mode libfreenect reported (IR shares the depth camera optics).
    buffer.image_buffer.reset(new unsigned char[buffer.metadata.bytes]);
    buffer.focal_length = ir_format
        ? getDepthFocalLength(registration, buffer.metadata.width)
        : getRGBFocalLength(buffer.metadata.width);
    buffer.is_registered = false;
  }
예제 #30
0
/*
 * Get a video frame mode.
 * Returns a heap-allocated copy of the mode descriptor (the caller owns it
 * and must free() it), or NULL if the allocation fails.
 */
freenect_frame_mode* find_video_mode_freenect(uint32_t res,
                                              uint32_t fmt){
  freenect_frame_mode* mode = malloc(sizeof (*mode));
  /* fix: malloc result was dereferenced without a NULL check */
  if (mode == NULL)
    return NULL;
  *mode = freenect_find_video_mode(res,fmt);
  return mode;
}