예제 #1
0
//Main: we start here
//Main entry point: installs signal handlers, allocates the frame staging
//buffers, starts the network server and sleeps until a clean exit is
//requested (signal or network failure).
int main(int argc, char **argv)
{
	printf("as3kinect server %s\n", _current_version);
	//waiting-for-client LED status (blink green)
	freenect_sync_set_led((freenect_led_options) 4, 0);
	//Listening to C-c
  	if (signal (SIGINT, clean_exit) == SIG_IGN)
    	signal (SIGINT, SIG_IGN);
	//Listening to C-z (SIGTSTP does not exist on Windows)
	#ifndef _WIN32
  	if (signal (SIGTSTP, clean_exit) == SIG_IGN)
    	signal (SIGTSTP, SIG_IGN);
	#endif
	//Alloc memory for video and depth frames.
	//BUG FIX: FREENECT_DEPTH_11BIT and FREENECT_VIDEO_RGB are format enum
	//values (both 0), not byte counts, so the original allocated 0 bytes.
	//Use the real frame sizes: 640x480 u16 samples for 11-bit depth and
	//640x480 RGB triplets for video.
	buf_depth_temp = malloc(640 * 480 * 2);
	buf_rgb_temp = malloc(640 * 480 * 3);
	if (buf_depth_temp == NULL || buf_rgb_temp == NULL) {
		printf("[as3-server] could not allocate frame buffers\n");
		die = 1;
	}
	
	//Initialize network socket (skipped if allocation already failed)
	if ( !die && freenect_network_init(server_connected) < 0 )
		die = 1;
	
	//Sleep main process until exit
	while(!die)
		sleep(2);

	//making a clean exit (free(NULL) is a no-op, so this is safe either way)
	free(buf_depth_temp);
	free(buf_rgb_temp);
	//device off led status
	freenect_sync_set_led((freenect_led_options) 0, 0);
	freenect_sync_stop();
	printf("\n[as3-server] cleanup done!\n");
	return 0;
}
예제 #2
0
// Construct a KinectCloud with the default VGA capture resolution and make
// sure no stale synchronous freenect session is left running.
KinectCloud::KinectCloud()
{
    // Default frame dimensions (640x480).
    this->width  = 640;
    this->height = 480;
    // Shut down any previous synchronous capture before this instance is used.
    freenect_sync_stop();
    // Disabled legacy initialisation, kept for reference:
    //servoAngle = 0;
    //SetServoAngle(0);
    //state = 0;
}
예제 #3
0
File: kinc.cpp  Project: mfdeakin/camera3d
// Application entry point: run the Qt event loop around the Kinect window,
// then shut the synchronous freenect interface down before exiting.
int main(int argc, char **argv)
{
    QApplication app(argc, argv);
    // BUG FIX: the window was heap-allocated with `new` and never deleted;
    // a stack-allocated window has the same lifetime here and cannot leak.
    kinwnd wnd;
    wnd.show();
    app.setActiveWindow(&wnd);
    // Blocks until the application quits.
    const int ret = app.exec();
    freenect_sync_stop();
    return ret;
}
예제 #4
0
// GLUT keyboard callback: Esc (27) shuts everything down and exits,
// 'w'/'s' zoom the view in/out, 'c' toggles color rendering.
// x and y (cursor position) are unused.
void keyPressed(unsigned char key, int x, int y)
{
    switch (key) {
    case 27:
        // Stop the Kinect stream and tear the window down before exiting.
        freenect_sync_stop();
        glutDestroyWindow(window);
        exit(0);
    case 'w':
        zoom *= 1.1f;
        break;
    case 's':
        zoom /= 1.1f;
        break;
    case 'c':
        color = !color;
        break;
    }
}
예제 #5
0
 //Display the RGB stream of Kinect device 0 in an OpenCV window until any
 //key is pressed.
 int main()
 {   
     IplImage *image = cvCreateImageHeader(cvSize(640,480), 8, 3);
     while (cvWaitKey(10) < 0) 
     {
       char *data;
       unsigned int timestamp;
       //data points into libfreenect's internal buffer; it must not be freed
       freenect_sync_get_video((void**)(&data), &timestamp, 0, FREENECT_VIDEO_RGB);
       cvSetData(image, data, 640*3);
       //libfreenect delivers RGB, OpenCV display expects BGR
       cvCvtColor(image, image, CV_RGB2BGR);
       cvShowImage("RGB", image);
     }
     freenect_sync_stop();       
     //BUG FIX: a header from cvCreateImageHeader must be released with
     //cvReleaseImageHeader, not raw cvFree.
     cvReleaseImageHeader(&image);
     return EXIT_SUCCESS;
 }
예제 #6
0
//Record a hand-gesture point sequence: segment the hand from the depth
//stream, classify its posture, accumulate the gesture trajectory, and save
//the sequence to `outfile` when the user confirms with 's' ('q' aborts).
//Unused locals from the original (models, win, num, count, curr, tmp) have
//been removed.
int main (int argc, char *argv[])
{
	ptseq seq;
	
	parse_args(argc,argv);
	seq = ptseq_init();
	
	for (;;) {
		IplImage *depth, *body, *hand;
		CvSeq *cnt;
		CvPoint cent;
		int z, p, k; 
		
		depth = freenect_sync_get_depth_cv(0);
		body = body_detection(depth);
		hand = hand_detection(body, &z);
		
		//no usable hand contour in this frame: grab the next one
		if (!get_hand_contour_basic(hand, &cnt, &cent))
			continue;
		
		//posture could not be classified: skip the frame
		if ((p = basic_posture_classification(cnt)) == -1)
			continue;
		
		//a complete gesture sequence was collected: draw it and ask to save
		if (cvhmm_get_gesture_sequence(p, cent, &seq)) {
			ptseq_draw(seq, 0);
			if ((k = cvWaitKey(0)) == 's') {
				save_sequence(outfile, seq, N);
				break;
			}
			seq = ptseq_reset(seq);
		}
		
		hand = draw_depth_hand(cnt, p);
		cvShowImage("hand", hand);

		//'q' quits without saving
		if ((k = cvWaitKey(T)) == 'q')
			break;
	}

	freenect_sync_stop();
	cvDestroyAllWindows();

	return 0;
}
예제 #7
0
 // Keyboard handler: Esc stops the Kinect stream, '0'-'2' toggle the three
 // draw layers, 't' toggles tracking and 'u' toggles fusion updates.
 // Every key press is also forwarded to the base-class handler.
 void keyboardPressEvent(unsigned char key, int x, int y)
 {
     switch (key) {
     case 27:  freenect_sync_stop();        break;
     case '0': mDrawFlags[0] ^= true;       break;
     case '1': mDrawFlags[1] ^= true;       break;
     case '2': mDrawFlags[2] ^= true;       break;
     case 't': mFfusion->toggleTracking();  break;
     case 'u': mFfusion->toggleUpdate();    break;
     }

     DemoBase::keyboardPressEvent(key, x, y);
 }
예제 #8
0
// Shut down the synchronous freenect interface.
// Always reports success (returns 1).
int kinect_shutdown() {
  freenect_sync_stop();
  return 1;
}
예제 #9
0
// Component stop hook: shuts the synchronous Kinect stream down.
// Always succeeds.
bool CameraNUI::onStop()
{
	freenect_sync_stop();
	return true;
}
// 3.9 (fun_kstop) - freenect_sync_stop_cv
// Thin OpenCV-facing wrapper around freenect_sync_stop().
void freenect_sync_stop_cv (void) {
	freenect_sync_stop();
}
예제 #11
0
//Interactive trainer for hand-posture models: collects `num` Fourier
//descriptor samples per posture, fits a Gaussian model (mean + inverse
//covariance) for each, and writes all models plus their count to `outfile`.
int main (int argc, char *argv[])
{
	IplImage *rgb, *depth, *tmp, *body, *hand;
	CvMat **tr, *mean, *cov;
	CvFileStorage *fs;
	int count=0, p=0, warmup=1;

	parse_args(argc, argv);

	rgb = cvCreateImage(cvSize(W,H), 8, 3);
	tr = (CvMat**)malloc(sizeof(CvMat*)*num);
	init_training_data(tr,num);
	mean = cvCreateMat(1, FD_NUM, CV_64FC1);
	cov  = cvCreateMat(FD_NUM, FD_NUM, CV_64FC1);

	fs = cvOpenFileStorage(outfile, NULL, CV_STORAGE_WRITE, NULL);
	assert(fs);

	for (;;) {
		int z, k, c;
		CvMat *fd;
		CvSeq *cnt;
		
		tmp = freenect_sync_get_rgb_cv(0);
		cvCvtColor(tmp, rgb, CV_BGR2RGB);
		depth = freenect_sync_get_depth_cv(0);
		body = body_detection(depth);
		hand = hand_detection(body, &z);

		if (!get_hand_contour_advanced(hand, rgb, z, &cnt, NULL))
		 	continue; 

		//warm-up phase: just show the contour until the user presses 'g'
		if (warmup) {
			draw_contour(cnt);
			//BUG FIX: the original `k = cvWaitKey(T) == 'g'` assigned the
			//comparison result to k (== binds tighter than =); parenthesize
			//so k holds the pressed key.
			if ((k = cvWaitKey(T)) == 'g') {
				warmup = 0;
				cvDestroyAllWindows();
			}
			continue;
		}

		fd = get_fourier_descriptors(cnt);
		add_training_data(tr[count], fd);

		if (count == 0)
			printf("---> training hand pose %d\n", p);

		//one posture's worth of samples collected: fit and store the model
		//(the redundant inner `int c;` that shadowed the loop-scope c was
		//removed)
		if (++count == num) {
			cvCalcCovarMatrix((void*)tr, count, cov, mean, CV_COVAR_NORMAL);
			cvInvert(cov, cov, CV_LU);
			save_posture_model(fs, mean, cov);
			p++;
			count = 0;

			printf("save and quit:s  exit:q  next:any \n");
			
			if ((c = cvWaitKey(0)) == 's') {
				break;
			} else if (c == 'q') {
				break;
			} else {
				continue;
			}
		}

		draw_contour(cnt);
		cvWaitKey(T);
	}

	//persist how many posture models were written
	cvWriteInt(fs, "total", p);

	freenect_sync_stop();

	free_training_data(tr,num);
	cvReleaseFileStorage(&fs);
	cvReleaseMat(&mean);
	cvReleaseMat(&cov);
	cvReleaseImage(&rgb);

	return 0;
}
예제 #12
0
// Worker thread: pulls depth and RGB frames from the Kinect through the
// libfreenect sync API and republishes them as ROS Image/CameraInfo
// messages until params->run is cleared or a grab fails.
// Returns NULL; the thread result is unused.
void* freenect_thread(void* v) {
  freenect_thread_params* params = (freenect_thread_params*) v;
  int retval = 0;

  // Pre-build the RGB messages once; only stamp/seq/data change per frame.
  sensor_msgs::CameraInfo rgb_info;
  sensor_msgs::Image rgb_image;
  if (params->video_mode.video_format != FREENECT_VIDEO_DUMMY) {
    rgb_image.header.frame_id = params->frame_id + "_rgb";
    rgb_image.is_bigendian = 1;
    rgb_image.height = params->video_mode.height;
    rgb_image.width = params->video_mode.width;
    rgb_image.data.resize(params->video_mode.bytes);
    rgb_image.step = params->video_mode.bytes / params->video_mode.height;
    switch (params->video_mode.video_format) {
    case FREENECT_VIDEO_RGB: rgb_image.encoding = "rgb8"; break;
    case FREENECT_VIDEO_BAYER: rgb_image.encoding = "bayer_gbrg8"; break;
    case FREENECT_VIDEO_IR_8BIT: rgb_image.encoding = "mono8"; break;
    case FREENECT_VIDEO_IR_10BIT: rgb_image.encoding = "mono16"; break;
    case FREENECT_VIDEO_IR_10BIT_PACKED: rgb_image.encoding = "mono10"; break;
    case FREENECT_VIDEO_YUV_RGB: rgb_image.encoding = "yuv_rgb"; break;
    case FREENECT_VIDEO_YUV_RAW: rgb_image.encoding = "yuv_raw"; break;
    }

    // Pinhole intrinsics derived from the Kinect's nominal field of view.
    float hfov = M_PI / 180.0 * 58.5;
    float vfov = M_PI / 180.0 * 46.6;

    rgb_info.header.frame_id = rgb_image.header.frame_id;
    rgb_info.width = rgb_image.width;
    rgb_info.height = rgb_image.height;
    rgb_info.K.fill(0);
    rgb_info.K[0] = rgb_info.width / (2 * tan(hfov / 2));  //fx
    rgb_info.K[4] = rgb_info.height / (2 * tan(vfov / 2)); //fy
    rgb_info.K[2] = rgb_info.width / 2;  //cx
    rgb_info.K[5] = rgb_info.height / 2; //cy
    rgb_info.K[8] = 1;
  }
  printf("rgb image encoding: %s\n", rgb_image.encoding.c_str());

  // Same pre-build for the depth messages. Registered depth is expressed in
  // the RGB camera's frame.
  sensor_msgs::CameraInfo depth_info;
  sensor_msgs::Image depth_image;
  if (params->depth_mode.depth_format != FREENECT_DEPTH_DUMMY) {
    if (params->depth_mode.depth_format == FREENECT_DEPTH_REGISTERED) {
      depth_image.header.frame_id = params->frame_id + "_rgb";
    } else
      depth_image.header.frame_id = params->frame_id + "_depth";
    depth_image.is_bigendian = 1;
    depth_image.height = params->depth_mode.height;
    depth_image.width = params->depth_mode.width;
    depth_image.data.resize(params->depth_mode.bytes);
    depth_image.step = params->depth_mode.bytes / params->depth_mode.height;
    switch (params->depth_mode.depth_format) {
    case FREENECT_DEPTH_11BIT: depth_image.encoding = "mono16"; break;
    case FREENECT_DEPTH_10BIT: depth_image.encoding = "mono16"; break;
    case FREENECT_DEPTH_11BIT_PACKED: depth_image.encoding  = "mono11"; break;
    case FREENECT_DEPTH_10BIT_PACKED: depth_image.encoding = "mono10"; break;
    case FREENECT_DEPTH_REGISTERED:  depth_image.encoding = "mono16"; break;
    case FREENECT_DEPTH_MM: depth_image.encoding = "mono16"; break;
    }

    float hfov = M_PI / 180.0 * 58.5;
    float vfov = M_PI / 180.0 * 46.6;

    depth_info.header.frame_id = depth_image.header.frame_id;
    depth_info.width = depth_image.width;
    depth_info.height = depth_image.height;
    depth_info.K.fill(0);
    depth_info.K[0] = depth_info.width / (2 * tan(hfov / 2));  //fx
    depth_info.K[4] = depth_info.height / (2 * tan(vfov / 2)); //fy
    depth_info.K[2] = depth_info.width / 2;  //cx
    depth_info.K[5] = depth_info.height / 2; //cy
    depth_info.K[8] = 1;
  }

  printf("depth image encoding: %s\n", depth_image.encoding.c_str());
  int count = 0;
  int num_frames_stat = 64; // print an fps statistic every 64 frames
  struct timeval previous_time;
  gettimeofday(&previous_time, 0);
  while (params->run && !retval) {
    void* video_buffer = 0, *depth_buffer = 0;
    // Hardware timestamps: required by the sync API, not used here.
    uint32_t video_ts;
    uint32_t depth_ts;

    if (params->depth_mode.depth_format != FREENECT_DEPTH_DUMMY) {
      retval = freenect_sync_get_depth(&depth_buffer,
                                       &depth_ts,
                                       params->device_num,
                                       params->depth_mode.depth_format);
      if (retval < 0) {
        printf("error in getting depth: %d\n", retval);
        break;
      } else if (!(count % params->frame_skip)) {
        // NOTE(review): assumes frame_skip >= 1 — a zero value would divide
        // by zero here; confirm it is validated by the caller.
        depth_image.header.stamp = ros::Time::now();
        depth_image.header.seq = count;
        memcpy(&depth_image.data[0], depth_buffer, depth_image.data.size());
        params->pub_depth.publish(depth_image);
        depth_info.header.seq = count;
        depth_info.header.stamp = depth_image.header.stamp;
        params->pub_camera_info_depth.publish(depth_info);
      }
    }
    if (params->video_mode.video_format != FREENECT_VIDEO_DUMMY) {
      retval = freenect_sync_get_video(&video_buffer,
                                       &video_ts,
                                       params->device_num,
                                       params->video_mode.video_format);
      if (retval < 0) {
        printf("error in getting rgb: %d\n", retval);
        break;
      } else if (!(count % params->frame_skip)) {
        rgb_image.header.stamp = ros::Time::now();
        rgb_image.header.seq = count;
        memcpy(&rgb_image.data[0], video_buffer, rgb_image.data.size());
        params->pub_rgb.publish(rgb_image);
        rgb_info.header.seq = count;
        // BUG FIX: the camera-info stamp was copied from depth_image (stale,
        // and never set at all when depth is DUMMY); use the rgb stamp.
        rgb_info.header.stamp = rgb_image.header.stamp;
        params->pub_camera_info_rgb.publish(rgb_info);
      }
    }
    // Periodic throughput report.
    if (!(count % num_frames_stat)) {
      struct timeval current_time, interval;
      gettimeofday(&current_time, 0);
      timersub(&current_time, &previous_time, &interval);
      previous_time = current_time;
      double fps = num_frames_stat / (interval.tv_sec + 1e-6 * interval.tv_usec);
      printf("running at %lf fps\n", fps);
      fflush(stdout);
    }
    count++;
  }
  freenect_sync_stop();
  // BUG FIX: control used to fall off the end of this non-void function,
  // which is undefined behavior in C++.
  return 0;
}
///functions
//Main: track a drone with Kinect device 0 and stream its strongest detected
//position over UDP to two receiver IPs given on the command line.
int main(int argc, char* argv[])
{
	//input parameters: the two destination IPs
	if(argc != 3){
		printf("usage: %s <first ip> <second ip>\n", argv[0]);
        return EXIT_FAILURE;
	}
	//set Kinect tilt to 0 degrees & set LED colour
	if(freenect_sync_set_tilt_degs(0, 0)){
        printf("Could not tilt device 0.\n");
        return EXIT_FAILURE;
	}
	if(freenect_sync_set_led(LED_GREEN, 0)){
        printf("Could not change LED of device 0.\n");
        return EXIT_FAILURE;
	}
	//set UDP socket (unused local `i` removed)
	struct sockaddr_in si_other, si_other2;
	int s, slen=sizeof(si_other);
	char buf[BUFLEN];
	if ((s=socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP))==-1){
		fprintf(stderr, "socket() failed\n");
		return 1;
	}
	//first destination address
	memset((char *) &si_other, 0, sizeof(si_other));
	si_other.sin_family = AF_INET;
	si_other.sin_port = htons(PORT);
	if (inet_aton(argv[1], &si_other.sin_addr)==0) {
		fprintf(stderr, "inet_aton() failed\n");
		return 1;
	}
	//second destination address
	memset((char *) &si_other2, 0, sizeof(si_other2));
	si_other2.sin_family = AF_INET;
	si_other2.sin_port = htons(PORT);
	if (inet_aton(argv[2], &si_other2.sin_addr)==0) {
		fprintf(stderr, "inet_aton() failed\n");
		return 1;
	}
	//set cameras
	TDepthCamera mainCam;
	createPrimaryCamera(&mainCam, 0);
	//get calibration values acquired by calibration program.
	//BUG FIX: fclose() was reached even when fopen() failed — fclose(NULL)
	//is undefined behavior. It now only runs for a successfully opened file.
	FILE* pFile = fopen("calibrationValuesOne.cal", "r");
	if(pFile == NULL){
		puts("Could not get calibration data.");
	}else{
		fread(&minZ, sizeof(int), 1, pFile);
		fread(&maxZ, sizeof(int), 1, pFile);
		fclose(pFile);
	}
	TVecList mainList;
	unsigned int timestamp;
	contLoop = 1;
	//show current calibration values.
	printf("Current calibration values:\nCeiling: %d, Floor: %d\n", maxZ, minZ);
	puts("\n\nAre those values correct? [Y/N]");
	char tmpChar = getchar();
	if(tmpChar == 'N' || tmpChar == 'n'){
        contLoop = 0;
        puts("\nUse calibration program to correct the values.");
	}
	//BUG FIX: fflush(stdin) is undefined behavior; portably drain the rest
	//of the input line instead.
	if (tmpChar != '\n' && tmpChar != EOF) {
		int ch;
		while ((ch = getchar()) != '\n' && ch != EOF)
			;
	}
	//start the asynchronous reader thread
	pthread_t thread;
	int rc;
	long t = 0;
	rc = pthread_create(&thread, NULL, readAsync, (void *)t);
	if (rc){
		printf("ERROR; return code from pthread_create() is %d\n", rc);
		exit(-1);
	}
	//main loop: grab a frame, detect the drone, broadcast its position
	while(contLoop){
		//acquire data for main Kinect & process data
		if(updateCamera(&mainCam, &timestamp)){
            printf("Could not update feed for device 0.");
            return EXIT_FAILURE;
		}
		if(detectDrone(mainCam.data, &mainList, &vec3DDistance)){
            printf("Could not process data for device 0.");
            return EXIT_FAILURE;
		}
		//display list
		system("clear");
		puts("Press Enter to exit.\n\n---------------\nLIST:");
		displayVecList(&mainList);
		//send the strongest detection to both receivers
		TVec4D* maxVect = maxPointList(&mainList);
		if(maxVect != NULL){
            writePacket(buf, 'k', maxVect->x, maxVect->y, maxVect->z);
			//BUG FIX: sendto() takes a (struct sockaddr *); the original
			//passed a sockaddr_in* without the cast.
			if (sendto(s, buf, BUFLEN, 0, (struct sockaddr *)&si_other, slen)==-1){
				fprintf(stderr, "sendto() failed\n");
				return 1;
			}
			if (sendto(s, buf, BUFLEN, 0, (struct sockaddr *)&si_other2, slen)==-1){
				fprintf(stderr, "sendto() failed\n");
				return 1;
			}
		}
	}
	//close socket
	close(s);
	//free all data
	freeCamera(&mainCam);
	//stop kinects
	freenect_sync_stop();
	//stop pthread
	pthread_exit(NULL);
	return EXIT_SUCCESS;
}