Example #1
static value openflkinect_get_user_col(value ref, value userID)
{
  val_check_kind(ref, k_Kinect);

  Kinect* k = static_cast<Kinect*>(val_data(ref));
  return alloc_int( k->getUserColor( val_int(userID) ) );
}
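These `openflkinect_*` functions use the hxcpp/Neko CFFI (`val_check_kind`, `val_data`, `alloc_int`). A minimal sketch of how such primitives are typically exported, assuming the stock `hx/CFFI.h` macros and the names from these examples:

// Sketch: exporting the primitive above through the hxcpp CFFI.
#include <hx/CFFI.h>

DEFINE_KIND(k_Kinect);                      // abstract kind wrapping a Kinect*

DEFINE_PRIM(openflkinect_get_user_col, 2);  // (ref, userID)
DEFINE_PRIM(openflkinect_get_status, 1);    // (ref)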
Example #2
static value openflkinect_get_status(value ref)
{
  val_check_kind(ref, k_Kinect);

  Kinect* k = static_cast<Kinect*>(val_data(ref));
  return alloc_int( k->nuiStatus() );
}
Example #3
static value openflkinect_get_is_capturing(value ref)
{
  val_check_kind(ref, k_Kinect);
  Kinect* k = static_cast<Kinect*>(val_data(ref));

  return alloc_bool(k->isCapturing());
}
Example #4
static void openflkinect_set_tilt(value ref, value degrees)
{
  val_check_kind(ref, k_Kinect);
  val_check(degrees, int);
  Kinect* k = static_cast<Kinect*>(val_data(ref));

  k->setTilt(val_int(degrees));
}
Example #5
static value openflkinect_update_interactions(value ref)
{
  val_check_kind(ref, k_Kinect);

  Kinect* k = static_cast<Kinect*>(val_data(ref));

  vector<NUI_USER_INFO> userInfos = k->interactionInfo();
  value users = alloc_array(userInfos.size());
  for ( size_t i = 0; i < userInfos.size(); i++ )
  {
    value user = alloc_empty_object();
    val_array_set_i(users, i, user);

    if ( userInfos[i].SkeletonTrackingId > 0 )
    {
      alloc_field( user, val_id( "isTracked" ),
          alloc_bool( true ) );
      alloc_field( user, val_id("skeletonTrackingId"),
          alloc_int(userInfos[i].SkeletonTrackingId ));
      value hands = alloc_array( NUI_USER_HANDPOINTER_COUNT );
      alloc_field( user, val_id("handPointerInfos"),
          hands);

      for ( int j=0; j<NUI_USER_HANDPOINTER_COUNT; j++ )
      {
        value hand = alloc_empty_object();
        val_array_set_i(hands, j, hand);
        alloc_field( hand, val_id("handTypeEvent"),
            alloc_float( userInfos[i].HandPointerInfos[j].HandEventType ));
        alloc_field( hand, val_id("handType"),
            alloc_float( userInfos[i].HandPointerInfos[j].HandType ));
        alloc_field( hand, val_id("pressExtent"),
            alloc_float( userInfos[i].HandPointerInfos[j].PressExtent ));
        alloc_field( hand, val_id("rawX"),
            alloc_float( userInfos[i].HandPointerInfos[j].RawX ));
        alloc_field( hand, val_id("rawY"),
            alloc_float( userInfos[i].HandPointerInfos[j].RawY ));
        alloc_field( hand, val_id("rawZ"),
            alloc_float( userInfos[i].HandPointerInfos[j].RawZ ));
        alloc_field( hand, val_id("state"),
            alloc_float( userInfos[i].HandPointerInfos[j].State ));
        alloc_field( hand, val_id("x"),
            alloc_float( userInfos[i].HandPointerInfos[j].X ));
        alloc_field( hand, val_id("y"),
            alloc_float( userInfos[i].HandPointerInfos[j].Y ));
      }
    }
    else
    {
      alloc_field( user, val_id( "isTracked" ), alloc_bool( false ) );
    }
  }
  return users;
}
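The repeated `alloc_field(hand, val_id(...), alloc_float(...))` calls above could be collapsed with a small helper. A sketch against the same CFFI calls; the helper name `set_float_field` is ours, not library API:

// Sketch: a helper for the repetitive float-field writes above.
static inline void set_float_field(value obj, const char* name, double v)
{
  alloc_field(obj, val_id(name), alloc_float(v));
}

// e.g. inside the hand loop:
//   set_float_field(hand, "rawX", userInfos[i].HandPointerInfos[j].RawX);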
Example #6
int main(){
	Kinect K;
	Filters F;
	cout << K.initialiseKinect() << '\n';
	KOCVStream STREAM(K, F);
	STREAM.display("dei");
	cin.get();
	return 0;
}
Example #7
void kinectSkelApp::mouseUp( MouseEvent event )
{
	writeImage( getHomeDirectory() + "kinect_color.png", mKinect.getColorImage() );
	writeImage( getHomeDirectory() + "kinect_depth.png", mKinect.getDepthImage() );
	
	// set tilt to random angle
	mKinect.setLedColor( Kinect::LED_YELLOW );
}
Example #8
int main( int argc, char* argv[] )
{
    try{
        Kinect kinect;
        kinect.run();
    } catch( std::exception& ex ){
        std::cout << ex.what() << std::endl;
    }

    return 0;
}
Example #9
static value openflkinect_get_user_count(value ref)
{
  val_check_kind(ref, k_Kinect);
  Kinect* k = static_cast<Kinect*>(val_data(ref));

  if ( k->isCapturing() == false )
  {
    return alloc_int(0);
  }

  return alloc_int(k->getUserCount());
}
Example #10
static value openflkinect_depth_frame_rate(value ref)
{
  val_check_kind(ref, k_Kinect);
  Kinect* k = static_cast<Kinect*>(val_data(ref));

  if ( k->isCapturing() == false )
  {
    return alloc_float(0.0f);
  }

  return alloc_float(k->depthFrameRate());
}
Example #11
void KinectStreamerApp::setup()
{
	setupFont();
	mOscBroadcaster.registerParams(mSettings);
	mJointParameters.registerParams(mSettings);
	mKinect.registerParams(mSettings);
	mSettings.load(getAssetPath("kinectStreamerSettings.json").string());
	mSettings.setup();
	mKinect.setup();
	mOscBroadcaster.setup(&mKinect);
//	mOscBroadcaster.setDestination("127.0.0.1", 37000);
}
Example #12
//Kinect processing loop
void* Kinect::runKinect(void* k) 
{
	//setup
    Kinect* kinect = static_cast<Kinect*>(k);
    drawgrid = kinect->grille_;
	Mat depthMat(480,640,CV_16UC1);
    Freenect::Freenect freenect;
    MyFreenectDevice& device = freenect.createDevice<MyFreenectDevice>(0);
    device.startDepth();
    //smaller "virtual" grid for the A* search (to speed up path-finding)
    char **sgrille = new char*[175];
    for(int i=0;i<175;i++)
            sgrille[i]=new char[175];
    clock_t time;
    while (!kinect->die_) 
    {
		//if no image is available: wait 10 ms, then restart the loop
        if(!device.getDepth(depthMat))
		{
			delay(10);
			continue;
		}
		pthread_mutex_lock(&mut);
        depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
		time = clock();	//debug: used to time the loops
		kinect->marquer_obstacles(depthMat); 
//	std::cout << "marquer" << (clock() - time) * 1000 / CLOCKS_PER_SEC << std::endl;
	
		 //run A*; the grid is halved to make the search faster
		for(int x =0; x <175; x++)
			for(int y =0; y <175; y++)
				sgrille[x][y] = kinect->grille_->grille_[2 *x][2 * y] + kinect->grille_->grille_[2 *x + 1][2 * y] + kinect->grille_->grille_[2 *x][2 * y + 1] + kinect->grille_->grille_[2 *x+1][2 * y+1];
			
		char *route = AStarStatic(240 / 2, 0, 240 / 2, TARGET / 2, sgrille, 175, 175);
		size_t routeLen = strlen(route);
		char lroute[2 * routeLen + 1];	//upscale the half-resolution route
		for(size_t x = 0; x < routeLen; x++)
		{
			lroute[2 * x] = route[x];
			lroute[2 * x + 1] = route[x];
		}
		lroute[2 * routeLen] = '\0';
		drawRouteOnMap(kinect->grille_->grille_, lroute, 240, 0);
		if(routeLen != 0)
			free(route);
		//	std::cout << (clock() - time) * 1000 / CLOCKS_PER_SEC << std::endl;

		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&mut);
		delay(40);	//40 ms sleep: leaves time to draw the window before retaking the mutex
    }
    device.stopDepth();
    //release the reduced grid: each row, then the array of row pointers
    for(int i=0;i<175;i++)
        delete[] sgrille[i];
    delete[] sgrille;
	return 0;
}
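The loop above publishes each processed frame under `mut` and then signals `cond`. A matching consumer (for instance, the drawing thread) would pair with it roughly as sketched below, assuming the same globals (`mut`, `cond`, `depthf`); a production version would also re-check a predicate in a loop to guard against spurious wakeups:

#include <pthread.h>
#include <opencv2/core/core.hpp>

extern pthread_mutex_t mut;
extern pthread_cond_t cond;
extern cv::Mat depthf;

void waitAndDrawFrame()
{
    pthread_mutex_lock(&mut);
    pthread_cond_wait(&cond, &mut);  // woken by pthread_cond_signal above
    cv::Mat frame = depthf.clone();  // copy out while still holding the lock
    pthread_mutex_unlock(&mut);
    // ... draw `frame` without blocking the Kinect thread ...
}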
Example #13
int main()
{
    
    Kinect *Program  = new Kinect();

    Program->PrzygotujChmury();
  
    Program->NARF_ICP();
    Program->ICP();
    
    delete Program;

    return 0;
}
Example #14
static value openflkinect_get_skeleton_depth_pos(value ref, value vec3)
{
  val_check_kind(ref, k_Kinect);

  Kinect* k = static_cast<Kinect*>(val_data(ref));
  Vec2i adjusted = k->getSkeletonDepthPos(Vec3f(
        val_float(val_field(vec3, val_id("x"))),
        val_float(val_field(vec3, val_id("y"))),
        val_float(val_field(vec3, val_id("z"))) ));
  value ret = alloc_empty_object();
  alloc_field( ret, val_id("x"), alloc_int(adjusted.x));
  alloc_field( ret, val_id("y"), alloc_int(adjusted.y));
  return ret;
}
Example #15
static value openflkinect_init(value in)
{
  value v;
  v = alloc_abstract(k_Kinect, new Kinect());

  Kinect* k = static_cast<Kinect*>(val_data(v));
  DeviceOptions& opts = k->getDeviceOptions();

  opts.deviceId = val_string(val_field(in, val_id("deviceId")));
  opts.deviceIndex = val_int(val_field(in, val_id("deviceIndex")));

  opts.depthEnabled = val_bool(val_field(in, val_id("depthEnabled")));
  opts.colorEnabled = val_bool(val_field(in, val_id("colorEnabled")));
  opts.irEnabled = val_bool(val_field(in, val_id("irEnabled")));
  opts.skeletonTrackingEnabled =
    val_bool(val_field(in, val_id("skeletonTrackingEnabled")));
  opts.interactionEnabled = val_bool(val_field(in, val_id("interactionEnabled")));
  cout << opts.interactionEnabled << endl;
  opts.depthResolution =
     opts.getResolution(val_string(val_field(in, val_id("depthResolution"))));

  opts.depthSize = opts.Vec2fromHaxePoint(in, "depthSize");

  opts.colorResolution =
      opts.getResolution(val_string(
                  val_field(in, val_id("colorResolution"))));
  opts.colorSize = opts.Vec2fromHaxePoint(in, "colorSize");
  opts.irResolution =
      opts.getResolution(val_string(
                  val_field(in, val_id("irResolution"))));
  opts.irSize = opts.Vec2fromHaxePoint(in, "irSize");

  opts.nearModeEnabled = val_bool(val_field(in, val_id("nearModeEnabled")));
  opts.seatedModeEnabled = val_bool(val_field(in, val_id("seatedModeEnabled")));
  opts.userTrackingEnabled = val_bool(val_field(in, val_id("userTrackingEnabled")));
  opts.extendedModeEnabled = val_bool(val_field(in, val_id("extendedModeEnabled")));
  opts.mapColorToDepth = val_bool(val_field(in, val_id("mapColorToDepth")));

  opts.binaryMode = val_bool(val_field(in, val_id("binaryMode")));
  opts.userColor = val_bool(val_field(in, val_id("userColor")));
  opts.removeBackground = val_bool(val_field(in, val_id("removeBackground")));
  opts.inverted = val_bool(val_field(in, val_id("inverted")));

  opts.flipped = val_bool(val_field(in, val_id("flipped")));
  opts.transform = opts.getTransform(val_string(val_field(in,
          val_id("transform"))));
  return v;
}
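Helpers like `Vec2fromHaxePoint` presumably pull an `{x, y}` sub-object off the options object. A plausible sketch; only `val_field`, `val_id`, and `val_number` are real CFFI calls, while `Vec2i` and the exact signature are assumptions:

// Sketch: reading a Haxe {x, y} point field, as Vec2fromHaxePoint might.
static Vec2i vec2FromHaxePoint(value obj, const char* fieldName)
{
  value pt = val_field(obj, val_id(fieldName));
  return Vec2i((int)val_number(val_field(pt, val_id("x"))),
               (int)val_number(val_field(pt, val_id("y"))));
}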
Example #16
void kinectPointCloudApp::update()
{
	if( mKinect.checkNewDepthFrame() )
		mDepthTexture = mKinect.getDepthImage();
	
	// This sample does not use the color data
	//if( mKinect.checkNewVideoFrame() )
	//	mColorTexture = mKinect.getVideoImage();

	if( mKinectTilt != mKinect.getTilt() )
		mKinect.setTilt( mKinectTilt );
		
	mEye = Vec3f( 0.0f, 0.0f, mCameraDistance );
	mCam.lookAt( mEye, mCenter, mUp );
	gl::setMatrices( mCam );
}
Example #17
void ScheinrieseApp::setup()
{
    // GUI
    mGui = new SimpleGUI(this);
    mGui->addColumn();
    mGui->addLabel("CONTROLS");
    mGui->addParam("Threshold", &mThreshold, 0, 255, 127);
    mGui->addParam("Blur", &mBlur, 1, 20, 1);
    mGui->addParam("Tilt", &mKinectTilt, -30, 30, 0);
    mGui->addColumn();
    mGui->addLabel("DEBUG VIEW");
    mGui->addParam("Show Debug", &mShowDebug, true);
//    mGui->addButton("Show Debug")->registerClick(this, &ScheinrieseApp::showDebug);

    mGui->load(getResourcePath(RES_SETTINGS));
    mGui->setEnabled(false);

    mBlur = 1;
    mThreshold = 127;

    mShowDebug = true;

    // KINECT
    hasKinect = false;
    console() << "### INFO: There are " << Kinect::getNumDevices() << " Kinects connected." << endl;
    if (Kinect::getNumDevices() >= 1) {
        mKinect = Kinect( Kinect::Device() );
        mKinect.setTilt(mKinectTilt);
        hasKinect = true;
    }
}
Example #18
void HiKinectApp::update()
{
	if( mKinect.checkNewDepthFrame() ) {
		mDepthTexture = mKinect.getDepthImage();
		mDepthSurface = Surface32f( mKinect.getDepthImage() );
		mKinectReady = true;
		if ( !mKinectIR ) {
			mKinectIR = true;
			mKinect.setVideoInfrared( true );
		}
		
		ci::Surface captureSurface = Surface8u( mKinect.getDepthImage() );
		ci::Surface outputSurface = captureSurface;
		mContours->clear();
		mSilhouetteDetector->processSurface(&captureSurface, mContours, &outputSurface);
	}
	
	if( mKinect.checkNewColorFrame() )
		mColorTexture = mKinect.getVideoImage();
	
	if( mIsMouseDown ) // using small number instead of 0.0 because lights go black after a few seconds when going to 0.0f
		mDirectional -= ( mDirectional - 0.00001f ) * 0.1f;  
	else 
		mDirectional -= ( mDirectional - 1.0f ) * 0.1f;
	
	if (mKinectReady)
		mGridMesh.updateKinect(mKinect);
	else
		mGridMesh.update();
}
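The `mDirectional` lines implement a per-frame exponential approach: each update moves the value 10% of the way toward its target t, so the remaining distance decays geometrically,

x_{n+1} = x_n - 0.1\,(x_n - t) = 0.9\,x_n + 0.1\,t, \qquad x_n - t = 0.9^n\,(x_0 - t).

The tiny target 0.00001 (rather than exactly 0) matches the in-code comment about the lights going black at 0.0.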
Example #19
void ContoursApp::update()
{
	if ( mKinectReady && !mKinectIR )
		mKinect.setVideoInfrared( true );
	
	if( mKinect.checkNewDepthFrame() ) {
		mDepthTexture = mKinect.getDepthImage();
		mDepthSurface = Surface8u( mKinect.getDepthImage() );
		mKinectReady = true;
		
		ci::Surface captureSurface = Surface8u( mKinect.getDepthImage() );
		ci::Surface outputSurface = captureSurface;
		
		contours->clear();
		
		silhouetteDetector->processSurface(&captureSurface, contours, &outputSurface);
		
		console() << contours->size() << " contours found" << endl;
		
		mTexture1 = outputSurface;
	}
	
	if( mKinect.checkNewColorFrame() ) {
		mTexture2 = gl::Texture( mKinect.getVideoImage() );
	}
}
Example #20
void KinectStreamerApp::draw()
{
	// clear out the window with black
	gl::clear( Color( 0, 0, 0 ) );
	mKinect.draw();
	mOscBroadcaster.draw();
	hud().draw();
	mSettings.draw();
}
Example #21
static value openflkinect_update_skeletons(value ref)
{
  val_check_kind(ref, k_Kinect);

  Kinect* k = static_cast<Kinect*>(val_data(ref));

  vector<Skeleton> trackedSkels = k->skeletons();
  value skeletons = alloc_array(trackedSkels.size());
  for ( size_t i = 0; i < trackedSkels.size(); i++ )
  {
    value skeleton = alloc_empty_object();
    val_array_set_i(skeletons, i, skeleton);

    if ( trackedSkels[i].size() > 0 )
    {
      alloc_field( skeleton, val_id( "isTracked" ),
          alloc_bool( true ) );
      value bones = alloc_array( trackedSkels[i].size() );
      alloc_field(skeleton, val_id("bones"), bones);

      int boneIndex = 0;
      for ( boneIt it = trackedSkels[i].begin();
          it != trackedSkels[i].end(); ++it )
      {
        //cout << it->second.orientation.startJoint << endl;
        value b = get_bone(it->second);
        val_array_set_i(bones, boneIndex++, b);
      }

    }
    else
    {
      alloc_field( skeleton, val_id( "isTracked" ),
          alloc_bool( false ) );

    }
  }
  return skeletons;
}
Example #22
int main()
{
    time_t t = time(NULL);
    tm local;
    char buf[64] = {0};
    localtime_r(&t, &local);
    strftime(buf, sizeof(buf), "%Y-%m-%d %H-%M-%S", &local);
    mkdir(buf, S_IRWXU | S_IRWXG);
    chdir(buf);

    visionsensor.viewcloud();
    visionsensor.start();

#if ARIS_PLATFORM_==_PLATFORM_LINUX_
    //char RemoteIp[] = "192.168.1.100";
    char RemoteIp[] = "127.0.0.1";
#endif


    CONN VisualSystem;
    pVisualSystem = &VisualSystem;

    /* Register all message callbacks */
    Aris::Core::RegisterMsgCallback(VisualSystemDataNeeded, OnVisualSystemDataNeeded);
    Aris::Core::RegisterMsgCallback(VisualSystemLost, OnVisualSystemLost);

    Aris::Core::RegisterMsgCallback(NeedUpperControl, OnUpperControl);
    Aris::Core::RegisterMsgCallback(NeedStepUp, OnStepUp);
    Aris::Core::RegisterMsgCallback(NeedStepDown, OnStepDown);
    Aris::Core::RegisterMsgCallback(NeedStepOver, OnStepOver);

    /* Set the callbacks for all CONN objects */
    VisualSystem.SetCallBackOnReceivedData(OnConnDataReceived);
    VisualSystem.SetCallBackOnLoseConnection(OnConnectionLost);

    /* Connect to the server */
    VisualSystem.Connect(RemoteIp, "5691");
    //VisualSystem.Connect(RemoteIp, "5688");

    /* Start the message loop */
    Aris::Core::RunMsgLoop();

    return 0;
}
Example #23
static value openflkinect_update_depth_pixels(value ref, value depthArray)
{
  val_check_kind(ref, k_Kinect);
  Kinect* k = static_cast<Kinect*>(val_data(ref));

  int* ints = val_array_int(depthArray);
  if ( ! ints )
  {
    return alloc_bool(false);
  }

  if ( k->isCapturing() == false )
  {
    return alloc_bool(false);
  }

  int* depth = k->depthPixels();
  int dims = k->getDeviceOptions().numDepthPixels();
  memcpy(ints, depth, dims*sizeof(int));
  return alloc_bool(true);
}
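The `memcpy` above trusts that the caller's `depthArray` holds at least `numDepthPixels()` ints. A defensive variant could check the array length first; a sketch using `val_array_size` from the same CFFI:

// Sketch: guard the copy against an undersized Haxe array.
int dims = k->getDeviceOptions().numDepthPixels();
if ( val_array_size(depthArray) < dims )
{
  return alloc_bool(false);   // caller passed too small a buffer
}
memcpy(ints, k->depthPixels(), dims * sizeof(int));
return alloc_bool(true);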
Example #24
void KinectStreamerApp::update()
{
	gMouseX = float(getMousePos().x) / getWindowWidth();
	gMouseY = float(getMousePos().y) / getWindowHeight();
	double elapsedTime = getElapsedSeconds();
	double dt = elapsedTime - mElapsedTime;
	mElapsedTime = elapsedTime;
	
	mKinect.update(dt, mElapsedTime, mJointParameters);
	mOscBroadcaster.update(dt, mElapsedTime);
	hud().update(dt, mElapsedTime);
}
Example #25
void ScheinrieseApp::handleKinect()
{
    if (!hasKinect) {
        return;
    }

    if( mKinectTilt != mKinect.getTilt() ) {
        mKinect.setTilt( mKinectTilt );
    }

    if( mKinect.checkNewDepthFrame() ) {
        mDepthTexture = mKinect.getDepthImage();
    }

    if( mKinect.checkNewVideoFrame() ) {
        mColorTexture = mKinect.getVideoImage();
    }

    /* debug view */
    if (mColorTexture && !mDebugViewColor) {
        mGui->addLabel("COLOR");
        mDebugViewColor = mGui->addParam("COLOR", &mColorTexture);
        mDebugViewColor->var = &mColorTexture;
        console() << "color" << endl;
    }

    if (mDepthTexture && !mDebugViewDepth) {
        mGui->addLabel("DEPTH");
        mDebugViewDepth = mGui->addParam("DEPTH", &mDepthTexture);
        mDebugViewDepth->var = &mDepthTexture;
        console() << "depth" << endl;
    }
}
Example #26
	void Autonomous(void) {
		GetWatchdog().SetEnabled(true);
		bool isHybrid = false;
		Kinect* kinect = Kinect::GetInstance();
		isHybrid = (kinect->GetNumberOfPlayers() > 0);
		if (!isHybrid) {
			_driveControl.initializeAutonomous();
			shooterControl.InitializeAutonomous();
			_poleVaultControl.initialize();
		} else {
			_driveControl.initializeHybrid();
			shooterControl.InitializeHybrid();
			_poleVaultControl.initialize();
		}
		while (IsEnabled() && IsAutonomous()) {
			GetWatchdog().Feed();
			dsLCD->Clear();
			if (!isHybrid) { //Run Autonomous
				dsLCD->PrintfLine(DriverStationLCD::kUser_Line1,
						"Autonomous Mode");
				//if (_driveControl.RunAuto()) {
					shooterControl.RunAuto();
				//}
//				if(_driveControl.RunAuto()){
//					_poleVaultControl.deploy();
//				}
			} else { //Run Hybrid
				dsLCD->PrintfLine(DriverStationLCD::kUser_Line1, "Hybrid Mode");
				shooterControl.Run();
				_driveControl.act();
				_poleVaultControl.act();
			}
			dsLCD->UpdateLCD();
			Wait(WAIT_TIME);
		}
		GetWatchdog().SetEnabled(false);
	}
Example #27
int main(void) {

	unsigned char menu =0;  

	unsigned total_n_pose = 12;

	unsigned pose_n_frames = 100;

	unsigned feature_dimension = 60;  // feature dimensionality

	double *features = new double[feature_dimension];  // feature vector

	float gamma[12] = {0.1, 0.1, 0.1, 0.1, 
						0.1, 0.1, 0.1, 0.1,
						0.1, 0.1, 0.1, 0.1};

	Kinect kinect;
	Kinect kinect;
	if(!kinect.init()) {
		cout << "Kinect is not working";
		return 1;
	}

	long angle = kinect.getCameraElevationAngle();

	IplImage *frame = cvCreateImage(cvSize(640,480),8,3);
	IplImage *frame_masked = cvCreateImage(cvSize(640,480),8,3);
	IplImage *depth8 = cvCreateImage(cvSize(320,240),8,1);
	IplImage *depth8_masked = cvCreateImage(cvSize(320,240),8,1);
	IplImage *depthPlayer = cvCreateImage(cvSize(320,240),8,3);
	char text[256], text2[256];

	while(1) {
		cvDestroyAllWindows();
		cout << " 1 : 키넥트 연결 확인 " << endl;
		cout << " 2 : 포즈 저장하기 " << endl;
		cout << " 3 : 포즈 확인하기 " << endl;
		cout << " 4 : SVM 학습 및 검증 집합 만들기 " << endl;
		cout << " 5 : SVM 학습하기 " << endl;
		cout << " 6 : SVM 검증집합 인식결과 " << endl;
		cout << " 7 : 실시간 포즈 인식 " << endl;
		cout << " u : 카메라 각도 올리기" << endl;
		cout << " d : 카메라 각도 내리기" << endl;
		cout << " 종료 : ctrl+c"<<endl;
		cout << " > ";
		cin >> menu;
		getchar();

		/* 
			View the Kinect streams
		*/
		if(menu == '1') {
			cout << "메뉴복귀 : esc 키 누르기 (opencv 윈도우 활성화된 상태에서) " << endl;
			int key = 0;
			while(1) {
				kinect.videoImage(frame);
				kinect.depth8BitImage(depth8);
				kinect.depthPlayerImage(depthPlayer);

				//cout << "Number of Players : " << kinect.numOfPlayers() << endl;
				for(int i=0; i<NUI_SKELETON_COUNT; i++)
					if(kinect.getPlayer(i).isInitialized())
						kinect.getPlayer(i).drawSkeletonInDepthCoordinates(depthPlayer);
				cvShowImage("depth",depth8);
				cvShowImage("depthPlayer",depthPlayer);
				cvShowImage("color",frame);
				key = cvWaitKey(10);
				if(key==27 || key=='q' || key=='Q')
					break;
			}
		}	
		
		/*
			Save poses
		*/
		else if(menu == '2') {

			int key = 0;
			
			cout << " 몇개의 포즈를 찍으시겠습니까 ? > " ;
			cin >> total_n_pose;

			cout << " 포즈당 몇 프레임을 찍으시겠습니까 ? > " ;
			cin >> pose_n_frames;


			DBforKinect db;

				
			for(int i=0; i<total_n_pose; i++) {
				key =0;
				while(key != 1) {
					cout << i << "번째 포즈 저장 시작 [1 엔터] " << endl;
					cin >> key;
				}
				Sleep(5000);
				sprintf(text,"%d_pose",i);
				db.createFolder(text);
				db.locateFolder(text);
				db.initCnt();

				for(int j=0; j<pose_n_frames; ) {
					
					kinect.videoImage(frame);
					kinect.depth8BitImage(depth8);
					kinect.depthPlayerImage(depthPlayer);
					cvShowImage("players",depthPlayer);
				
					int index = kinect.findNearestPlayerNum();

					if(index != -1 && kinect.getPlayer(index).isInitialized()) {
						cout << i << "pose , " << j << " frame " << endl;
						KinectPlayer player = kinect.getPlayer(index);
						cvShowImage("player",player.getDepthMask());
						sprintf(text,"player",i);
						db.savePlayer(player,text);
						db.saveImg(frame,"color");
						db.saveImg(depth8,"depth");
						db.increaseCnt();
						++j;
						player.drawSkeletonInColorCoordinates(frame);
						cvShowImage("check",frame);
						cvWaitKey(10);
					}					
					key = cvWaitKey(10);
					if(key==27 || key=='q' || key=='Q')
						break;
				}
				cout << i<< "번째 포즈를 저장 완료" << endl;
				cvDestroyAllWindows();
			}
			cout << " 모든 포즈를 저장하였습니다. " << endl;
			
		}

		/*
			Review saved poses
		*/
		else if(menu == '3') {
Example #28
int main(int argc, char *argv[]){

	#pragma region // --- init ---
	//declare and initialize the Kinect class
	Kinect kinect;
	kinect.Initialize(NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );

	//create the streams: one for video, one for depth
	ImageStream& video = kinect.VideoStream();
	video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );

	ImageStream& depth = kinect.DepthStream();
	depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );

	//prepare skeleton tracking
	kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
	skeleton.Enable();

	//prepare the OpenCV Mats and windows
	namedWindow("camera_window");
	Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);

	namedWindow("depth_window");
	Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); //depth seems to be 16-bit

	//align the depth and camera images (kinect_utility.h)
	ColorFromDepthEngine CFDengine;

	#pragma endregion

	#pragma region // --- my init ---
	mine::flag skeleton_flag;
	#pragma endregion


	while ( 1 ) {
		# pragma region // --- get data ---
		// wait for the data to update
		kinect.WaitAndUpdateAll();

		//get the skeleton
		kinect::nui::SkeletonFrame skeletonframe= skeleton.GetNextFrame();

		// fetch the next frame's data (OpenNI-style)
		ImageFrame image( video );
		DepthFrame depthMD( depth );
		// copy the data into cv::Mat
		camera_img = Mat(camera_img.size(),	CV_8UC4, (BYTE *)image.Bits());
		depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());

		// adjust with utility
		cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
		adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
		#pragma endregion

		#pragma region // --- processing ---
		SkeletonDrawer skeletondrawer(skeletonframe);
		SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); //get the skeleton in depth-image coordinates

		if(skeleton_flag.IsTrue){
			me.Drawall(adjusted_camera_img);
		}
		
		if(me.IsTracked){ // if someone is in view
			Point prhand = me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT);
			Point plhand = me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT);

			// choose the colors
			Scalar color_rhand = Scalar(0, 0, 255);
			Scalar color_lhand = Scalar(0, 255, 0);
			if(norm(prhand-plhand) < THRESH_SAMEPOS){
				ushort drhand = depth_img.at<ushort>(prhand);
				ushort dlhand = depth_img.at<ushort>(plhand);
				if(abs(drhand-dlhand) < THRESH_SAMEDEPTH){
					Scalar mix = color_lhand;
					mix += color_rhand;
					color_rhand = mix;
					color_lhand = mix;
				}
			}

			// draw onto the image
			circle(adjusted_camera_img, prhand, 5, color_rhand, -1);
			circle(adjusted_camera_img, plhand, 5, color_lhand, -1);
		}

		#pragma endregion

		#pragma region // --- show ---
		cv::resize(adjusted_camera_img, camera_img, camera_img.size());
		cv::imshow("camera_window", camera_img);	
		#pragma endregion

		#pragma region // --- keyboard callback ---
		int key = waitKey(1);
		if ( key == 'q' ) {
			break;
		}
		else if ( key == 's' ) {
			skeleton_flag.reverse();
		}
		#pragma endregion
	
	}
	return 0; // normal exit
}
Example #29
int main(int argc, char *argv[]) {

    #pragma region // --- init ---
    //declare and initialize the Kinect class
    Kinect kinect;
    kinect.Initialize(NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );

    //create the streams: one for video, one for depth
    ImageStream& video = kinect.VideoStream();
    video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );

    ImageStream& depth = kinect.DepthStream();
    depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );

    //prepare skeleton tracking
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();

    //prepare the OpenCV Mats and windows
    namedWindow("camera_window");
    Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);

    namedWindow("depth_window");
    Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); //depth seems to be 16-bit

    //align the depth and camera images (kinect_utility.h)
    ColorFromDepthEngine CFDengine;

    #pragma endregion

    #pragma region // --- my init ---
    mine::flag skeleton_flag;

    // temporary drawing buffers, cleared to the transparent color (black)
    cv::Mat buf_img(depth_img.size(), CV_8UC4, Scalar(0));
    cv::Mat buf_depth_img(depth_img.size(), CV_16UC1, Scalar(0));

    // right-hand effect
    float rballsize = 3.0;
    Point prhand_prev(0, 0);
    ushort drhand_prev = 0;
    mine::ThrownObject rball;
    #pragma endregion


    while ( 1 ) {
        # pragma region // --- get data ---
        // wait for the data to update
        kinect.WaitAndUpdateAll();

        //get the skeleton
        kinect::nui::SkeletonFrame skeletonframe= skeleton.GetNextFrame();

        // fetch the next frame's data (OpenNI-style)
        ImageFrame image( video );
        DepthFrame depthMD( depth );
        // copy the data into cv::Mat
        camera_img = Mat(camera_img.size(),	CV_8UC4, (BYTE *)image.Bits());
        depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());

        // adjust with utility
        cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
        adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
        #pragma endregion

        #pragma region // --- processing ---
        SkeletonDrawer skeletondrawer(skeletonframe);
        SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); //get the skeleton in depth-image coordinates

        if(skeleton_flag.IsTrue) {
            me.Drawall(adjusted_camera_img);
        }

        if(me.IsTracked) { // if someone is in view
            Point prhand = me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT);
            Point plhand = me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT);

            Scalar color_rhand = Scalar(0, 0, 255);
            Scalar color_lhand = Scalar(0, 255, 0);

            #pragma region // --- shoot ---
            if(  prhand.inside(Rect(0, 0, depth_img.cols, depth_img.rows)) ) {
                ushort drhand = depth_img.at<ushort>(prhand);
                if((norm(prhand-prhand_prev) < THRESH_SAMEPOS) && abs(drhand - drhand_prev) < THRESH_SAMEDEPTH) {
                    rballsize += 0.5;
                    if(rballsize > 10) { // once it has grown big enough
                        rballsize = 20;
                    }
                }
                else {
                    if(rballsize == 20) { // first movement after a full charge
                        std::cout<<"start"<<drhand<<"delta"<<drhand-drhand_prev<<std::endl;
                        rball = mine::ThrownObject(depth_img, prhand, drhand, prhand-prhand_prev, drhand-drhand_prev);
                        //rball = mine::ThrownObject(depth_img, prhand, depth_img.at<ushort>(me.getPoint(NUI_SKELETON_POSITION_SPINE)), prhand-prhand_prev, 0);
                    }
                    rballsize = 3.0;
                }
                // update the previous-hand state
                prhand_prev = prhand;
                drhand_prev = drhand;
            }

            // draw the ball
            if(rball.IsExist) {
                Mat rball_img(depth_img.size(), CV_8UC4, Scalar(0));
                Mat rball_depth_img(depth_img.size(), CV_16UC1, Scalar(0));

                rball.drawBall(rball_img);
                SetDepth(rball_img, rball_depth_img, rball.pnow[2]);

                DepthBlend(buf_img, buf_depth_img, rball_img, rball_depth_img);

                rball.gonext();
            }

            #pragma endregion

            #pragma region // --- painting ---


            Mat hands_img(depth_img.size(), CV_8UC4, Scalar(0));
            Mat hands_depth_img(depth_img.size(), CV_16UC1, Scalar(0) );

            circle(hands_img, prhand, rballsize, color_rhand, -1);
            circle(hands_img, plhand, 5, color_lhand, -1);
            circle(adjusted_camera_img, prhand, rballsize, color_rhand, -1);
            circle(adjusted_camera_img, plhand, 5, color_lhand, -1);

            SetDepth(hands_img, hands_depth_img, drhand_prev);
            cv::imshow("hands_depth", hands_depth_img);
            DepthBlend(buf_img, buf_depth_img, hands_img, hands_depth_img);

            Mat alpha_buf_img = adjusted_camera_img.clone();
            AlphaCopy(buf_img, alpha_buf_img, 0.5);
            cv::imshow("alphabuf", alpha_buf_img);

            cv::imshow("buf_depth", buf_depth_img);
            DepthBlend(adjusted_camera_img, depth_img, alpha_buf_img, buf_depth_img);



            // afterimage (motion trail)
            Mat tempbuf_img(buf_img.size(), CV_8UC4, Scalar(0) );
            AlphaCopy(buf_img, tempbuf_img, 0.9);

            buf_img=tempbuf_img;
            DepthMasking(buf_img, buf_depth_img);
            cv::imshow("buf_depth", buf_depth_img);
            cv::imshow("buf", buf_img);

            #pragma endregion
        }

        #pragma endregion

        #pragma region // --- show ---
        cv::resize(adjusted_camera_img, camera_img, camera_img.size());
        cv::imshow("camera_window", camera_img);
        #pragma endregion

        #pragma region // --- keyboard callback ---
        int key = waitKey(1);
        if ( key == 'q' ) {
            break;
        }
        else if ( key == 's' ) {
            skeleton_flag.reverse();
        }
        #pragma endregion

    }
    return 0; // normal exit
}
Example #30
int main(int argc, char *argv[]) {

    #pragma region // --- init ---
    //declare and initialize the Kinect class
    Kinect kinect;
    kinect.Initialize(NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );

    //create the streams: one for video, one for depth
    ImageStream& video = kinect.VideoStream();
    video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );

    ImageStream& depth = kinect.DepthStream();
    depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );

    //prepare skeleton tracking
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();

    //prepare the OpenCV Mats and windows
    namedWindow("camera_window");
    Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);

    namedWindow("depth_window");
    Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); //depth seems to be 16-bit

    //align the depth and camera images (kinect_utility.h)
    ColorFromDepthEngine CFDengine;

    #pragma endregion

    while ( 1 ) {
        # pragma region // --- get data ---
        // wait for the data to update
        kinect.WaitAndUpdateAll();

        //get the skeleton
        kinect::nui::SkeletonFrame skeletonframe= skeleton.GetNextFrame();

        // fetch the next frame's data (OpenNI-style)
        ImageFrame image( video );
        DepthFrame depthMD( depth );
        // copy the data into cv::Mat
        camera_img = Mat(camera_img.size(),	CV_8UC4, (BYTE *)image.Bits());
        depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());

        // adjust with utility
        cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
        adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
        #pragma endregion

        #pragma region // --- processing ---
        SkeletonDrawer skeletondrawer(skeletonframe);
        SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); //get the skeleton in depth-image coordinates

        if(me.IsTracked) { // if someone is in view
            me.Drawall(adjusted_camera_img); // draw the skeleton

            // left and right hands
            circle(adjusted_camera_img, me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT), 5, cv::Scalar(0,0,255), -1);
            circle(adjusted_camera_img, me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT), 5, cv::Scalar(0,255,0), -1);

            std::cout<<"z"<<(int)me.get3dPoint(NUI_SKELETON_POSITION_HAND_RIGHT)[2]<<depth_img.at<ushort>(me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT))<<std::endl;
        }



        #pragma endregion

        #pragma region // --- show ---
        cv::resize(adjusted_camera_img, camera_img, camera_img.size());
        cv::imshow("camera_window", camera_img);
        #pragma endregion

        #pragma region // --- keyboard callback ---
        int key = waitKey(1);
        if ( key == 'q' ) {
            break;
        }
        #pragma endregion

    }
    return 0; // normal exit
}