Code Example #1
File: main.cpp Project: ndruger/kinect-modeling-tool
// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD);

	glutSwapBuffers();
	Sleep(5);	// todo: adjust
}
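
The sample only skips WaitAndUpdateAll() while g_bPause is set, so the display loop keeps redrawing the last frame it fetched. A minimal sketch of how such a flag is typically toggled from a GLUT keyboard callback (the handler name and key bindings here are assumptions, not part of the project above):

// Hypothetical GLUT keyboard handler; register with glutKeyboardFunc(glutKeyboard).
void glutKeyboard(unsigned char key, int /*x*/, int /*y*/)
{
	switch (key)
	{
	case 'p':                     // toggle pause: the display loop then skips
		g_bPause = !g_bPause;     // WaitAndUpdateAll() and redraws stale data
		break;
	case 27:                      // ESC: tear down OpenNI and exit
		g_Context.Shutdown();
		exit(0);
	}
}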
Code Example #2
File: cap_openni.cpp Project: AndreSteenveld/opencv
bool CvCapture_OpenNI::grabFrame()
{
    if( !isOpened() )
        return false;

    bool isGrabbed = false;
    if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
    {
        isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
    }
    else
    {
        XnStatus status = context.WaitAndUpdateAll();
        if( status != XN_STATUS_OK )
            return false;

        if( depthGenerator.IsValid() )
            depthGenerator.GetMetaData( depthMetaData );
        if( imageGenerator.IsValid() )
            imageGenerator.GetMetaData( imageMetaData );
        isGrabbed = true;
    }

    return isGrabbed;
}
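
This grabFrame() is the backend hook behind OpenCV's OpenNI capture; applications normally reach it through cv::VideoCapture. A minimal sketch under OpenCV 2.x (constant names CV_CAP_OPENNI, CV_CAP_OPENNI_DEPTH_MAP, CV_CAP_OPENNI_BGR_IMAGE; window names and the ~4 m display range are arbitrary choices):

// Sketch: drive the OpenNI grab/retrieve cycle from user code.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture capture(CV_CAP_OPENNI);        // opens the OpenNI device
    if (!capture.isOpened())
        return 1;

    cv::Mat depthMap, bgrImage, depthShow;
    while (cv::waitKey(30) != 27)                   // ESC quits
    {
        if (!capture.grab())                        // calls grabFrame() internally
            continue;
        capture.retrieve(depthMap, CV_CAP_OPENNI_DEPTH_MAP);     // 16-bit depth in mm
        capture.retrieve(bgrImage, CV_CAP_OPENNI_BGR_IMAGE);     // 8-bit BGR image
        depthMap.convertTo(depthShow, CV_8UC1, 255.0 / 4000.0);  // map ~4 m to 8 bit
        cv::imshow("depth", depthShow);
        cv::imshow("rgb", bgrImage);
    }
    return 0;
}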
Code Example #3
File: main.cpp Project: sravanthib/openni_annotator
// this function is called each frame
void glutDisplay (void)
{
	// Read next available data
	g_Context.WaitAndUpdateAll();

	// Process the data
	g_pSessionManager->Update(&g_Context);

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	#ifdef USE_GLUT
	glOrtho(0, GL_WIN_SIZE_X, 0, GL_WIN_SIZE_Y, -1.0, 1.0);
	#else
	glOrthof(0, GL_WIN_SIZE_X, 0, GL_WIN_SIZE_Y, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	// Draw the MyBoxes
	g_pBox[0]->Draw();
	g_pBox[1]->Draw();
	g_pBox[2]->Draw();

	// Draw the slider
	DrawSlider();

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Code Example #4
File: ImageFlinger.cpp Project: Topographic0cean/src
void Loop(int sock)
{
	XnStatus nRetVal = XN_STATUS_OK;
	struct timespec last, now;
	double nowtime;

	clock_gettime(CLOCK_REALTIME, &last);
	double lasttime = (double)(last.tv_sec) + ((double)(last.tv_nsec))/1000000000;
	double starttime = lasttime;	// start of the stabilization window (was left uninitialized)
	int frames = 0;

	while (g_notDone)
	{
		if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
		{
			fprintf(stderr,"Could not update ir: %s\n", xnGetStatusString(nRetVal));
			continue;
		}

		const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
		const XnRGB24Pixel* pImage    = NULL;//g_image.GetRGB24ImageMap();
		const XnIRPixel*    pIR       = NULL;//g_ir.GetIRMap(); 

		ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);

		FindFingertip();

		frames++;
		clock_gettime(CLOCK_REALTIME, &now);
		nowtime = (double)(now.tv_sec) + ((double)(now.tv_nsec))/1000000000;

		if (g_stabalize) // If we are still stabilizing then don't do anything
		{
			if ((nowtime - starttime) >= STABILIZATION_TIME_SECS) 
			{
				g_stabalize = FALSE;
				g_set_background = TRUE;
			}
		}
		else if (g_calibrate)  // Do we need to calibrate?
			Calibrate(sock);
		else
			SendFingers(sock);  // otherwise just send the touches
		
		/*
		if (nowtime - lasttime >= 1.0)
		{
			printf("%d FPS\n", (int)(frames/(nowtime-lasttime)));
			lasttime = nowtime;
			frames = 0;
			if (sock >= 0 && pDepthMap != 0 )// && pImage != 0 )//&& pIR != 0)
				SendImage(sock,pDepthMap, pImage, pIR, g_depthWidth, g_depthHeight);
		}
		*/
	}
}
Code Example #5
File: StickFigure.cpp Project: imclab/StickFigureOSC
/*
 DWORD WINAPI EEThreadProc(LPVOID lpThreadParameter)
 {
	{
		g_Context.WaitAndUpdateAll();
		if (g_bRecord)
		{
			g_pRecorder->Record();
		}
	}
	return 0;
 }
 */
int EEThreadProc(void *lpThreadParameter)
{
	{
		g_Context.WaitAndUpdateAll();
		if (g_bRecord)
		{
			g_pRecorder->Record();
		}
	}
	return 0;
}
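
Both variants above pump exactly one WaitAndUpdateAll() pass, so the caller presumably invokes the proc repeatedly. A minimal sketch of a self-looping version, assuming a g_bRunning flag (hypothetical; not in the project above) that the main thread clears on shutdown:

// Hypothetical looping thread proc; g_bRunning is an assumed shutdown flag.
int EEThreadLoop(void * /*lpThreadParameter*/)
{
	while (g_bRunning)
	{
		g_Context.WaitAndUpdateAll();   // block until all generators have new data
		if (g_bRecord)
		{
			g_pRecorder->Record();      // append the fresh frame to the recording
		}
	}
	return 0;
}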
Code Example #6
File: main.cpp Project: DaikiMaekawa/openni_sandbox
    // Main loop
    void run()
    {
        int nAudioNextBuffer = 0;

        printf ("Press any key to exit...\n");

        // Discard the current (stale) data
        audio.WaitAndUpdateData();

        while (!xnOSWasKeyboardHit()) {
            // Update the data
            XnStatus nRetVal = context.WaitAndUpdateAll();
            if (nRetVal != XN_STATUS_OK) {
                throw std::runtime_error(xnGetStatusString(nRetVal));
            }

            // Get the next buffer
            WAVEHDR* pHeader = &AudioBuffers[nAudioNextBuffer];
            if ((pHeader->dwFlags & WHDR_DONE) == 0) {
                printf("No audio buffer is available! Audio buffer will be lost!\n");
                continue;
            }

            // Clean up the WAVE header
            MMRESULT mmRes = waveOutUnprepareHeader(hWaveOut, pHeader, sizeof(WAVEHDR));
            if ( mmRes != MMSYSERR_NOERROR ) {
                OutputErrorText( mmRes );
            }

            // Get the WAVE data
            pHeader->dwBufferLength = audio.GetDataSize();
            pHeader->dwFlags = 0;
            xnOSMemCopy(pHeader->lpData, audio.GetAudioBuffer(), pHeader->dwBufferLength);

            // Initialize the WAVE header
            mmRes = waveOutPrepareHeader(hWaveOut, pHeader, sizeof(WAVEHDR));
            if ( mmRes != MMSYSERR_NOERROR ) {
                OutputErrorText( mmRes );
                continue;
            }

            // Put the WAVE data on the output queue
            mmRes = waveOutWrite(hWaveOut, pHeader, sizeof(WAVEHDR));
            if ( mmRes != MMSYSERR_NOERROR ) {
                OutputErrorText( mmRes );
                continue;
            }

            // Advance to the next buffer index
            nAudioNextBuffer = (nAudioNextBuffer + 1) % NUMBER_OF_AUDIO_BUFFERS;
        }
    }
Code Example #7
File: kivi.cpp Project: phelrine/kica
void gl_onDisplay()
{
  gContext.WaitAndUpdateAll();
  glClear(GL_COLOR_BUFFER_BIT);	

  for(int i = 0; i < 2; i++){
    if(gDrawOrder[i] != NULL){
      gDrawOrder[i]->draw();
    }
  }
  
  glutSwapBuffers();
}
Code Example #8
File: main.cpp Project: AF83/ucengine-kinect
// this function is called each frame
void glutDisplay (void)
{
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	XnMapOutputMode mode;
	g_DepthGenerator.GetMapOutputMode(mode);
	glOrtho(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);
	glDisable(GL_TEXTURE_2D);
	g_Context.WaitAndUpdateAll();
	g_pSessionManager->Update(&g_Context);
	PrintSessionState(g_SessionState);
	glutSwapBuffers();
}
Code Example #9
File: kica.cpp Project: phelrine/kica
void gl_onDisplay()
{
  gContext.WaitAndUpdateAll();
  glClear(GL_COLOR_BUFFER_BIT);	

  gDepthDrawer->draw();
  gSkeletonDrawer->draw();
  gImageDrawer->draw();

  if(capture->isCapturing()){
    capture->captureSkeleton(gUserGenerator.GetSkeletonCap());
    gl_labelCapturingUser(capture->getCapturingUser());
  }
  
  glutSwapBuffers();
}
Code Example #10
// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}
	ros::Time tstamp = ros::Time::now();

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD);

	std::vector<mapping_msgs::PolygonalMap> pmaps;
	body_msgs::Skeletons skels;
	getSkels(pmaps, skels);
	ROS_DEBUG("skels size %d \n", (int)pmaps.size());
	if (pmaps.size()) {
		skels.header.stamp = tstamp;
		skels.header.seq = depthMD.FrameID();
		skels.header.frame_id = "/openni_depth_optical_frame";
		skel_pub.publish(skels);

		pmaps.front().header.stamp = tstamp;
		pmaps.front().header.seq = depthMD.FrameID();
		pmaps.front().header.frame_id = "/openni_depth_optical_frame";
		pmap_pub.publish(pmaps[0]);
	}

	glutSwapBuffers();
}
Code Example #11
File: main.cpp Project: sravanthib/openni_annotator
// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	// Process the data
	// DRAW
	g_DepthGenerator.GetMetaData(depthMD);
	g_SceneAnalyzer.GetMetaData(sceneMD);

	DrawDepthMap(depthMD, sceneMD);
	if (g_bPrintFrameID)
	{
		DrawFrameID(depthMD.FrameID());
	}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Code Example #12
int main(int argc, char **argv) {
	ros::init(argc, argv, "scene_tracker");
	ros::NodeHandle nh;

	string configFilename = ros::package::getPath("openni_tracker") + "/openni_tracker.xml";
	XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
	CHECK_RC(nRetVal, "InitFromXml");

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK) {
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	ros::Rate r(30);

	ros::NodeHandle pnh("~");
	string frame_id("openni_depth_frame");
	pnh.getParam("camera_frame_id", frame_id);

	while (ros::ok()) {
		g_Context.WaitAndUpdateAll();
		publishTransforms(frame_id);
		r.sleep();
	}

	g_Context.Shutdown();
	return 0;
}
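
publishTransforms() is defined elsewhere in the tracker. A minimal sketch of what such a helper does, reusing the skeleton and tf calls that appear verbatim in Code Example #21 (the helper body itself is an assumption, not the project's actual code):

// Hypothetical publishTransforms(): broadcast one joint per tracked user as a tf frame.
// Assumes #include <tf/transform_broadcaster.h> and the globals used above.
void publishTransforms(const std::string& frame_id)
{
	XnUserID users[15];
	XnUInt16 nUsers = 15;
	g_UserGenerator.GetUsers(users, nUsers);

	static tf::TransformBroadcaster br;
	for (int i = 0; i < nUsers; ++i)
	{
		if (!g_UserGenerator.GetSkeletonCap().IsTracking(users[i]))
			continue;

		XnSkeletonJointPosition joint;
		g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(users[i], XN_SKEL_HEAD, joint);

		tf::Transform transform;
		transform.setOrigin(tf::Vector3(joint.position.X / 1000.0,   // mm -> m
		                                joint.position.Y / 1000.0,
		                                joint.position.Z / 1000.0));
		transform.setRotation(tf::Quaternion(0, 0, 0, 1));           // identity rotation
		br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), frame_id, "head"));
	}
}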
Code Example #13
File: main.cpp Project: cvilas/scratch
bool DataCapture::captureOne()
{
    XnStatus rc = context_.WaitAndUpdateAll(); // want this to be WaitOneUpdateAll(RGB image)
    if( rc != XN_STATUS_OK )
    {
        std::cout << "WaitAndUpdateAll: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // grab image
    imageGen_.GetMetaData(imageMd_);
    const XnRGB24Pixel* rgbData = imageMd_.RGB24Data();
    for( unsigned int i = 0; i < 640 * 480; ++i )
    {
        pRgbData_[3*i] = rgbData->nRed;
        pRgbData_[3*i + 1] = rgbData->nGreen;
        pRgbData_[3*i + 2] = rgbData->nBlue;
        ++rgbData;
    }

    // grab depth image
    depthGen_.GetMetaData(depthMd_);
    const uint16_t* pDepthDataU16 = depthMd_.Data();
    for( int i = 0; i < 640 * 480; ++i)
    {
        uint16_t d = pDepthDataU16[i];
        if( d != 0 )
        {
            pDepthData_[i] = (d * 255)/2048;
        }
        else
        {
            pDepthData_[i] = 0; // should be NAN
        }
    }
    return true;
}
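
The inline comment above asks for a per-node wait: OpenNI also provides xn::Context::WaitOneUpdateAll(ProductionNode&), which blocks only until the named node has new data instead of waiting on every generator. A minimal sketch of that substitution (waitForImage is a hypothetical helper, not part of the project above):

// Hypothetical helper: wait only on the image generator, as the comment suggests.
// context_ and imageGen_ are the same members captureOne() already uses.
bool DataCapture::waitForImage()
{
    XnStatus rc = context_.WaitOneUpdateAll(imageGen_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "WaitOneUpdateAll: " << xnGetStatusString(rc) << std::endl;
        return false;
    }
    return true;
}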
Code Example #14
File: user_tracker.cpp Project: TjlHope/Rutler
int main(int argc, char **argv)
{
	ros::init(argc, argv, "kinnect");
	ros::NodeHandle nh;

	string configFilename = ros::package::getPath("kinect") + "/config/openni.xml";
	XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
	CHECK_RC(nRetVal, "InitFromXml");

	// init kinect user IDs to 0 to indicate they're invalid.
	for (int i = 0; i < NUSERS; i++)
	{
		kinectUsers[i].id = 0;
	}

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		ROS_INFO("Supplied user generator doesn't support skeleton");
		return 1;
	}

	XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	XnCallbackHandle hCalibrationCallbacks;
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			ROS_INFO("Pose required, but not supported");
			return 1;
		}

		XnCallbackHandle hPoseCallbacks;
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);

		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	ros::Rate r(30);

	ros::NodeHandle pnh("~");
	string frame_id("openni_depth_frame");
	pnh.getParam("camera_frame_id", frame_id);

	ros::Publisher user_pub = nh.advertise<kinect::User>("kinect_users", 1000);

	while (ros::ok()) {
		g_Context.WaitAndUpdateAll();
		publishTransforms(frame_id, user_pub);
		r.sleep();
	}

	g_Context.Shutdown();
	return 0;
}
Code Example #15
int main(int argc, char **argv){
  ros::init(argc, argv, "pplTracker");
  ros::NodeHandle nh;

  std::string configFilename = ros::package::getPath("people_tracker_denbyk") + "/init/openni_tracker.xml";
  genericUserCalibrationFileName = ros::package::getPath("people_tracker_denbyk") + "/init/GenericUserCalibration.bin";

  ros::Rate loop_rate(1);

  //Xn return value
  XnStatus nRetVal;

  //while (ros::ok())
  while (nh.ok())
  {
    //initialize the OpenNI context
    //ROS_INFO(configFilename.c_str(),xnGetStatusString(nRetVal));
    nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());

    //TODO: remove (forces success, so the retry branch below never runs)
    nRetVal = XN_STATUS_OK;

    //retry in a little while
    if(nRetVal != XN_STATUS_OK)
    {
      ROS_INFO("InitFromXml failed: %s Retrying in 3 seconds...", xnGetStatusString(nRetVal));
      ros::Duration(3).sleep();

    }
    else
    {
      break;
    }
  }


  //std::string frame_id;

  nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);


  if(nRetVal != XN_STATUS_OK)
  {
    ROS_ERROR("Find depth generator failed: %s", xnGetStatusString(nRetVal));
  }

  //look for a user-generator node and store it in g_UserGenerator
  nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);

  if (nRetVal != XN_STATUS_OK)
  {
    //create the user generator on g_Context. If this fails, NITE is probably missing
    nRetVal = g_UserGenerator.Create(g_Context);

    if (nRetVal != XN_STATUS_OK)
    {
      ROS_ERROR("NITE is likely missing: Please install NITE >= 1.5.2.21. Check the readme for download information. Error Info: User generator failed: %s", xnGetStatusString(nRetVal));
      return nRetVal;
    }
  }
  //verify that the created user generator supports skeletons
  if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
  {
    ROS_INFO("Supplied user generator doesn't support skeleton");
    return EXIT_FAILURE;
  }

  //set the skeleton profile, i.e. which joints to track and which to skip.
  //here "upper" means the torso/head
  g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_UPPER);

  //register callbacks for users entering, leaving, and re-entering the scene
  XnCallbackHandle hUserCallbacks;
  g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
  g_UserGenerator.RegisterToUserExit(User_OutOfScene, NULL, hUserCallbacks);
  g_UserGenerator.RegisterToUserReEnter(User_BackIntoScene, NULL, hUserCallbacks);

  //start all the generators
  nRetVal = g_Context.StartGeneratingAll();

  if(nRetVal != XN_STATUS_OK)
  {
    ROS_ERROR("StartGenerating failed: %s", xnGetStatusString(nRetVal));
  }

  //read a parameter passed to the node from the ROS parameter server,
  //using the key camera_frame_id, and store it in frame_id
  std::string frame_id("camera_depth_frame");
  nh.getParam("camera_frame_id", frame_id);


  std::cout << "init ok\n";
  //main loop
  while(nh.ok())
  {
    //update the context and wait
    g_Context.WaitAndUpdateAll();
    //publish the transforms on frame_id
    publishTransforms(frame_id);
    //sleep for a while
    ros::Rate(30).sleep();
  }

  //release resources and exit
  g_Context.Shutdown();
  return EXIT_SUCCESS;
}
Code Example #16
void glut_display() {
	xn::DepthMetaData pDepthMapMD;
	xn::ImageMetaData pImageMapMD;
	XnUserID pUser[2];
	XnUInt16 nUsers=2;
#ifdef DEBUGOUT
	ofstream datei;
#endif

	glEnable(GL_TEXTURE_2D);

	pUser[0] = 0;
	pUser[1] = 0;

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	/*glFrustum(	-ROOM_X/2+(1500.0/1750.0)*static_cast<int>(headtrans.x),
				ROOM_X/2+(1500.0/1750.0)*static_cast<int>(headtrans.x),
				-ROOM_Y/2-(1500.0/1750.0)*static_cast<int>(headtrans.y),
				ROOM_Y/2-(1500.0/1750.0)*static_cast<int>(headtrans.y),
				1525,
				2525);*/

	float nearplane = 1525;
	float screenaspect = ROOM_X/ROOM_Y;
	glFrustum(	nearplane*(-0.5 * screenaspect + headtrans.x)/headtrans.z,
				nearplane*( 0.5 * screenaspect + headtrans.x)/headtrans.z,
				nearplane*(-0.5 + headtrans.y)/headtrans.z,
				nearplane*( 0.5 + headtrans.y)/headtrans.z,
				nearplane,
				2525);


	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();

	nRetVal = context.WaitAndUpdateAll();
	checkError("Error while updating the data", nRetVal);

	/* Read the users and store them in objects */
	user.GetUsers(pUser, nUsers);
	if(pUser[0]!=0 && pUserOld!=1) {
		cout << "User 1 detected" << endl;
		pUserOld=1;
	}

	xn::SkeletonCapability pSkeleton = user.GetSkeletonCap();
	XnCallbackHandle hnd;
	pSkeleton.RegisterCalibrationCallbacks(skel_cal_start, skel_cal_end, 0,hnd);
	
	if(calibration){

	}

	pSkeleton.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	if(pSkeleton.IsCalibrated(pUser[0])) {
		XnSkeletonJointTransformation head;
		pSkeleton.StartTracking(pUser[0]);
		pSkeleton.GetSkeletonJoint(pUser[0], XN_SKEL_HEAD, head);

		if(head.position.fConfidence && head.orientation.fConfidence) {
/*			headtrans.rotmat[0] = head.orientation.orientation.elements[0];
			headtrans.rotmat[1] = head.orientation.orientation.elements[1];
			headtrans.rotmat[2] = head.orientation.orientation.elements[2];
			headtrans.rotmat[3] = 0;
			
			headtrans.rotmat[4] = head.orientation.orientation.elements[3];
			headtrans.rotmat[5] = head.orientation.orientation.elements[4];
			headtrans.rotmat[6] = head.orientation.orientation.elements[5];
			headtrans.rotmat[7] = 0;
			
			headtrans.rotmat[8] = -head.orientation.orientation.elements[6];
			headtrans.rotmat[9] = -head.orientation.orientation.elements[7];
			headtrans.rotmat[10] =-head.orientation.orientation.elements[8];
			headtrans.rotmat[11] = 0;

			headtrans.rotmat[12] = 0;
			headtrans.rotmat[13] = 0;
			headtrans.rotmat[14] = 0;
			headtrans.rotmat[15] = 1;*/

			headtrans.x = head.position.position.X;
			headtrans.y = head.position.position.Y;
			headtrans.z = head.position.position.Z;
			
			 
#ifdef CONSOLEOUT
			clearScreen();
			cout	<< "Confidence Position: " << head.position.fConfidence
					<< " X: " << head.position.position.X
					<< " Y: " << head.position.position.Y
					<< " Z: " << head.position.position.Z << endl

					<< "Confidence Rotation: " << head.orientation.fConfidence << endl
					<< "\t" << headtrans.rotmat[0]
					<< "\t" << headtrans.rotmat[4]
					<< "\t" << headtrans.rotmat[8] << endl
					<< "\t" << headtrans.rotmat[1]
					<< "\t" << headtrans.rotmat[5]
					<< "\t" << headtrans.rotmat[9] << endl
					<< "\t" << headtrans.rotmat[2]
					<< "\t" << headtrans.rotmat[6]
					<< "\t" << headtrans.rotmat[10] << endl << endl;
#endif
		}
	}

	//------------------------------------------------------------------------------------------
	//BEGIN: camera test
	//------------------------------------------------------------------------------------------
	//glTranslatef(0,0,-2000);
	glTranslatef(headtrans.x,headtrans.y,1-headtrans.z);
	cout << headtrans.x << " " << headtrans.y << " " << headtrans.z << endl;
	cout	<< nearplane*(-0.5 * screenaspect + headtrans.x)/headtrans.z	<< " "	
			<< nearplane*( 0.5 * screenaspect + headtrans.x)/headtrans.z	<< " "	
			<< nearplane*(-0.5 + headtrans.y)/headtrans.z					<< " "	
			<< nearplane*( 0.5 + headtrans.y)/headtrans.z					<< " "	<< endl;

	
		
		
	draw_room();

	//glPushAttrib(GL_ALL_ATTRIB_BITS);
	//glutSolidCube(5);
	//glPopAttrib();
	//------------------------------------------------------------------------------------------
	
	glDisable(GL_TEXTURE_2D);
	glutSwapBuffers();
#ifdef DEBUGOUT
	if(printFile) {
		printFile=false;
		datei.close();
	}
#endif
}
Code Example #17
int main(int argc, char **argv)
{
	ros::init(argc, argv, "openni_tracker");
	ros::NodeHandle nh, nh_private("~");
	ROS_INFO_STREAM("Initialising OpenNI tracker ...");

	default_user = 0;
	available_tracked_users_pub = nh_private.advertise<std_msgs::UInt16MultiArray>("available_users", 10, true);
	default_user_pub = nh_private.advertise<std_msgs::UInt16>("tracked_user", 10, true);
	user_chooser_sub = nh_private.subscribe("user_chooser", 10, userChooserCallback);

	string configFilename = ros::package::getPath("openni_tracker") + "/openni_tracker.xml";
	ROS_INFO_STREAM("Setting up configuration from XML file '" << configFilename << "'");
	XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
	CHECK_RC(nRetVal, "InitFromXml");

	ROS_INFO_STREAM("Looking for existing depth generators ...");
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));

	ROS_INFO_STREAM("Looking for existing user generators ...");
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));

	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
		ROS_INFO_STREAM("No existing user generators found. Created new one.");
		ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));
	}

	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		ROS_INFO_STREAM("Supplied user generator doesn't support skeleton");
		return 1;
	}

	ROS_INFO_STREAM("Registering user callbacks ...");
	XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	ROS_INFO_STREAM("Registering calibration callbacks ...");
	XnCallbackHandle hCalibrationCallbacks;
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart,
	                                                              UserCalibration_CalibrationEnd, NULL,
	                                                              hCalibrationCallbacks);

	ROS_INFO_STREAM("Checking pose detection capability ...");
	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			ROS_INFO_STREAM("Pose required, but not supported");
			return 1;
		}

		ROS_INFO_STREAM("Registering pose callbacks ...");
		XnCallbackHandle hPoseCallbacks;
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);

		ROS_INFO_STREAM("Getting calibration pose ...");
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	ROS_INFO_STREAM("Setting skeleton profile ...");
	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	//g_Context.Release();

	ROS_INFO_STREAM("Starting to generate everything ...");
	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");
	ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));

	ROS_INFO_STREAM("Stopping to generate everything ...");
	nRetVal = g_Context.StopGeneratingAll();
	ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));

	ROS_INFO_STREAM("Starting to generate everything ...");
	nRetVal = g_Context.StartGeneratingAll();
	ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));

	ROS_INFO_STREAM("Setting up ROS node ...");
	ros::Rate r(30);
	ros::NodeHandle pnh("~");
	string frame_id("openni_depth_frame");
	pnh.getParam("camera_frame_id", frame_id);

	nRetVal = g_Context.GetGlobalErrorState();
	ROS_INFO_STREAM("nRetVal: " << xnGetStatusString(nRetVal));

	ROS_INFO_STREAM("And go!");

	while (ros::ok())
	{
		ros::spinOnce();
		nRetVal = g_Context.WaitAndUpdateAll();
		CHECK_RC(nRetVal, "WaitAndUpdateAll");
		publishTransforms(frame_id);
		r.sleep();
	}

	g_Context.StopGeneratingAll();
	g_Context.Release();
	g_Context.Shutdown();
	return 0;
}
Code Example #18
File: main.cpp Project: sravanthib/openni_annotator
// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	// Process the data
	// DRAW
	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	// g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);

	DrawDepthMap(depthMD, imageMD, sceneMD, g_nPlayer);

	if (g_nPlayer != 0)
	{
		XnPoint3D com;
		g_UserGenerator.GetCoM(g_nPlayer, com);
		if (com.Z == 0)
		{
			g_nPlayer = 0;
			FindPlayer();
		}
	}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Code Example #19
void glut_display() {
	xn::DepthMetaData pDepthMapMD;
	xn::ImageMetaData pImageMapMD;
#ifdef DEBUGOUT
	ofstream datei;
#endif

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	gluPerspective(45, WINDOW_SIZE_X/WINDOW_SIZE_Y, 1000, 5000);
//	glOrtho(0, WINDOW_SIZE_X, WINDOW_SIZE_Y, 0, -128, 128);

	glMatrixMode(GL_TEXTURE);
	glLoadIdentity();
//	glTranslatef(-12.8/640.0, 9.0/480.0, 0);
//	glTranslatef(-12.8/630.0, 9.0/480.0,0);
	glScalef(scalex, scaley, 1.0);
	glTranslatef(transx/630, transy/480, 0.0);

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	
	rot_angle+=0.7;

	// Wait for new data from the depth generator
	nRetVal = context.WaitAndUpdateAll();
	checkError("Error while updating the data", nRetVal);

	// Read the current depth metadata
	depth.GetMetaData(pDepthMapMD);
	// Read the current depth map
	const XnDepthPixel* pDepthMap = depth.GetDepthMap();

	if(maxdepth==-1)
		maxdepth = getMaxDepth(pDepthMap);

	// Read the current image metadata
	image.GetMetaData(pImageMapMD);
	// Read the current image
	const XnRGB24Pixel* pImageMap = image.GetRGB24ImageMap();

	glColor3f(1, 1, 1);
//	XnDepthPixel maxdepth = depth.GetDeviceMaxDepth();
	const unsigned int xres = pDepthMapMD.XRes();
	const unsigned int yres = pDepthMapMD.YRes();

#ifdef DEBUGOUT
	datei.open("daniel.txt", ios::out);
#endif

	for(unsigned int y=0; y<yres-1; y++) {
		for(unsigned int x=0; x<xres; x++) {
			aDepthMap[x+y*xres] = static_cast<GLubyte>(static_cast<float>(pDepthMap[x+y*xres])/static_cast<float>(maxdepth)*255);
		}
	}

	/*
	glEnable(GL_TEXTURE_2D);
	glPushMatrix();
	glLoadIdentity();
	glTranslatef(-800, 0, -2000);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
	glBegin(GL_QUADS);
		glTexCoord2f(0,1); glVertex3f(0,0,0);
		glTexCoord2f(1,1); glVertex3f(640,0,0);
		glTexCoord2f(1,0); glVertex3f(640,480,0);
		glTexCoord2f(0,0); glVertex3f(0,480,0);
	glEnd();
	glPopMatrix();

	glPushMatrix();
	glLoadIdentity();
	glTranslatef(-800, 600, -2000);
	glBindTexture(GL_TEXTURE_2D, texture_depth);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE8, 640, 480, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, aDepthMap);
	glBegin(GL_QUADS);
		glTexCoord2f(0,1); glVertex3f(0,0,0);
		glTexCoord2f(1,1); glVertex3f(640,0,0);
		glTexCoord2f(1,0); glVertex3f(640,480,0);
		glTexCoord2f(0,0); glVertex3f(0,480,0);
	glEnd();
	glPopMatrix();*/

	glPushMatrix();
	glLoadIdentity();
	glTranslatef(-100, -100, -2000);
	glRotatef(cx,0,1,0);
	glRotatef(cy,1,0,0);
	glTranslatef(-320, -240, 1000);
	glEnable(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
	glBegin(GL_POINTS);
	for(unsigned int y=0; y<yres-1; y++) {
		for(unsigned int x=0; x<630; x++) {
			if(pDepthMap[x+y*xres]!=0) {
				glTexCoord2f(static_cast<float>(x)/static_cast<float>(630), static_cast<float>(y)/static_cast<float>(480)); 
				glVertex3f(x, (yres-y), -pDepthMap[x+y*xres]/2.00);
#ifdef DEBUGOUT
				datei << t_gamma[pDepthMap[x+y*xres]] << endl;
#endif
			}
		}
	}
	glEnd();
	glPopMatrix();
	glDisable(GL_TEXTURE_2D);
	glutSwapBuffers();
#ifdef DEBUGOUT
	datei.close();
	exit(-1);
#endif
}
Code Example #20
File: KinectTouch.cpp Project: ZiyaoWei/KinectTouch
int main() {

    const unsigned int nBackgroundTrain = 30;
    const unsigned short touchDepthMin = 10;
    const unsigned short touchDepthMax = 20;
    const unsigned int touchMinArea = 50;

    const bool localClientMode = true; // connect to a local client

    const double debugFrameMaxDepth = 4000; // maximal distance (in millimeters) for 8 bit debug depth frame quantization
    const char* windowName = "Debug";
    const char* colorWindowName = "image";
    const Scalar debugColor0(0, 0, 128);
    const Scalar debugColor1(255, 0, 0);
    const Scalar debugColor2(255, 255, 255);
    const Scalar debugColor3(0, 255, 255);
    const Scalar debugColor4(255, 0, 255);

    int xMin = 50;
    int xMax = 550;
    int yMin = 50;
    int yMax = 300;

    Mat1s depth(480, 640); // 16 bit depth (in millimeters)
    Mat1b depth8(480, 640); // 8 bit depth
    Mat3b rgb(480, 640); // 8 bit RGB color image

    Mat3b debug(480, 640); // debug visualization

    Mat1s foreground(480, 640);
    Mat1b foreground8(480, 640);

    Mat1b touch(480, 640); // touch mask

    Mat1s background(480, 640);
    vector<Mat1s> buffer(nBackgroundTrain);

    IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
    IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);

    initOpenNI("niConfig.xml");

    // TUIO server object
    TuioServer* tuio;
    if (localClientMode) {
        tuio = new TuioServer();
    } else {
        tuio = new TuioServer("192.168.0.2", 3333, false);
    }
    TuioTime time;

    namedWindow(colorWindowName);
    createTrackbar("xMin", colorWindowName, &xMin, 640);
    createTrackbar("xMax", colorWindowName, &xMax, 640);
    createTrackbar("yMin", colorWindowName, &yMin, 480);
    createTrackbar("yMax", colorWindowName, &yMax, 480);
    // create some sliders
    namedWindow(windowName);
    createTrackbar("xMin", windowName, &xMin, 640);
    createTrackbar("xMax", windowName, &xMax, 640);
    createTrackbar("yMin", windowName, &yMin, 480);
    createTrackbar("yMax", windowName, &yMax, 480);

    Keyboard * piano = new Keyboard();
    (*piano).initKeyMap();

    system("qjackctl &");
    sleep(4);
    JackByTheNotes * notesJack = new JackByTheNotes();
    notesJack->connect();
    sleep(2);
    system("sudo jack_connect Piano:Rubinstein system:playback_1 &");

    map<double, timeval> keys;

    // create background model (average depth)
    for (unsigned int i = 0; i < nBackgroundTrain; i++) {
        xnContext.WaitAndUpdateAll();
        depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
        buffer[i] = depth;
    }
    average(buffer, background);

    while (waitKey(1) != 27) {
        // read available data
        xnContext.WaitAndUpdateAll();

        // update 16 bit depth matrix
        depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
        //xnImgeGenertor.GetGrayscale8ImageMap()

        XnRGB24Pixel* xnRgb =
            const_cast<XnRGB24Pixel*>(xnImgeGenertor.GetRGB24ImageMap());
        //		IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
        //		IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);
        cvSetData(image, xnRgb, 640 * 3);
        cvConvertImage(image, convertedImage, CV_CVTIMG_SWAP_RB);
        bool color = true;
        rgb = convertedImage;
        //		cvtColor(rgb,rgb,CV_RGB2BGR);
        // update rgb image
        //		rgb.data = (uchar*) xnImgeGenertor.GetRGB24ImageMap(); // segmentation fault here
        //		cvCvtColor(rgb, rgb, CV_BGR2RGB);

        // extract foreground by simple subtraction of very basic background model
        foreground = background - depth;

        // find touch mask by thresholding (points that are close to background = touch points)
        touch = (foreground > touchDepthMin) & (foreground < touchDepthMax);

        // extract ROI
        Rect roi(xMin, yMin, xMax - xMin, yMax - yMin);
        Mat touchRoi = touch(roi);

        // find touch points
        vector<vector<Point2i> > contours;
        vector<Point2f> touchPoints;
        findContours(touchRoi, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE,
                     Point2i(xMin, yMin));
        for (unsigned int i = 0; i < contours.size(); i++) {
            Mat contourMat(contours[i]);
            // find touch points by area thresholding
            if (contourArea(contourMat) > touchMinArea) {
                Scalar center = mean(contourMat);
                Point2i touchPoint(center[0], center[1]);
                touchPoints.push_back(touchPoint);
            }
        }

        // send TUIO cursors
        time = TuioTime::getSessionTime();
        tuio->initFrame(time);

        for (unsigned int i = 0; i < touchPoints.size(); i++) { // touch points
            float cursorX = (touchPoints[i].x - xMin) / (xMax - xMin);
            float cursorY = 1 - (touchPoints[i].y - yMin) / (yMax - yMin);
            TuioCursor* cursor = tuio->getClosestTuioCursor(cursorX, cursorY);
            // TODO improve tracking (don't move cursors away, that might be closer to another touch point)
            if (cursor == NULL || cursor->getTuioTime() == time) {
                tuio->addTuioCursor(cursorX, cursorY);
            } else {
                tuio->updateTuioCursor(cursor, cursorX, cursorY);
            }
        }

        tuio->stopUntouchedMovingCursors();
        tuio->removeUntouchedStoppedCursors();
        tuio->commitFrame();

        // draw debug frame
        depth.convertTo(depth8, CV_8U, 255 / debugFrameMaxDepth); // render depth to debug frame
        cvtColor(depth8, debug, CV_GRAY2BGR);
        debug.setTo(debugColor0, touch); // touch mask
        rectangle(debug, roi, debugColor1, 2); // surface boundaries
        if (color)
            rectangle(rgb, roi, debugColor1, 2); // surface boundaries

        // draw 10 white keys within the roi
        int stride = (xMax - xMin) / 10;
        for (int keys = 1; keys < 10; keys++) {
            Point lower(xMin + keys * stride, yMax);
            if (keys == 3 || keys == 7) {
                Point upper(xMin + keys * stride, yMin);
                line(debug, upper, lower, debugColor3, 2, 0);
                if (color)
                    line(rgb, upper, lower, debugColor3, 2, 0);
                continue;
            } else {
                Point upper(xMin + keys * stride, (yMin + yMax) / 2);
                line(debug, upper, lower, debugColor3, 2, 0);
                if (color)
                    line(rgb, upper, lower, debugColor3, 2, 0);
            }
            Point blkUpper(xMin + keys * stride - stride / 3, yMin);
            Point blkLower(xMin + keys * stride + stride / 3,
                           (yMin + yMax) / 2);
            rectangle(debug, blkUpper, blkLower, debugColor4, 2);
            if (color)
                rectangle(rgb, blkUpper, blkLower, debugColor4, 2);
        }

        for (unsigned int i = 0; i < touchPoints.size(); i++) { // touch points
            circle(debug, touchPoints[i], 5, debugColor2, CV_FILLED);
            if (color)
                circle(rgb, touchPoints[i], 5, debugColor2, CV_FILLED);
            double frequency = piano->keyFrequency(touchPoints[i].y - 50,
                                                   touchPoints[i].x - 50);

            cout << frequency << " " << touchPoints[i].y - 50 << " "
                 << touchPoints[i].x - 50 << endl;

            if (keys.find(frequency) == keys.end()) {
                Note * note = new Note(frequency, 2, 4000);
                notesJack->playNote(*note);
                timeval now;
                gettimeofday(&now, NULL);
                keys[frequency] = now;
            } else {
                timeval now;
                gettimeofday(&now, NULL);
                if ((now.tv_sec - keys[frequency].tv_sec) * 1000
                        + (now.tv_usec - keys[frequency].tv_usec) / 1000
                        > 1000) {
                    Note * note = new Note(frequency, 2, 4000);
                    notesJack->playNote(*note);
                    keys[frequency] = now;
                }
            }
        }
        // render debug frame (with sliders)
//		IplImage grayScale = debug;
//		cvFlip(&grayScale, NULL, 1);
//		Mat gray(&grayScale);
//		imshow(windowName, gray);
//
//		IplImage colorful = rgb;
//		cvFlip(&colorful, NULL, 1);
//		Mat real(&colorful);
//		imshow("image", real);
        imshow(windowName, debug);
        imshow("image", rgb);
        //		cvShowImage("image", image);
    }

    return 0;
}
Code Example #21
File: kinect_joints.cpp Project: rqou/prlite-pc
int main(int argc, char **argv) {
	ros::init(argc, argv, "PersonTracker");
	ros::NodeHandle nh;
	ros::Rate loop_rate(10);
    ROS_INFO("Initializing Arm Connection....");
    ros::Publisher leftArmPublisher = nh.advertise<prlite_kinematics::SphereCoordinate>("/armik/n0/position", 1000);
    ros::Publisher rightArmPublisher = nh.advertise<prlite_kinematics::SphereCoordinate>("/armik/n1/position", 1000);
	yourmom = nh.advertise<prlite_kinematics::some_status_thing>("kinect_hack_status", 1000);
    ros::Duration(2.0).sleep();
    prlite_kinematics::SphereCoordinate coord;
	prlite_kinematics::some_status_thing status;
    coord.radius = 35.0;
    coord.phi = 0.0;
    coord.theta = 0.0;
	status.lulz = 0;	//initialized/reset
	yourmom.publish(status);
    leftArmPublisher.publish(coord);
    rightArmPublisher.publish(coord);
	ROS_INFO("Initializing Kinect + NITE....");
	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;
	nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	
	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (nRetVal);
	} else if (nRetVal != XN_STATUS_OK) {
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return (nRetVal);
	}

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return 1;
	}
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			printf("Pose required, but not supported\n");
			return 1;
		}
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");
    ROS_INFO("Ready To Go!\n");

	while (ros::ok()) {
		// Update OpenNI
		g_Context.WaitAndUpdateAll();
		xn::SceneMetaData sceneMD;
		xn::DepthMetaData depthMD;
		g_DepthGenerator.GetMetaData(depthMD);
		g_UserGenerator.GetUserPixels(0, sceneMD);
		// Print positions
		XnUserID aUsers[15];
		XnUInt16 nUsers = 15;
		g_UserGenerator.GetUsers(aUsers, nUsers);
		for (int i = 0; i < nUsers; ++i) {
			if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i])) {
				// Read joint positions for person (the HAND joints stand in for wrists)
				XnSkeletonJointPosition torsoPosition, lShoulderPosition, rShoulderPosition, neckPosition, headPosition, lElbowPosition, rElbowPosition;
                XnSkeletonJointPosition rWristPosition, lWristPosition, rHipPosition, lHipPosition, lKneePosition, rKneePosition;
                XnSkeletonJointPosition lFootPosition, rFootPosition;
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_TORSO, torsoPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_NECK, neckPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_HEAD, headPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_SHOULDER, lShoulderPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_SHOULDER, rShoulderPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_ELBOW, lElbowPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_ELBOW, rElbowPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_HAND, lWristPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_HAND, rWristPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_HIP, lHipPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_HIP, rHipPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_KNEE, lKneePosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_KNEE, rKneePosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_FOOT, lFootPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_FOOT, rFootPosition);

                // Read Joint Orientations
                XnSkeletonJointOrientation torsoOrientation, lShoulderOrientation, rShoulderOrientation, lHipOrientation, rHipOrientation;
                XnSkeletonJointOrientation lWristOrientation, rWristOrientation, lElbowOrientation, rElbowOrientation;
                XnSkeletonJointOrientation headOrientation, neckOrientation;
                XnSkeletonJointOrientation lKneeOrientation, rKneeOrientation, lFootOrientation, rFootOrientation;
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_TORSO, torsoOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_HEAD, headOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_NECK, neckOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_LEFT_SHOULDER, lShoulderOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_RIGHT_SHOULDER, rShoulderOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_LEFT_HIP, lHipOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_RIGHT_HIP, rHipOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_LEFT_ELBOW, lElbowOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_RIGHT_ELBOW, rElbowOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_LEFT_HAND, lWristOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_RIGHT_HAND, rWristOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_LEFT_KNEE, lKneeOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_RIGHT_KNEE, rKneeOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_LEFT_FOOT, lFootOrientation);
	            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_RIGHT_FOOT, rFootOrientation);

	            XnFloat* m = torsoOrientation.orientation.elements;
	            KDL::Rotation torsoO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = lShoulderOrientation.orientation.elements;
                KDL::Rotation lShouldO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = rShoulderOrientation.orientation.elements;
                KDL::Rotation rShouldO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = lHipOrientation.orientation.elements;
                KDL::Rotation lHipO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = rHipOrientation.orientation.elements;
                KDL::Rotation rHipO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = headOrientation.orientation.elements;
                KDL::Rotation headO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = neckOrientation.orientation.elements;
                KDL::Rotation neckO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = lElbowOrientation.orientation.elements;
                KDL::Rotation lElbowO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = rElbowOrientation.orientation.elements;
                KDL::Rotation rElbowO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = lWristOrientation.orientation.elements;
                KDL::Rotation lWristO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = rWristOrientation.orientation.elements;
                KDL::Rotation rWristO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = lKneeOrientation.orientation.elements;
                KDL::Rotation lKneeO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = rKneeOrientation.orientation.elements;
                KDL::Rotation rKneeO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = lFootOrientation.orientation.elements;
                KDL::Rotation lFootO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
                m = rFootOrientation.orientation.elements;
                KDL::Rotation rFootO(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);

	            double qx = 0.0, qy = 0.0, qz = 0.0, qw = 0.0;

				// Get Points in 3D space
				XnPoint3D torso = torsoPosition.position;
				XnPoint3D lShould = lShoulderPosition.position;
				XnPoint3D rShould = rShoulderPosition.position;
                XnPoint3D lElbow = lElbowPosition.position;
                XnPoint3D rElbow = rElbowPosition.position;
                XnPoint3D lWrist = lWristPosition.position;
                XnPoint3D rWrist = rWristPosition.position;
                XnPoint3D neck = neckPosition.position;
                XnPoint3D head = headPosition.position;
                XnPoint3D lHip = lHipPosition.position;
                XnPoint3D rHip = rHipPosition.position;
                XnPoint3D lKnee = lKneePosition.position;
                XnPoint3D rKnee = rKneePosition.position;
                XnPoint3D lFoot = lFootPosition.position;
                XnPoint3D rFoot = rFootPosition.position;

                // ---------- ARM CONTROLLER HACK ------------------

                // Calculate arm length of user
                double lenL = calc3DDist( lShould, lElbow ) + calc3DDist( lElbow, lWrist );
                double lenR = calc3DDist( rShould, rElbow ) + calc3DDist( rElbow, rWrist );
                double distL = calc3DDist(lShould, lWrist);
                double distR = calc3DDist(rShould, rWrist);

                // Calculate positions
                prlite_kinematics::SphereCoordinate lCoord;
                prlite_kinematics::SphereCoordinate rCoord;

                lCoord.radius = 35 * (distL / lenL);
                rCoord.radius = 35 * (distR / lenR);

                lCoord.theta = -atan2(lShould.X - lWrist.X, lShould.Z - lWrist.Z);
                rCoord.theta = -atan2(rShould.X - rWrist.X, rShould.Z - rWrist.Z);
                if(lCoord.theta > 1.0) lCoord.theta = 1.0;
                if(lCoord.theta < -1.0) lCoord.theta = -1.0;
                if(rCoord.theta > 1.0) rCoord.theta = 1.0;
                if(rCoord.theta < -1.0) rCoord.theta = -1.0;

                lCoord.phi = -atan2(lShould.Y - lWrist.Y, lShould.X - lWrist.X);
                rCoord.phi = -atan2(rShould.Y - rWrist.Y, rShould.X - rWrist.X);
                if(lCoord.phi > 1.25) lCoord.phi = 1.25;
                if(lCoord.phi < -0.33) lCoord.phi = -0.33;
                if(rCoord.phi > 1.25) rCoord.phi = 1.25;
                if(rCoord.phi < -0.33) rCoord.phi = -0.33;

                ROS_INFO("User %d: Left (%lf,%lf,%lf), Right (%lf,%lf,%lf)", i, lCoord.radius, lCoord.theta, lCoord.phi, rCoord.radius, rCoord.theta, rCoord.phi);

                // Publish to arms
                leftArmPublisher.publish(rCoord);
                rightArmPublisher.publish(lCoord);

                // ---------- END HACK -----------------

                // Publish Transform
                static tf::TransformBroadcaster br;
                tf::Transform transform;
                std::stringstream body_name, neck_name, lshould_name, rshould_name, head_name, relbow_name, lelbow_name, lwrist_name, rwrist_name;
                std::stringstream lhip_name, rhip_name, lknee_name, rknee_name, lfoot_name, rfoot_name;
                body_name << "Torso (" << i << ")" << std::ends;
                neck_name << "Neck (" << i << ")" << std::ends;
                head_name << "Head (" << i << ")" << std::ends;
                lshould_name << "Shoulder L (" << i << ")" << std::ends;
                rshould_name << "Shoulder R (" << i << ")" << std::ends;
                lelbow_name << "Elbow L (" << i << ")" << std::ends;
                relbow_name << "Elbow R (" << i << ")" << std::ends;
                lwrist_name << "Wrist L (" << i << ")" << std::ends;
                rwrist_name << "Wrist R (" << i << ")" << std::ends;
                lhip_name << "Hip L (" << i << ")" << std::ends;
                rhip_name << "Hip R (" << i << ")" << std::ends;
                lknee_name << "Knee L (" << i << ")" << std::ends;
                rknee_name << "Knee R (" << i << ")" << std::ends;
                lfoot_name << "Foot L (" << i << ")" << std::ends;
                rfoot_name << "Foot R (" << i << ")" << std::ends;

                // Publish Torso
                torsoO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(torso.X / ADJUST_VALUE, torso.Y / ADJUST_VALUE, torso.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", body_name.str().c_str()));  
                // Publish Left Shoulder
                lShouldO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(lShould.X / ADJUST_VALUE, lShould.Y / ADJUST_VALUE, lShould.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", lshould_name.str().c_str()));
                // Publish Right Shoulder
                rShouldO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(rShould.X / ADJUST_VALUE, rShould.Y / ADJUST_VALUE, rShould.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", rshould_name.str().c_str())); 
                // Publish Left Elbow
                lElbowO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(lElbow.X / ADJUST_VALUE, lElbow.Y / ADJUST_VALUE, lElbow.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", lelbow_name.str().c_str()));
                // Publish Right Elbow
                rElbowO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(rElbow.X / ADJUST_VALUE, rElbow.Y / ADJUST_VALUE, rElbow.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", relbow_name.str().c_str()));
                // Publish Left Wrist
                lWristO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(lWrist.X / ADJUST_VALUE, lWrist.Y / ADJUST_VALUE, lWrist.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", lwrist_name.str().c_str()));
                // Publish Right Wrist
                rWristO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(rWrist.X / ADJUST_VALUE, rWrist.Y / ADJUST_VALUE, rWrist.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", rwrist_name.str().c_str()));
                // Publish Neck
                neckO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(neck.X / ADJUST_VALUE, neck.Y / ADJUST_VALUE, neck.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", neck_name.str().c_str())); 
                // Publish Head
                headO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(head.X / ADJUST_VALUE, head.Y / ADJUST_VALUE, head.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", head_name.str().c_str()));
                // Publish Left Hip
                lHipO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(lHip.X / ADJUST_VALUE, lHip.Y / ADJUST_VALUE, lHip.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", lhip_name.str().c_str()));
                // Publish Right Hip
                rHipO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(rHip.X / ADJUST_VALUE, rHip.Y / ADJUST_VALUE, rHip.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", rhip_name.str().c_str())); 
                // Publish Left Knee
                lKneeO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(lKnee.X / ADJUST_VALUE, lKnee.Y / ADJUST_VALUE, lKnee.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", lknee_name.str().c_str()));
                // Publish Right Knee
                rKneeO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(rKnee.X / ADJUST_VALUE, rKnee.Y / ADJUST_VALUE, rKnee.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", rknee_name.str().c_str())); 
                // Publish Left Foot
                lFootO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(lFoot.X / ADJUST_VALUE, lFoot.Y / ADJUST_VALUE, lFoot.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", lfoot_name.str().c_str()));
                // Publish Right Foot
                rFootO.GetQuaternion(qx, qy, qz, qw);
                transform.setOrigin(tf::Vector3(rFoot.X / ADJUST_VALUE, rFoot.Y / ADJUST_VALUE, rFoot.Z / ADJUST_VALUE));
                transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
                br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", rfoot_name.str().c_str()));
			}
		}
		ros::spinOnce();
		//loop_rate.sleep();
	}
	g_Context.Shutdown();
	return 0;
}
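
The per-joint blocks in the example above are identical apart from which joint they read; a small helper keeps that pattern manageable. A minimal sketch against the same tf API, where publishJoint and its parameter list are hypothetical names:

#include <ros/ros.h>
#include <tf/transform_broadcaster.h>
#include <string>

// Hypothetical helper: publish one skeleton joint as a stamped transform.
// (x, y, z) is the joint position in sensor units; (qx, qy, qz, qw) is the
// orientation quaternion already extracted via GetQuaternion().
void publishJoint(tf::TransformBroadcaster& br,
                  double x, double y, double z,
                  double qx, double qy, double qz, double qw,
                  const std::string& child_frame, double scale)
{
    tf::Transform transform;
    transform.setOrigin(tf::Vector3(x / scale, y / scale, z / scale));
    transform.setRotation(tf::Quaternion(qx, qy, qz, qw));
    br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                          "world", child_frame));
}

Each block above then reduces to a GetQuaternion call followed by one publishJoint call with ADJUST_VALUE as the scale.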
Code Example #22
File: OpenNIVideo.cpp Project: Avatarchik/osgART
bool OpenNIVideo::update(osg::NodeVisitor* nv) {

	//this is the main function of your video plugin:
	//either retrieve images from your video stream/camera/file,
	//or communicate with a thread to synchronize and get the data out.
	//The essential part is to synchronize access to the data
	//and copy the result into the VideoImageStream used by this plugin.

	//0. you can collect some stats; a timer works for that
	osg::Timer t;

	{

	//1. mutex lock access to the image video stream
	OpenThreads::ScopedLock<OpenThreads::Mutex> _lock(this->getMutex());

	osg::notify(osg::DEBUG_INFO)<<"osgART::OpenNIVideo::update() get new image.."<<std::endl;

	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal=context.WaitAndUpdateAll();
	CHECK_RC(nRetVal, "Update Data");

	xnFPSMarkFrame(&xnFPS);

	depth_generator.GetMetaData(depthMD);
	const XnDepthPixel* pDepthMap = depthMD.Data();
	// XnDepthPixel is an unsigned 16-bit depth value in millimeters.
	
	image_generator.GetMetaData(imageMD);
	const XnUInt8* pImageMap = imageMD.Data();

	// Hybrid mode isn't supported in this sample
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		std::cerr<<"The device depth and image resolution must be equal!"<<std::endl;
		exit(1);
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		std::cerr<<"The device image format must be RGB24"<<std::endl;
		exit(1);
	}
	
	const XnDepthPixel* pDepth=pDepthMap;
	const XnUInt8* pImage=pImageMap;
	
	XnDepthPixel zMax = depthMD.ZRes();
	// Scale the 16-bit depth buffer down to 8 bits for display.
	for (unsigned int i = 0; i < depthMD.XRes() * depthMD.YRes(); ++i)
	{
		*(_depthBufferByte + i) = 255 * (float(*(pDepth + i)) / float(zMax));
	}

	memcpy(_videoStreamList[0]->data(),pImage, _videoStreamList[0]->getImageSizeInBytes());
	
	memcpy(_videoStreamList[1]->data(),_depthBufferByte, _videoStreamList[1]->getImageSizeInBytes());

	//3. don't forget to call this to notify the rest of the application
	//that you have a new video image
	_videoStreamList[0]->dirty();
	_videoStreamList[1]->dirty();
	}

	//4. hopefully report some interesting data
	if (nv) {

		const osg::FrameStamp *framestamp = nv->getFrameStamp();

		if (framestamp && _stats.valid())
		{
			_stats->setAttribute(framestamp->getFrameNumber(),
				"Capture time taken", t.time_m());
		}
	}


	// Increase modified count every X ms to ensure tracker updates
	if (updateTimer.time_m() > 50) {
		_videoStreamList[0]->dirty();
		_videoStreamList[1]->dirty();
		updateTimer.setStartTick();
	}

	return true;
}
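
The conversion loop in update() maps raw 16-bit depth onto the 8-bit range using ZRes() as the maximum. The same step as a standalone sketch (function and buffer names are illustrative):

#include <XnCppWrapper.h>

// Scale 16-bit OpenNI depth values into 8-bit intensities for display.
void depthToByte(const XnDepthPixel* depth, unsigned char* out,
                 unsigned int count, XnDepthPixel zMax)
{
    for (unsigned int i = 0; i < count; ++i)
        out[i] = static_cast<unsigned char>(255.0f * depth[i] / zMax);
}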
Code Example #23
int main(int argc, char **argv) {
    ros::init(argc, argv, "openni_tracker1");
    ros::NodeHandle nh;
/*
    string configFilename = ros::package::getPath("openni_tracker1") + "/openni_tracker1.xml";
    XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
    CHECK_RC(nRetVal, "InitFromXml"); */


    XnStatus nRetVal = g_Context.Init();
    CHECK_RC(nRetVal, "Init");
    nRetVal = g_Context.OpenFileRecording("/home/latifanjum/Downloads/OpenNI-master/Platform/Linux/Bin/x86-Release/Dataset/Wave2/wave25.oni", g_Player);
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Can't open recording: %s\n", xnGetStatusString(nRetVal));
        return 1;
    }
    g_Player.SetRepeat(false); // only configure the player once it is valid


    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
    CHECK_RC(nRetVal, "Find depth generator");

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK) {
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
		ROS_INFO("Supplied user generator doesn't support skeleton");
		return 1;
	}

    XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	XnCallbackHandle hCalibrationCallbacks;
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration()) {
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
			ROS_INFO("Pose required, but not supported");
			return 1;
		}

		XnCallbackHandle hPoseCallbacks;
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);

		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	ros::Rate r(30);

        
        ros::NodeHandle pnh("~");
        string frame_id("openni_depth_frame");
        pnh.getParam("camera_frame_id", frame_id);
                
	while (ros::ok()) {
		g_Context.WaitAndUpdateAll();
		publishTransforms(frame_id);
		r.sleep();
	}

	g_Context.Shutdown();
	return 0;
}
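
Because SetRepeat(false) stops playback at the end of the .oni file, the main loop can also exit once the recording is exhausted. A sketch using the player handle (the rest of the program is unchanged):

while (ros::ok() && !g_Player.IsEOF())
{
    g_Context.WaitAndUpdateAll();
    publishTransforms(frame_id);
    r.sleep();
}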
Code Example #24
int main(int argc, char **argv) {
    ros::init(argc, argv, "tracker_2");
    ros::NodeHandle nh;

    string configFilename = ros::package::getPath("openni_tracker_multi") + "/openni_tracker.xml";
    XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
    CHECK_RC(nRetVal, "InitFromXml");

//    XnStatus nRetVal = XN_STATUS_OK;

    //xnLogInitFromXmlFile(csXmlFile);

    nRetVal = g_Context.Init();
    XN_IS_STATUS_OK(nRetVal);


   // SELECTION OF THE DEVICE
    xn::EnumerationErrors errors;
    xn::Device g_Device;
        // find devices
    xn::NodeInfoList list;
    xn::NodeInfoList list_depth;
        nRetVal = g_Context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors);
    XN_IS_STATUS_OK(nRetVal);

    printf("The following devices were found:\n");
        int i = 1;
        for (xn::NodeInfoList::Iterator it = list.Begin(); it != list.End(); ++it, ++i)
        {
            xn::NodeInfo deviceNodeInfo = *it;

            xn::Device deviceNode;
            deviceNodeInfo.GetInstance(deviceNode);
            XnBool bExists = deviceNode.IsValid();
            if (!bExists)
            {
                g_Context.CreateProductionTree(deviceNodeInfo, deviceNode);
                // this might fail.
            }

            if (deviceNode.IsValid() && deviceNode.IsCapabilitySupported(XN_CAPABILITY_DEVICE_IDENTIFICATION))
            {
                const XnUInt32 nStringBufferSize = 200;
                XnChar strDeviceName[nStringBufferSize];
                XnChar strSerialNumber[nStringBufferSize];

                XnUInt32 nLength = nStringBufferSize;
                deviceNode.GetIdentificationCap().GetDeviceName(strDeviceName, nLength);
                nLength = nStringBufferSize;
                deviceNode.GetIdentificationCap().GetSerialNumber(strSerialNumber, nLength);
                printf("[%d] %s (%s)\n", i, strDeviceName, strSerialNumber);
            }
            else
            {
                printf("[%d] %s\n", i, deviceNodeInfo.GetCreationInfo());
            }

            // release the device if we created it
            if (!bExists && deviceNode.IsValid())
            {
                deviceNode.Release();
            }
        }
        printf("\n");
        printf("Choose device to open (1): ");

        int chosen=0;
        if (argc != 2)
        {
            printf("Choose device to open (1): ");
            int nRetval = scanf("%d", &chosen);
        }
        else
        {
        chosen = atoi(argv[1]);
        }

            // create it
            xn::NodeInfoList::Iterator it = list.Begin();
            for (i = 1; i < chosen; ++i)
            {
                it++;
            }

        xn::NodeInfo deviceNode = *it;
        nRetVal = g_Context.CreateProductionTree(deviceNode, g_Device);
        printf("Production tree of the device created.\n");

    // SELECTION OF THE DEPTH GENERATOR
        nRetVal = g_Context.EnumerateProductionTrees(XN_NODE_TYPE_DEPTH, NULL, list_depth, &errors);
        XN_IS_STATUS_OK(nRetVal);

        printf("The following devices were found:\n");
            int i_depth = 1;
            for (xn::NodeInfoList::Iterator it_depth = list_depth.Begin(); it_depth != list_depth.End(); ++it_depth, ++i_depth)
            {
                xn::NodeInfo depthNodeInfo = *it_depth;

                xn::Device depthNode;
                depthNodeInfo.GetInstance(depthNode);
                XnBool bExists_depth = depthNode.IsValid();
                if (!bExists_depth)
                {
                    g_Context.CreateProductionTree(depthNodeInfo, depthNode);
                    // this might fail.
                }

                if (depthNode.IsValid() && depthNode.IsCapabilitySupported(XN_CAPABILITY_DEVICE_IDENTIFICATION))
                {
                    const XnUInt32 nStringBufferSize = 200;
                    XnChar strDeviceName[nStringBufferSize];
                    XnChar strSerialNumber[nStringBufferSize];

                    XnUInt32 nLength = nStringBufferSize;
                    depthNode.GetIdentificationCap().GetDeviceName(strDeviceName, nLength);
                    nLength = nStringBufferSize;
                    depthNode.GetIdentificationCap().GetSerialNumber(strSerialNumber, nLength);
                    printf("[%d] %s (%s)\n", i, strDeviceName, strSerialNumber);
                }
                else
                {
                    printf("[%d] %s\n", i, depthNodeInfo.GetCreationInfo());
                }

                // release the device if we created it
                if (!bExists_depth && depthNode.IsValid())
                {
                    depthNode.Release();
                }
            }
            printf("\n");
        printf("Choose device to open (1): ");

        int chosen_depth = 1;
        /*if (argc != 2)
        {
            printf("Choose device to open (1): ");
            int nRetval_depth = scanf("%d", &chosen_depth);
        }
        else
        {
        chosen_depth = atoi(argv[1]);
        }*/


            // create it
            xn::NodeInfoList::Iterator it_depth = list_depth.Begin();
            for (i = 1; i < chosen_depth; ++i)
            {
                it_depth++;
            }

        xn::NodeInfo depthNode = *it_depth;
        nRetVal = g_Context.CreateProductionTree(depthNode, g_DepthGenerator);
        printf("Production tree of the DepthGenerator created.\n");

        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
        XN_IS_STATUS_OK(nRetVal);



    CHECK_RC(nRetVal, "Find depth generator");
     printf("CHECK_RC(nRetVal, Find depth generator);\n");

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
    printf("User generator found.\n");
    if (nRetVal != XN_STATUS_OK) {
        nRetVal = g_UserGenerator.Create(g_Context);
        printf("User generator created.\n");
        CHECK_RC(nRetVal, "Find user generator");
    }

    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
        printf("Supplied user generator doesn't support skeleton.\n");
        ROS_INFO("Supplied user generator doesn't support skeleton");
        return 1;
    }

    XnCallbackHandle hUserCallbacks;
    g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

    XnCallbackHandle hCalibrationCallbacks;
    g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

    if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration()) {
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
            ROS_INFO("Pose required, but not supported");
            return 1;
        }

        XnCallbackHandle hPoseCallbacks;
        g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);

        g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
    }

    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");

    ros::Rate r(30);


        ros::NodeHandle pnh("~");
        string frame_id("/kinect2_depth_frame");
        pnh.getParam("camera_frame_id", frame_id);

    while (ros::ok()) {
        g_Context.WaitAndUpdateAll();
        publishTransforms(frame_id);
        r.sleep();
    }

    g_Context.Shutdown();
    return 0;
}
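
The device pass and the depth pass above duplicate the same enumerate-and-print pattern. A hedged sketch of a shared helper (the name listNodes and the exact printout are illustrative):

#include <cstdio>
#include <XnCppWrapper.h>

// List all production nodes of the given type by their creation info.
void listNodes(xn::Context& ctx, XnProductionNodeType type)
{
    xn::NodeInfoList list;
    xn::EnumerationErrors errors;
    if (ctx.EnumerateProductionTrees(type, NULL, list, &errors) != XN_STATUS_OK)
        return;

    int i = 1;
    for (xn::NodeInfoList::Iterator it = list.Begin(); it != list.End(); ++it, ++i)
        printf("[%d] %s\n", i, (*it).GetCreationInfo());
}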
Code Example #25
File: ContextWrapper.cpp Project: CaoJohn/PyOpenNI
void Context_WaitAndUpdateAll_wrapped(xn::Context& self) {
    check( self.WaitAndUpdateAll() );
}
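
The check() helper is not shown in this listing; presumably it converts a non-OK XnStatus into a Python exception for the binding. A sketch of what such a helper could look like, assuming Boost.Python (the actual PyOpenNI code may differ):

#include <boost/python.hpp>
#include <XnCppWrapper.h>

// Raise a Python RuntimeError when an OpenNI call fails (assumed behavior).
void check(XnStatus status)
{
    if (status != XN_STATUS_OK)
    {
        PyErr_SetString(PyExc_RuntimeError, xnGetStatusString(status));
        boost::python::throw_error_already_set();
    }
}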
Code Example #26
void updateKinnect() {
	xnContext.WaitAndUpdateAll();
}
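
WaitAndUpdateAll blocks until every generator in the context has new data. When only one stream matters, OpenNI also provides WaitOneUpdateAll, which returns as soon as the named node updates; a one-line sketch, assuming a depth generator named xnDepthGenerator:

// Block only until the depth generator has a fresh frame.
xnContext.WaitOneUpdateAll(xnDepthGenerator);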
Code Example #27
File: KinectTouch.cpp Project: comoc/KinectTouch
int main() {

	const unsigned int nBackgroundTrain = 30;
	const unsigned short touchDepthMin = 10;
	const unsigned short touchDepthMax = 20;
	const unsigned int touchMinArea = 50;

	const bool localClientMode = true; 					// connect to a local client

	const double debugFrameMaxDepth = 4000; // maximal distance (in millimeters) for 8 bit debug depth frame quantization
	const char* windowName = "Debug";
	const Scalar debugColor0(0,0,128);
	const Scalar debugColor1(255,0,0);
	const Scalar debugColor2(255,255,255);

	int xMin = 110;
	int xMax = 560;
	int yMin = 120;
	int yMax = 320;

	Mat1s depth(480, 640); // 16 bit depth (in millimeters)
	Mat1b depth8(480, 640); // 8 bit depth
	Mat3b rgb(480, 640); // 8 bit RGB color image

	Mat3b debug(480, 640); // debug visualization

	Mat1s foreground(480, 640);
	Mat1b foreground8(480, 640);

	Mat1b touch(480, 640); // touch mask

	Mat1s background(480, 640);
	vector<Mat1s> buffer(nBackgroundTrain);

	CHECK_RC(initOpenNI("niConfig.xml"), "initOpenNI");

	// TUIO server object
	TuioServer* tuio;
	if (localClientMode) {
		tuio = new TuioServer();
	} else {
		tuio = new TuioServer("192.168.0.1",3333,false);
	}
	TuioTime time;

	// create some sliders
	namedWindow(windowName);
	createTrackbar("xMin", windowName, &xMin, 640);
	createTrackbar("xMax", windowName, &xMax, 640);
	createTrackbar("yMin", windowName, &yMin, 480);
	createTrackbar("yMax", windowName, &yMax, 480);

	// create background model (average depth)
	for (unsigned int i=0; i<nBackgroundTrain; i++) {
		CHECK_RC(xnContext.WaitAndUpdateAll(), "xnContext.WaitAndUpdateAll()");
		depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
		buffer[i] = depth.clone(); // deep copy; plain assignment would leave every buffer entry sharing one data pointer
	}
	average(buffer, background);

	while ( waitKey(1) != 27 ) {
		// read available data
		xnContext.WaitAndUpdateAll();

		// update 16 bit depth matrix
		depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
		//xnImgeGenertor.GetGrayscale8ImageMap()



		// update rgb image
		//rgb.data = (uchar*) xnImgeGenertor.GetRGB24ImageMap(); // segmentation fault here
		//cvtColor(rgb, rgb, CV_RGB2BGR);

		// extract foreground by simple subtraction of very basic background model
		foreground = background - depth;

		// find touch mask by thresholding (points that are close to background = touch points)
		touch = (foreground > touchDepthMin) & (foreground < touchDepthMax);

		// extract ROI
		Rect roi(xMin, yMin, xMax - xMin, yMax - yMin);
		Mat touchRoi = touch(roi);

		// find touch points
		vector< vector<Point2i> > contours;
		vector<Point2f> touchPoints;
		findContours(touchRoi, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point2i(xMin, yMin));
		for (unsigned int i=0; i<contours.size(); i++) {
			Mat contourMat(contours[i]);
			// find touch points by area thresholding
			if ( contourArea(contourMat) > touchMinArea ) {
				Scalar center = mean(contourMat);
				Point2i touchPoint(center[0], center[1]);
				touchPoints.push_back(touchPoint);
			}
		}

		// send TUIO cursors
		time = TuioTime::getSessionTime();
		tuio->initFrame(time);

		for (unsigned int i=0; i<touchPoints.size(); i++) { // touch points
			float cursorX = (touchPoints[i].x - xMin) / (xMax - xMin);
			float cursorY = 1 - (touchPoints[i].y - yMin)/(yMax - yMin);
			TuioCursor* cursor = tuio->getClosestTuioCursor(cursorX,cursorY);
			// TODO improve tracking (don't move cursors away, that might be closer to another touch point)
			if (cursor == NULL || cursor->getTuioTime() == time) {
				tuio->addTuioCursor(cursorX,cursorY);
			} else {
				tuio->updateTuioCursor(cursor, cursorX, cursorY);
			}
		}

		tuio->stopUntouchedMovingCursors();
		tuio->removeUntouchedStoppedCursors();
		tuio->commitFrame();

		// draw debug frame
		depth.convertTo(depth8, CV_8U, 255 / debugFrameMaxDepth); // render depth to debug frame
		cvtColor(depth8, debug, CV_GRAY2BGR);
		debug.setTo(debugColor0, touch);  // touch mask
		rectangle(debug, roi, debugColor1, 2); // surface boundaries
		for (unsigned int i=0; i<touchPoints.size(); i++) { // touch points
			circle(debug, touchPoints[i], 5, debugColor2, CV_FILLED);
		}

		// render debug frame (with sliders)
		imshow(windowName, debug);
		//imshow("image", rgb);
	}

	return 0;
}
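
The average() helper used for background training is not shown in this listing. A plausible sketch that computes the per-pixel mean of the buffered depth frames (the project's actual implementation may differ):

#include <opencv2/opencv.hpp>
#include <vector>

// Per-pixel mean of N signed 16-bit depth frames (assumed implementation).
void average(const std::vector<cv::Mat1s>& frames, cv::Mat1s& mean)
{
    cv::Mat1f acc = cv::Mat1f::zeros(frames[0].rows, frames[0].cols);
    for (size_t i = 0; i < frames.size(); ++i)
    {
        cv::Mat1f tmp;
        frames[i].convertTo(tmp, CV_32F);
        acc += tmp;
    }
    acc /= static_cast<float>(frames.size());
    acc.convertTo(mean, CV_16S);
}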
Code Example #28
File: main.cpp Project: kio327/PKgame
// this function is called each frame
void glutDisplay (void)
{

	if(gScene == NULL) return;
	
	gScene->simulate(1.0f/30.0f);
	glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);

	// projection matrix
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	gluPerspective(100.0f, (float)glutGet(GLUT_WINDOW_WIDTH)/(float)glutGet(GLUT_WINDOW_HEIGHT), 1.0f, 10000.0f); // field of view and clip planes

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	gluLookAt(gEye.x, gEye.y, gEye.z, gEye.x + gDir.x, gEye.y + gDir.y, gEye.z + gDir.z, 0.0f, 1.0f, 0.0f);
	
	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	for(int i=0;i<2;i++){
		for(int j=0;j<15;j++){
			oldjpoint[i][j]=jpoint[i][j];
		}
	}
	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	CalculateJoint();

	head.x=(jpoint[0][0].X);
	head.y=(jpoint[0][0].Y);
	head.z=(jpoint[0][0].Z);

	neck.x=(jpoint[0][1].X);
	neck.y=(jpoint[0][1].Y);
	neck.z=(jpoint[0][1].Z);
	
	rshoulder.x=(jpoint[0][2].X);
	rshoulder.y=(jpoint[0][2].Y);
	rshoulder.z=(jpoint[0][2].Z);

	relbow.x=(jpoint[0][3].X*2+oldjpoint[0][3].X)/3;
	relbow.y=(jpoint[0][3].Y*2+oldjpoint[0][3].Y)/3;
	relbow.z=(jpoint[0][3].Z*2+oldjpoint[0][3].Z)/3;

	rhand.x=(jpoint[0][4].X*2+oldjpoint[0][4].X)/3;
	rhand.y=(jpoint[0][4].Y*2+oldjpoint[0][4].Y)/3;
	rhand.z=(jpoint[0][4].Z*2+oldjpoint[0][4].Z)/3;

	lshoulder.x=(jpoint[0][5].X*2+oldjpoint[0][5].X)/3;
	lshoulder.y=(jpoint[0][5].Y*2+oldjpoint[0][5].Y)/3;
	lshoulder.z=(jpoint[0][5].Z*2+oldjpoint[0][5].Z)/3;

	lelbow.x=(jpoint[0][6].X*2+oldjpoint[0][6].X)/3;
	lelbow.y=(jpoint[0][6].Y*2+oldjpoint[0][6].Y)/3;
	lelbow.z=(jpoint[0][6].Z*2+oldjpoint[0][6].Z)/3;

	lhand.x=(jpoint[0][7].X*2+oldjpoint[0][7].X)/3;
	lhand.y=(jpoint[0][7].Y*2+oldjpoint[0][7].Y)/3;
	lhand.z=(jpoint[0][7].Z*2+oldjpoint[0][7].Z)/3;

	torso.x=(jpoint[0][8].X*2+oldjpoint[0][8].X)/3;
	torso.y=(jpoint[0][8].Y*2+oldjpoint[0][8].Y)/3;
	torso.z=(jpoint[0][8].Z*2+oldjpoint[0][8].Z)/3;

	rhip.x=(jpoint[0][9].X*2+oldjpoint[0][9].X)/3;
	rhip.y=(jpoint[0][9].Y*2+oldjpoint[0][9].Y)/3;
	rhip.z=(jpoint[0][9].Z*2+oldjpoint[0][9].Z)/3;

	rknee.x=(jpoint[0][10].X*2+oldjpoint[0][10].X)/3;
	rknee.y=(jpoint[0][10].Y*2+oldjpoint[0][10].Y)/3;
	rknee.z=(jpoint[0][10].Z*2+oldjpoint[0][10].Z)/3;

	rfoot.x=(jpoint[0][11].X*2+oldjpoint[0][11].X)/3;
	rfoot.y=(jpoint[0][11].Y*2+oldjpoint[0][11].Y)/3;
	rfoot.z=(jpoint[0][11].Z*2+oldjpoint[0][11].Z)/3;

	lhip.x=(jpoint[0][12].X*2+oldjpoint[0][12].X)/3;
	lhip.y=(jpoint[0][12].Y*2+oldjpoint[0][12].Y)/3;
	lhip.z=(jpoint[0][12].Z*2+oldjpoint[0][12].Z)/3;

	lknee.x=(jpoint[0][13].X*2+oldjpoint[0][13].X)/3;
	lknee.y=(jpoint[0][13].Y*2+oldjpoint[0][13].Y)/3;
	lknee.z=(jpoint[0][13].Z*2+oldjpoint[0][13].Z)/3;

	lfoot.x=(jpoint[0][14].X*2+oldjpoint[0][14].X)/3;
	lfoot.y=(jpoint[0][14].Y*2+oldjpoint[0][14].Y)/3;
	lfoot.z=(jpoint[0][14].Z*2+oldjpoint[0][14].Z)/3;

	printf("%f, %f, %f\n",rightreduction.x, rightreduction.y, rightreduction.z);
	printf("%f, %f, %f\n",leftreduction.x, leftreduction.y, leftreduction.z);

	if(jpoint[0][8].X!=0.0&&jpoint[0][8].Y!=0.0&&jpoint[0][8].Z!=0.0){

		Head->setGlobalPosition(NxVec3(-head.x+positionx, -head.y+positiony, -head.z+positionz));
		Head->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

		Neck->setGlobalPosition(NxVec3(-neck.x+positionx, -neck.y+positiony, -neck.z+positionz));
		Neck->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

		rightshoulder->setGlobalPosition(NxVec3(-rshoulder.x+positionx, -rshoulder.y+positiony, -rshoulder.z+positionz));
		rightshoulder->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		rightelbow->setGlobalPosition(NxVec3(-relbow.x+positionx, -relbow.y+positiony, -relbow.z+positionz));
		rightelbow->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		righthand->setGlobalPosition(NxVec3(-rhand.x+positionx, -rhand.y+positiony, -rhand.z+positionz));
		righthand->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

		leftshoulder->setGlobalPosition(NxVec3(-lshoulder.x+positionx, -lshoulder.y+positiony, -lshoulder.z+positionz));
		leftshoulder->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		leftelbow->setGlobalPosition(NxVec3(-lelbow.x+positionx, -lelbow.y+positiony, -lelbow.z+positionz));
		leftelbow->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		lefthand->setGlobalPosition(NxVec3(-lhand.x+positionx, -lhand.y+positiony, -lhand.z+positionz));
		lefthand->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

		Torso->setGlobalPosition(NxVec3(-torso.x+positionx, -torso.y+positiony, -torso.z+positionz));
		Torso->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

		righthip->setGlobalPosition(NxVec3(-rhip.x+positionx, -rhip.y+positiony, -rhip.z+positionz));
		righthip->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		rightknee->setGlobalPosition(NxVec3(-rknee.x+positionx, -rknee.y+positiony, -rknee.z+positionz));
		rightknee->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		rightfoot->setGlobalPosition(NxVec3(-rfoot.x+positionx, -rfoot.y+positiony, -rfoot.z+positionz));
		rightfoot->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

		lefthip->setGlobalPosition(NxVec3(-lhip.x+positionx, -lhip.y+positiony, -lhip.z+positionz));
		lefthip->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		leftknee->setGlobalPosition(NxVec3(-lknee.x+positionx, -lknee.y+positiony, -lknee.z+positionz));
		leftknee->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));
		leftfoot->setGlobalPosition(NxVec3(-lfoot.x+positionx, -lfoot.y+positiony, -lfoot.z+positionz));
		leftfoot->setLinearVelocity(NxVec3(0.0f, 0.0f, 0.0f));

	}

	glPushMatrix();
	glBegin(GL_POLYGON);
	glColor4f(0.2f,1.2f,0.1f,1.0f); // ground color
	glVertex3f( 10000.0f,-1.0f, 10000.0f);
	glVertex3f( 10000.0f,-1.0f,-10000.0f);
	glVertex3f(-10000.0f,-1.0f,-10000.0f);
	glVertex3f(-10000.0f,-1.0f, 10000.0f);
	glEnd();
	glPopMatrix();

	// draw the ground grid lines
	glPushMatrix();
	glColor4f(1.0f, 1.0f, 1.0f,1.0f);
	glLineWidth(1);
	glBegin(GL_LINES);
	for(int i=-10000; i< 10000; i+=500) {
		glVertex3f( i    , -0.2f,-10000 );
		glVertex3f( i    , -0.2f, 10000 );
		glVertex3f(-10000, -0.2f, i     ); 
		glVertex3f( 10000, -0.2f, i     ); 
	}
	glEnd();
	glPopMatrix();
	
	switch(gameflag){
		case 0: // start screen
			glViewport( 0, 0, (float)glutGet(GLUT_WINDOW_WIDTH),(float)glutGet(GLUT_WINDOW_HEIGHT) );
			glMatrixMode( GL_PROJECTION );
			glLoadIdentity();
			glOrtho( 0.0f, (float)glutGet(GLUT_WINDOW_WIDTH),(float)glutGet(GLUT_WINDOW_HEIGHT), 0.0f, 0.0f, 1.0f );
			glMatrixMode( GL_MODELVIEW );
			glLoadIdentity();

			glPushMatrix();
			glColor3d(1.0, 1.0, 0.0);
			glRasterPos3d(GL_WIN_SIZE_X/2-80, GL_WIN_SIZE_Y/2, 0.0);
			drawBitmapString(GLUT_BITMAP_HELVETICA_18,"Push 's' -> START");
			glPopMatrix();
			printf("Push 's' -> START\n");
			break;

		case 1: // game over screen
			glViewport( 0, 0, (float)glutGet(GLUT_WINDOW_WIDTH),(float)glutGet(GLUT_WINDOW_HEIGHT) );
			glMatrixMode( GL_PROJECTION );
			glLoadIdentity();
			glOrtho( 0.0f, (float)glutGet(GLUT_WINDOW_WIDTH),(float)glutGet(GLUT_WINDOW_HEIGHT), 0.0f, 0.0f, 1.0f );
			glMatrixMode( GL_MODELVIEW );
			glLoadIdentity();

			glPushMatrix();
			glColor3d(0.0, 0.0, 1.0);
			glRasterPos3d(GL_WIN_SIZE_X/2-60, GL_WIN_SIZE_Y/2+20, 0.0);
			drawBitmapString(GLUT_BITMAP_HELVETICA_18,"GAME OVER");
			glPopMatrix();
			glPushMatrix();
			glColor3d(1.0, 1.0, 0.0);
			glRasterPos3d(GL_WIN_SIZE_X/2-65, GL_WIN_SIZE_Y/2, 0.0);
			drawBitmapString(GLUT_BITMAP_HELVETICA_18,gametime);
			glPopMatrix();
			glPushMatrix();
			glColor3d(1.0, 0.0, 0.0);
			glRasterPos3d(GL_WIN_SIZE_X/2-90, GL_WIN_SIZE_Y/2-20, 0.0);
			drawBitmapString(GLUT_BITMAP_HELVETICA_18,"Push 'r' -> RESTART");
			glPopMatrix();
			printf("GAME OVER\ntime -> %f\n",(double)(t2 - t1) / CLOCKS_PER_SEC);
			break;

		case 2: // in-game screen

			glEnable(GL_DEPTH_TEST);
			// draw the actors
			int nbActors = gScene->getNbActors();
			NxActor** actors = gScene->getActors();
			while(nbActors--)
			{
				NxActor* actor = *actors++;
				if(!actor->userData) continue;

				glPushMatrix();
				glEnable(GL_LIGHTING);
				float glMat[16];
				actor->getGlobalPose().getColumnMajor44(glMat);
				glMultMatrixf(glMat);
				myData* mydata = (myData*)actor->userData;
				int shape = mydata->shape;
				int color = mydata->color;

				switch(shape){
					case 1:
						glColor4f(0.0f,0.0f,1.0f,1.0f);
						if(mydata->houkou==0){
						link(pole[0], pole[4]);
						}
						else if(mydata->houkou==1){
						tlink(3000.0,2000.0,6000.0,-3000.0,2000.0,6000.0);
						tlink(-3000.0,2000.0,4000.0,-3000.0,2000.0,6000.0);
						}
						else if(mydata->houkou==2){
						ylink(3000.0,2000.0,4000.0,-3000.0,2000.0,4000.0);
						ylink(3000.0,2000.0,6000.0,-3000.0,2000.0,6000.0);
						}
						else if(mydata->houkou==3){ // panel
						glColor4f(0.0f,1.0f,1.0f,1.0f);
						cuboid(mydata->width*2,mydata->length*2, mydata->height*2);
						}
						else if(mydata->houkou==4){
						}
						break;
					case 2:
						if(mydata->ball == 1){
						glColor4f(1.0f,0.0f,0.0f,1.0f); // ball color
						glutSolidSphere(mydata->size,15,15);
						}
						else if(mydata->ball == 0){
							glColor4f(1.0f,1.0f,0.0f,1.0f);
							glutSolidSphere(mydata->size,15,15);
						}
						break;
					case 4:
						glColor4f(1.0f,0.0f,0.0f,1.0f);
						cylinder(50, 100, 50);
						break;
				}
				glDisable(GL_LIGHTING);
				glPopMatrix();


			}

			glDisable(GL_DEPTH_TEST);
			t2 = clock();
			balltimer2 = clock();
			btime=(double)(balltimer2 - balltimer1) / CLOCKS_PER_SEC;
			gtime=(double)(t2 - t1) / CLOCKS_PER_SEC;
			printf("%f\n",gtime);

			if(gtime<=10.0){
				balldire=2.0;
				ballspeed=250.0;
			}
			else if(gtime<=20.0){
				balldire=1.5;
				ballspeed=300.0;
			}
			else if(gtime<=30.0){
				balldire=1.0;
				ballspeed=350.0;
			}
			else if(gtime<=40.0){
				balldire=0.5;
				ballspeed=400.0;
			}
			break;
	}

	// finish the frame
	gScene->flushStream();
	gScene->fetchResults(NX_RIGID_BODY_FINISHED, true);
	glutSwapBuffers();
}
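
The repeated (current*2 + old)/3 expressions above implement a simple 2:1 temporal blend that damps skeleton jitter between frames. The same filter as one hypothetical helper:

// Blend the current joint sample with the previous one (2:1 weighting).
static XnPoint3D smoothJoint(const XnPoint3D& cur, const XnPoint3D& prev)
{
    XnPoint3D out;
    out.X = (cur.X * 2 + prev.X) / 3;
    out.Y = (cur.Y * 2 + prev.Y) / 3;
    out.Z = (cur.Z * 2 + prev.Z) / 3;
    return out;
}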
Code Example #29
int main(int argc, char **argv) {
    ros::init(argc, argv, "openni_hand_tracker");
    ros::NodeHandle nh;

    string configFilename = ros::package::getPath("openni_tracker") + "/openni_tracker.xml";
    XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
    CHECK_RC(nRetVal, "InitFromXml");

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
    CHECK_RC(nRetVal, "Find depth generator");
	
    // Create generators
    nRetVal = g_GestureGenerator.Create(g_Context);
    CHECK_RC(nRetVal, "Unable to create GestureGenerator.");

    nRetVal = g_HandsGenerator.Create(g_Context);
    CHECK_RC(nRetVal, "Unable to create HandsGenerator.");
    
    ROS_INFO("Create Generator Success");

/*

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK) {
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
		ROS_INFO("Supplied user generator doesn't support skeleton");
		return 1;
	}

    XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	XnCallbackHandle hCalibrationCallbacks;
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration()) {
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
			ROS_INFO("Pose required, but not supported");
			return 1;
		}

		XnCallbackHandle hPoseCallbacks;
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);

		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
*/

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	ros::Rate r(30);

        
        ros::NodeHandle pnh("~");
        string frame_id("openni_depth_frame");
        pnh.getParam("camera_frame_id", frame_id);
                
	while (ros::ok()) {
		g_Context.WaitAndUpdateAll();
		r.sleep();
	}

	g_Context.Shutdown();
	return 0;
}
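
As written, this example creates the gesture and hands generators but never registers callbacks, so the loop updates the context without reporting anything. A sketch of the registration step that would normally precede StartGeneratingAll (Gesture_Recognized and Gesture_Process are hypothetical handlers matching OpenNI's callback signatures):

XnCallbackHandle hGestureCallbacks;
g_GestureGenerator.RegisterGestureCallbacks(Gesture_Recognized, Gesture_Process,
                                            NULL, hGestureCallbacks);
g_GestureGenerator.AddGesture("Wave", NULL);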
Code Example #30
File: OniTrackApp.cpp Project: pholz/OniTrack
void OniTrackApp::update()
{
	context.WaitAndUpdateAll();
	((XnVSessionManager*)pSessionGenerator)->Update(&context);
}