Example #1
// Calibration-complete callback: start tracking on success, otherwise request calibration again.
void XN_CALLBACK_TYPE Kinect::CalibrationEnd(xn::SkeletonCapability& skeleton, XnUserID user,
	XnCalibrationStatus eStatus, void* pCookie)
{  
	if( eStatus == XN_CALIBRATION_STATUS_OK )
	{
		skeleton.StartTracking(user);
	}
	else
	{
		skeleton.RequestCalibration( user, true );
	}
}
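A callback like this only fires once it has been registered with the skeleton capability. A minimal registration sketch, assuming g_UserGenerator is an initialized xn::UserGenerator and Kinect::CalibrationEnd is a static member; the handle name is illustrative:

// Registration sketch (assumed setup; Example #10 shows the full flow).
XnCallbackHandle hCalibrationComplete;
g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(
    Kinect::CalibrationEnd, NULL, hCalibrationComplete);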
Example #2
// Callback: Detected a pose
void XN_CALLBACK_TYPE UserPose_PoseDetected(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserID nId, void* pCookie){
    XnUInt32 epochTime = 0;
    xnOSGetEpochTime(&epochTime);
    LOG_D("%d Pose %s detected for user %d", epochTime, strPose, nId);
    g_UserGenerator.GetPoseDetectionCap().StopPoseDetection(nId);
    g_SkeletonCap.RequestCalibration(nId, TRUE);
}
Example #3
// Callback: New user was detected
void XN_CALLBACK_TYPE User_NewUser(xn::UserGenerator& generator, XnUserID nId, void* pCookie){
    XnUInt32 epochTime = 0;
    xnOSGetEpochTime(&epochTime);
    LOG_D("%d New User %d", epochTime, nId);
    // New user found
    if (g_bNeedPose){
        g_UserGenerator.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
    }
    else{
        g_SkeletonCap.RequestCalibration(nId, TRUE);
    }
}
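Example #3 registers only the new-user half of the pair that RegisterUserCallbacks expects (see Example #10). A sketch of the matching User_LostUser callback, assuming the same LOG_D macro:

// Sketch of the lost-user counterpart (assumed; mirrors User_NewUser above).
void XN_CALLBACK_TYPE User_LostUser(xn::UserGenerator& generator, XnUserID nId, void* pCookie){
    XnUInt32 epochTime = 0;
    xnOSGetEpochTime(&epochTime);
    LOG_D("%d Lost user %d", epochTime, nId);
}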
Example #4
void XN_CALLBACK_TYPE CalibrationEnd(xn::SkeletonCapability& skeleton, XnUserID user, XnBool bSuccess, void* pCookie)
{
	printf("Calibration complete for user %d: %s\n", user, bSuccess?"Success":"Failure");
	if (bSuccess)
	{
		skeleton.StartTracking(user);
	}
	else
	{
		g_UserGenerator.GetPoseDetectionCap().StartPoseDetection("Psi", user);
	}
}
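Note the XnBool parameter: this is the older calibration-end signature used with RegisterCalibrationCallbacks in OpenNI 1.x, unlike the XnCalibrationStatus form in Examples #1 and #7. A registration sketch under that assumption; the CalibrationStart handler and handle name are hypothetical:

// Legacy registration sketch (assumed OpenNI 1.x API; newer code uses
// RegisterToCalibrationStart/RegisterToCalibrationComplete instead).
XnCallbackHandle hCalibration;
g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(
    CalibrationStart, CalibrationEnd, NULL, hCalibration);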
Example #5
// Draw the skeleton
void DrawSkelton(XnUserID player, int idx){

    // Joint pairs defining the start and end of each bone to draw
    XnSkeletonJoint joints[][2] = {
        {XN_SKEL_HEAD, XN_SKEL_NECK},
        {XN_SKEL_NECK, XN_SKEL_LEFT_SHOULDER},
        {XN_SKEL_LEFT_SHOULDER, XN_SKEL_LEFT_ELBOW},
        {XN_SKEL_LEFT_ELBOW, XN_SKEL_LEFT_HAND},
        {XN_SKEL_NECK, XN_SKEL_RIGHT_SHOULDER},
        {XN_SKEL_RIGHT_SHOULDER, XN_SKEL_RIGHT_ELBOW},
        {XN_SKEL_RIGHT_ELBOW, XN_SKEL_RIGHT_HAND},
        {XN_SKEL_LEFT_SHOULDER, XN_SKEL_TORSO},
        {XN_SKEL_RIGHT_SHOULDER, XN_SKEL_TORSO},
        {XN_SKEL_TORSO, XN_SKEL_LEFT_HIP},
        {XN_SKEL_LEFT_HIP, XN_SKEL_LEFT_KNEE},
        {XN_SKEL_LEFT_KNEE, XN_SKEL_LEFT_FOOT},
        {XN_SKEL_TORSO, XN_SKEL_RIGHT_HIP},
        {XN_SKEL_RIGHT_HIP, XN_SKEL_RIGHT_KNEE},
        {XN_SKEL_RIGHT_KNEE, XN_SKEL_RIGHT_FOOT},
        {XN_SKEL_LEFT_HIP, XN_SKEL_RIGHT_HIP}
    };

    XnSkeletonJointPosition joint1, joint2;
    int nJointsCount = sizeof(joints) / sizeof(joints[0]);
    int color_idx = idx;
    // Clamp the color index to the palette size
    if( color_idx >= (int)(sizeof(g_Colors) / sizeof(g_Colors[0])) ){
        color_idx = (int)(sizeof(g_Colors) / sizeof(g_Colors[0])) - 1;
    }
       
    for(int i = 0; i < nJointsCount;i++){
        g_SkeletonCap.GetSkeletonJointPosition(player, joints[i][0], joint1);
        g_SkeletonCap.GetSkeletonJointPosition(player, joints[i][1], joint2);        
        // Skip this bone if either joint position is low-confidence
        if (joint1.fConfidence < 0.2 || joint2.fConfidence < 0.2){
            continue;
        }
        
        XnPoint3D pt[2];
        pt[0] = joint1.position;
        pt[1] = joint2.position;
        
        g_DepthGenerator.ConvertRealWorldToProjective(2, pt, pt);
        
        // Connect the two joints with a line
        cvLine( g_rgbImage, cvPoint(pt[0].X, pt[0].Y), cvPoint(pt[1].X, pt[1].Y), g_Colors[color_idx], 1, CV_AA);
        // Fill a small circle at each joint
        cvCircle(g_rgbImage, cvPoint(pt[0].X, pt[0].Y), 2, g_Colors[color_idx], -1, CV_AA, 0);
        cvCircle(g_rgbImage, cvPoint(pt[1].X, pt[1].Y), 2, g_Colors[color_idx], -1, CV_AA, 0);
    }
    
}
Example #6
/**
 * Collage an image onto the body (at the given joint)
 */
void drawBodyPreLoad(xn::DepthGenerator& depth, xn::SkeletonCapability& capability, XnSkeletonJoint joint, XnUserID user, XnMapOutputMode mapMode, IplImage* preLoadImage, IplImage **rgbImage) {
	XnSkeletonJointPosition pos;
    
    IplImage *partImage = NULL; 
    IplImage *fallImage = NULL; 
    
	// Get the joint position
	capability.GetSkeletonJointPosition(user, joint, pos);
	XnPoint3D pReal[1] = {pos.position};
	XnPoint3D pProjective[1];
    
	// Convert from real-world coordinates to projective (screen) coordinates
	depth.ConvertRealWorldToProjective(1, pReal, pProjective);
    
    
    // Image to overlay
    partImage = preLoadImage;
    fallImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);    
    CvPoint2D32f original[3];
    CvPoint2D32f transform[3];
    
    original[0] = cvPoint2D32f(0, 0);
    original[1] = cvPoint2D32f(mapMode.nXRes, 0);
    original[2] = cvPoint2D32f( 0, mapMode.nYRes);
    
    CvSize sizeOfPart = cvGetSize(partImage);
    
    int transX = pProjective[0].X - (sizeOfPart.width / 2);
    int transY = pProjective[0].Y - (sizeOfPart.height / 2);
    
    transform[0] = cvPoint2D32f( transX, transY);
    transform[1] = cvPoint2D32f( transX + mapMode.nXRes, transY);
    transform[2] = cvPoint2D32f( transX , transY + mapMode.nYRes);
    
    // Build the affine transform matrix
    CvMat *affineMatrix = cvCreateMat(2, 3, CV_32FC1);
    cvGetAffineTransform(original, transform, affineMatrix);
    
    // Translate
    cvWarpAffine(partImage, fallImage, affineMatrix, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    
    
    // Composite the overlay onto the output image
    fallPartImage(fallImage, *rgbImage);
    
    // Release (preLoadImage is owned by the caller, so it is not released here)
    cvReleaseMat(&affineMatrix);
    cvReleaseImage(&fallImage);
}
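A hypothetical call site for this helper; userId, torsoImage, and the surrounding setup are assumptions, while the OpenNI names match the other examples:

// Hypothetical usage: overlay torsoImage, centered on the user's torso joint.
xn::SkeletonCapability skeletonCap = g_UserGenerator.GetSkeletonCap();
drawBodyPreLoad(g_DepthGenerator, skeletonCap, XN_SKEL_TORSO,
                userId, mapMode, torsoImage, &g_rgbImage);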
Example #7
void XN_CALLBACK_TYPE UserCalibration_CalibrationComplete(xn::SkeletonCapability& capability, XnUserID nId, XnCalibrationStatus eStatus, void* pCookie){

    XnUInt32 epochTime = 0;
    xnOSGetEpochTime(&epochTime);
    if (eStatus == XN_CALIBRATION_STATUS_OK){
        // Calibration succeeded
        LOG_D("%d Calibration complete, start tracking user %d", epochTime, nId);		
        g_SkeletonCap.StartTracking(nId);
    }
    else{
        // Calibration failed
        LOG_D("%d Calibration failed for user %d\n", epochTime, nId);
        if(eStatus==XN_CALIBRATION_STATUS_MANUAL_ABORT){
            LOG_D("%s", "Manual abort occured, stop attempting to calibrate!");
            return;
        }
        if (g_bNeedPose){
            g_UserGenerator.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
        }
        else{
            g_SkeletonCap.RequestCalibration(nId, TRUE);
        }
    }
}
Example #8
void XN_CALLBACK_TYPE whu_CalibrationEnd(xn::SkeletonCapability& skeleton, XnUserID user, XnCalibrationStatus eStatus, void* pCookie)
{
	cout<<"user"<<user<<": calibrate ";
	if(eStatus == XN_CALIBRATION_STATUS_OK)
	{
		cout<<"success"<<endl;
		skeleton.StartTracking(user);
	}
	else
	{
		cout<<"failed"<<endl;
		//skeleton.RequestCalibration(user,true); // uncomment this line if calibration should restart directly, without requiring the surrender (Psi) pose first
		((xn::UserGenerator*)pCookie)->GetPoseDetectionCap().StartPoseDetection( "Psi", user );
	}	
}
Example #9
// callback function of skeleton: calibration end
static void XN_CALLBACK_TYPE CalibrationEnd( xn::SkeletonCapability& skeleton, XnUserID user, XnCalibrationStatus calibrationError, void* pCookie )
{
	std::cout << "Calibration complete for user " << user << ", ";
	if( calibrationError==XN_CALIBRATION_STATUS_OK )
	{
		std::cout << "Success" << std::endl;
		skeleton.StartTracking( user );
		//userGenerator.GetSkeletonCap().SaveCalibrationDataToFile(user, "UserCalibration.txt" );
	}
	else
	{
		std::cout << "Failure" << std::endl;
		//For the current version of OpenNI, only Psi pose is available
		((xn::UserGenerator*)pCookie)->GetPoseDetectionCap().StartPoseDetection( "Psi", user );
	}
}
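The commented-out SaveCalibrationDataToFile call hints at a shortcut: OpenNI 1.x also provides LoadCalibrationDataFromFile, so a saved calibration can be reapplied instead of re-running pose detection. A sketch of that path, reusing the file name from the comment above:

// Sketch (assumed API): reuse a saved calibration instead of asking for the Psi pose.
if (skeleton.LoadCalibrationDataFromFile(user, "UserCalibration.txt") == XN_STATUS_OK)
{
	skeleton.StartTracking(user);
}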
Example #10
int main(int argc, char **argv)
{
    XnStatus nRetVal = XN_STATUS_OK;
    xn::EnumerationErrors errors;
    
    if( USE_RECORED_DATA ){
        g_Context.Init();
        g_Context.OpenFileRecording(RECORD_FILE_PATH);
        xn::Player player;
        
        // Get the Player node
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
        CHECK_RC(nRetVal, "Find player");
        
        LOG_D("PlaybackSpeed: %d", player.GetPlaybackSpeed());
        
        xn::NodeInfoList nodeList;
        player.EnumerateNodes(nodeList);
        for( xn::NodeInfoList::Iterator it = nodeList.Begin();
            it != nodeList.End(); ++it){
            
            if( (*it).GetDescription().Type == XN_NODE_TYPE_IMAGE ){
                nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
                CHECK_RC(nRetVal, "Find image node");
                LOG_D("%s", "ImageGenerator created.");
            }
            else if( (*it).GetDescription().Type == XN_NODE_TYPE_DEPTH ){
                nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
                CHECK_RC(nRetVal, "Find depth node");
                LOG_D("%s", "DepthGenerator created.");            
            }
            else{
                LOG_D("%s %s %s", ::xnProductionNodeTypeToString((*it).GetDescription().Type ),
                      (*it).GetInstanceName(),
                      (*it).GetDescription().strName);
            }
        }
    }
    else{
        LOG_I("Reading config from: '%s'", CONFIG_XML_PATH);
        
        nRetVal = g_Context.InitFromXmlFile(CONFIG_XML_PATH, g_scriptNode, &errors);
        if (nRetVal == XN_STATUS_NO_NODE_PRESENT){
            XnChar strError[1024];
            errors.ToString(strError, 1024);
            LOG_E("%s\n", strError);
            return (nRetVal);
        }
        else if (nRetVal != XN_STATUS_OK){
            LOG_E("Open failed: %s", xnGetStatusString(nRetVal));
            return (nRetVal);
        }
        
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
        CHECK_RC(nRetVal,"No depth");
        
        // Get the ImageGenerator
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
        CHECK_RC(nRetVal, "Find image generator");
        
    }
    // Get the UserGenerator (create one if none exists)
    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
    if(nRetVal!=XN_STATUS_OK){
        nRetVal = g_UserGenerator.Create(g_Context); 
        CHECK_RC(nRetVal, "Create user generator");
    }

    
    XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)){
        LOG_E("%s", "Supplied user generator doesn't support skeleton");
        return 1;
    }
    nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    CHECK_RC(nRetVal, "Register to user callbacks");

    g_SkeletonCap = g_UserGenerator.GetSkeletonCap();
    nRetVal = g_SkeletonCap.RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
    CHECK_RC(nRetVal, "Register to calibration start");

    nRetVal = g_SkeletonCap.RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
    CHECK_RC(nRetVal, "Register to calibration complete");
    
    if (g_SkeletonCap.NeedPoseForCalibration()){
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){
            LOG_E("%s", "Pose required, but not supported");
            return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_SkeletonCap.GetCalibrationPose(g_strPose);
    }
    
    g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    
    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");
    
    // Create the image buffer for display
    XnMapOutputMode mapMode;
    g_ImageGenerator.GetMapOutputMode(mapMode);
    g_rgbImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);

    LOG_I("%s", "Starting to run");
    if(g_bNeedPose){
        LOG_I("%s", "Assume calibration pose");
    }

    xn::Recorder recorder;
    if( DO_RECORED && !USE_RECORED_DATA ){
        // Create the recorder
        LOG_I("%s", "Setup Recorder");
        nRetVal = recorder.Create(g_Context);
        CHECK_RC(nRetVal, "Create recorder");
        
        // Set the output destination
        nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORD_FILE_PATH);
        CHECK_RC(nRetVal, "Set recorder destination file");
        
        // Start recording the depth and camera streams
        nRetVal = recorder.AddNodeToRecording(g_DepthGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add depth node to recording");
        nRetVal = recorder.AddNodeToRecording(g_ImageGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add image node to recording");
        
        LOG_I("%s", "Recorder setup done.");
    }

    while (!xnOSWasKeyboardHit())
    {
        g_Context.WaitOneUpdateAll(g_UserGenerator);
        if( DO_RECORED  && !USE_RECORED_DATA ){
            nRetVal = recorder.Record();
            CHECK_RC(nRetVal, "Record");
        }

        // Get the raw camera image data
        xn::ImageMetaData imageMetaData;
        g_ImageGenerator.GetMetaData(imageMetaData);
        // Copy the frame into the IplImage buffer
        xnOSMemCopy(g_rgbImage->imageData, imageMetaData.RGB24Data(), g_rgbImage->imageSize);
        // Convert RGB to BGR for OpenCV display
        cvCvtColor(g_rgbImage, g_rgbImage, CV_RGB2BGR);

        // Get the user-labeled pixels from the UserGenerator
        xn::SceneMetaData sceneMetaData;
        g_UserGenerator.GetUserPixels(0, sceneMetaData);
        
        XnUserID allUsers[MAX_NUM_USERS];
        XnUInt16 nUsers = MAX_NUM_USERS;
        g_UserGenerator.GetUsers(allUsers, nUsers);
        for (int i = 0; i < nUsers; i++) {
            
            // Is this user calibrated and being tracked?
            if (g_SkeletonCap.IsTracking(allUsers[i])) {
                // Draw the skeleton
                DrawSkelton(allUsers[i], i);
            }
        }
        
        // Display
        cvShowImage("User View", g_rgbImage);

        // Exit when ESC is pressed
        if (cvWaitKey(10) == 27) {
            break;
        }
    }

    if( !USE_RECORED_DATA ){
        g_scriptNode.Release();
    }
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Context.Release();

	if (g_rgbImage != NULL) {
		cvReleaseImage(&g_rgbImage);	
	}
	g_Context.Shutdown();

    
    return 0;
}
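Example #10 relies on a CHECK_RC macro that this listing never defines. The stock OpenNI samples define it roughly as follows; this is a sketch, not code taken from the listing:

// CHECK_RC as commonly defined in the OpenNI sample code (assumed here):
// log the failure and return from the enclosing function.
#define CHECK_RC(nRetVal, what)                                          \
    if (nRetVal != XN_STATUS_OK)                                         \
    {                                                                    \
        printf("%s failed: %s\n", what, xnGetStatusString(nRetVal));     \
        return nRetVal;                                                  \
    }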
Example #11
// Collage an image onto the left upper arm (LEFT_ELBOW - LEFT_SHOULDER)
void drawLeftArm(xn::DepthGenerator& depth, xn::SkeletonCapability& capability, XnUserID user, XnMapOutputMode mapMode, IplImage* preLoadImage, IplImage **rgbImage) {
	XnSkeletonJointPosition pos1,pos2;
  
    IplImage *partImage = NULL; 
    IplImage *fallImage = NULL; 
    
	// Get the elbow joint position
	capability.GetSkeletonJointPosition(user, XN_SKEL_LEFT_ELBOW, pos1);
	XnPoint3D pReal1[1] = {pos1.position};
	XnPoint3D pProjective1[1];
	// Convert from real-world coordinates to projective (screen) coordinates
	depth.ConvertRealWorldToProjective(1, pReal1, pProjective1);
    
    // Get the shoulder joint position
	capability.GetSkeletonJointPosition(user, XN_SKEL_LEFT_SHOULDER, pos2);
	XnPoint3D pReal2[1] = {pos2.position};
	XnPoint3D pProjective2[1];
	// Convert from real-world coordinates to projective (screen) coordinates
	depth.ConvertRealWorldToProjective(1, pReal2, pProjective2);
    
    // Image to overlay
    partImage = preLoadImage;
    fallImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);    
    CvPoint2D32f original[3];
    CvPoint2D32f transform[3];
    
    original[0] = cvPoint2D32f(0, 0);
    original[1] = cvPoint2D32f(mapMode.nXRes, 0);
    original[2] = cvPoint2D32f( 0, mapMode.nYRes);
    
    CvSize sizeOfPart = cvGetSize(partImage);
    
    // Elbow position
    int transX1 = pProjective1[0].X;
    int transY1 = pProjective1[0].Y;
    // Shoulder position
    int transX2 = pProjective2[0].X;
    int transY2 = pProjective2[0].Y;
    // Midpoint between elbow and shoulder
    int transX3 = (transX1 + transX2) / 2;
    int transY3 = (transY1 + transY2) / 2;
    // Top-left origin of the overlay image
    int transX = transX3 - (sizeOfPart.width / 2);
    int transY = transY3 - (sizeOfPart.height / 2);
    // Angle of the limb in degrees (cvFastArctan returns 0..360)
    float ang = cvFastArctan(transY2 - transY1, transX2 - transX1); //+ cvFastArctan(transY1, transX1);
    
    transform[0] = cvPoint2D32f( transX, transY);
    transform[1] = cvPoint2D32f( transX + mapMode.nXRes, transY);
    transform[2] = cvPoint2D32f( transX, transY + mapMode.nYRes);
    
    // Build the affine transform matrix
    CvMat *affineMatrix = cvCreateMat(2, 3, CV_32FC1);
    cvGetAffineTransform(original, transform, affineMatrix);
    
    // Translate
    cvWarpAffine(partImage, fallImage, affineMatrix, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    
    
    // Build the rotation matrix around the limb midpoint
    CvPoint2D32f center = cvPoint2D32f(transX3, transY3);
    IplImage *fallImage2 = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3); 
    CvMat *rotationMatrix = cvCreateMat(2, 3, CV_32FC1);
    cv2DRotationMatrix(center, 90.0 - ang, 1.0, rotationMatrix);
    // Rotate
    cvWarpAffine(fallImage, fallImage2, rotationMatrix, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    
    // Composite the overlay onto the output image
    fallPartImage(fallImage2, *rgbImage);
    
    // Release (preLoadImage is owned by the caller)
    cvReleaseMat(&affineMatrix);
    cvReleaseMat(&rotationMatrix);
    cvReleaseImage(&fallImage);
    cvReleaseImage(&fallImage2);
}