// Applies the "AlternativeViewPoint" XML opcode: switches hNode to render
// from the viewpoint of the node whose name is given in the element text.
// Returns XN_STATUS_INVALID_OPERATION when the capability is unsupported,
// otherwise propagates the status of the lookup / viewpoint calls.
XnStatus xnConfigureAlternativeViewPoint(XnNodeHandle hNode, const TiXmlElement* pOpcode)
{
	// The node must advertise the capability before its viewpoint may change.
	if (!xnIsCapabilitySupported(hNode, XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		return XN_STATUS_INVALID_OPERATION;
	}

	// Resolve the target node by the name stored in the XML element body.
	XnContext* pContext = xnGetContextFromNodeHandle(hNode);
	XnNodeHandle hTarget = NULL;
	XnStatus nStatus = xnGetNodeHandleByName(pContext, pOpcode->GetText(), &hTarget);
	XN_IS_STATUS_OK(nStatus); // macro: early-returns nStatus on failure

	nStatus = xnSetViewPoint(hNode, hTarget);
	XN_IS_STATUS_OK(nStatus);

	return (XN_STATUS_OK);
}
//-------------------------------------------------------------- void testApp::setup(){ ofDisableArbTex(); ofSetTextureWrap(); //configure FBO initFrameBuffer(); //camera config #ifdef KINECT context.setupUsingXMLFile(); image.setup(&context); depth.setup(&context); xn::DepthGenerator& depthGen = depth.getXnDepthGenerator(); xn::ImageGenerator& imageGen = image.getXnImageGenerator(); XnStatus ret = xnSetViewPoint(depthGen, imageGen); cout << "Using kinect" << endl; #else //ofSetLogLevel(OF_LOG_VERBOSE); //grabber.listDevices(); //grabber.setDeviceID(7); if(grabber.initGrabber(640, 480)){ cout << "Using grabber" << endl; } else { cout << "MASSIVE FAIL" <<endl; } #endif convert.allocate(640, 480); //conversion of camera image to grayscale gray.allocate(640, 480); //grayscale tracking image kinectImage.allocate(640, 480); //image from kinect kinectDepthImage.allocate(640, 480); //Depth image from kinect finalMaskImage.allocate(640, 480);; //final composition mask sceneDepthImage.allocate(640, 480);; //scenes depthmap image finalImage.allocate(640, 480); sceneImage.allocate(640,480); pixelBuf = new unsigned char[640*480]; //temp buffer for kinect depth strea colorPixelBuf = new unsigned char[640*480*3]; //temp buffer for kinect image sceneDepthBuf = new unsigned short[640 * 480]; //depth buffer from our rendered scene kinectDepthBuf = new unsigned short[640 * 480]; //camera image buffer finalBuf = new unsigned char[640 * 480]; //final mask buffer finalImageBuf = new unsigned char[640 * 480 * 3]; //final Image buffer sceneBuffer = new unsigned char[640 * 480 * 3]; //copy of the scene in the FBO bDraw = false; //tracker = new ARToolKitPlus::TrackerSingleMarkerImpl<6,6,6, 1, 8>(width,height); tracker = new ARToolKitPlus::TrackerMultiMarkerImpl<6,6,6, 1, 64>(width,height); tracker->setPixelFormat(ARToolKitPlus::PIXEL_FORMAT_LUM); //markerboard_480-499.cfg if( !tracker->init( (const char *)ofToDataPath("bluedot.cfg").c_str(), (const char *)ofToDataPath("conf.cfg").c_str(), 
1.0f, 1000.0f) ) // load std. ARToolKit camera file { printf("ERROR: init() failed\n"); delete tracker; return; } // the marker in the BCH test image has a thin border... tracker->setBorderWidth(0.125f); // set a threshold. alternatively we could also activate automatic thresholding //tracker->setThreshold(150); tracker->activateAutoThreshold(true); // let's use lookup-table undistortion for high-speed // note: LUT only works with images up to 1024x1024 tracker->setUndistortionMode(ARToolKitPlus::UNDIST_LUT); // RPP is more robust than ARToolKit's standard pose estimator tracker->setPoseEstimator(ARToolKitPlus::POSE_ESTIMATOR_RPP); tracker->setImageProcessingMode(ARToolKitPlus::IMAGE_FULL_RES); // switch to simple ID based markers // use the tool in tools/IdPatGen to generate markers tracker->setMarkerMode(ARToolKitPlus::MARKER_ID_SIMPLE); netThread = new NetworkThread(this); netThread->start(); //load textures ofDisableArbTex(); ofSetTextureWrap(); textures[0].loadImage("grass.png"); textures[1].loadImage("cobble.png"); textures[2].loadImage("dirt.png"); textures[3].loadImage("lava.png"); textures[4].loadImage("rock.png"); textures[5].loadImage("sand.png"); textures[6].loadImage("snow.png"); textures[7].loadImage("tree.png"); textures[8].loadImage("leaves.png"); mapWidth = 20; mapHeight = 20; mapDepth = 20; mapLocked = false; //fill our 3d vector with 20x20x20 mapLocked = true; array3D.resize(mapWidth); for (int i = 0; i < mapWidth; ++i) { array3D[i].resize(mapHeight); for (int j = 0; j < mapHeight; ++j) array3D[i][j].resize(mapDepth); } //create block face data and clear map Block b; vList[0] = ofxVec3f(0.0f, 0.0f, -1.0f); vList[1] = ofxVec3f(-1.0f, 0.0f, -1.0f); vList[2] = ofxVec3f(-1.0f, 0.0f, 0.0f); vList[3] = ofxVec3f(0.0f, 0.0f, 0.0f); vList[4] = ofxVec3f(-1.0f, -1.0f, 0.0f); vList[5] = ofxVec3f(0.0f, -1.0f, 0.0f); vList[6] = ofxVec3f(0.0f, -1.0f, -1.0f); vList[7] = ofxVec3f(-1.0f, -1.0f, -1.0f); //vertex indices for faces const static int faceVals[6][4] = { 
{2, 1, 0, 3}, //top {5, 4, 2, 3}, //front {0, 6, 5, 3},//right {4, 7, 1, 2},//left {5, 6, 7, 5},//bottom {0, 1, 7, 6} //back }; for(int x=0; x < mapWidth; x++){ for(int y=0; y < mapHeight; y++){ for(int z=0; z < mapDepth; z++){ b.type = NONE; b.textured = false; b.visMask = VIS_TOP; for(int a = 0; a < 6; a++){ for (int c = 0; c < 4; c++){ b.faceList[a][c] = faceVals[a][c]; } } array3D[x][y][z] = b; } } } //add some test blocks to play with when offline b.type = GRASS; b.textured = true; b.textureRef = 0; b.visMask = 63; array3D[1][1][5] = b; array3D[2][1][5] = b; array3D[3][1][5] = b; array3D[4][1][5] = b; array3D[5][1][5] = b; array3D[6][1][5] = b; //testBlock = b; //run the face visibility and normal calculations updateVisibility(); //dont draw the gui guiDraw = false; //scale and offset for map mapScale = 1.0f; offset.x = -100.0f; offset.y = 100.0f; scVal = 1.0f; //used as light colour (when lights eventually work) sceneWhiteLevel = ofColor(255,255,255); #ifdef SYPHON //start up syphon and set framerate mainOutputSyphonServer.setName("Minecraft"); #endif ofSetFrameRate(60); //try and set up some lights light light = { {0,50,0,1}, //position (the final 1 means the light is positional) {1,1,1,1}, //diffuse {0,0,0,1}, //specular {0,0,0,1} //ambient }; light0 = light; glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); //glLightModeli(GL_LIGHT_MODEL_TWO_SIDE,0); //sets lighting to one-sided glLightfv(GL_LIGHT0, GL_POSITION, light0.pos); doLights(); //turn on backface culling glEnable(GL_CULL_FACE); }