void rayMarcherApp::setup() { CameraPersp cam; mStartEyePoint = Vec3f( 15, 21, 27.5 ) * 0.65f; cam.lookAt( mStartEyePoint, Vec3f::zero(), Vec3f::yAxis() ); cam.setCenterOfInterest( mStartEyePoint.distance( Vec3f::zero() ) ); mMayaCam.setCurrentCam( cam ); }
void FadeCandyClientApp::mouseDrag( MouseEvent event )
{
	// remember where the cursor is
	const auto dragPos = event.getPos();
	mMousePos = dragPos;

	// forward the drag to the camera controller, telling it which buttons are held
	mMayaCam.mouseDrag( dragPos, event.isLeftDown(), event.isMiddleDown(), event.isRightDown() );
}
void LookAroundYouApp::mouseDrag( MouseEvent event )
{
	mMousePos = event.getPos();

	// Emulate the middle/right buttons for international mac laptop keyboards:
	// meta+left behaves as middle, control+left behaves as right.
	const bool emulatedMiddle = event.isMiddleDown() || ( event.isMetaDown() && event.isLeftDown() );
	const bool emulatedRight  = event.isRightDown()  || ( event.isControlDown() && event.isLeftDown() );
	// a drag only counts as a plain left-drag when no emulation fired
	const bool plainLeft      = event.isLeftDown() && !emulatedMiddle && !emulatedRight;

	mMayaCam.mouseDrag( event.getPos(), plainLeft, emulatedMiddle, emulatedRight );
}
void StereoscopicRenderingApp::resize() { // make sure the camera's aspect ratio remains correct mCamera.setAspectRatio( getWindowAspectRatio() ); mMayaCam.setCurrentCam( mCamera ); // create/resize the Frame Buffer Object required for some of the render methods createFbo(); }
void wellingtonModelApp::mouseDown( MouseEvent event )
{
	// A plain press starts an arcball rotation; holding alt
	// routes the press to the maya-style camera instead.
	if( !event.isAltDown() )
		mArcball.mouseDown( event.getPos() );
	else
		mMayaCam.mouseDown( event.getPos() );
}
void CameraLensShiftTestApp::draw()
{
	gl::clear();

	gl::enableDepthRead();
	gl::enableDepthWrite();

	// draw the overview of the scene in the left half of the window;
	// the viewport is saved/restored through the attrib stack so the
	// two halves don't interfere with each other
	glPushAttrib( GL_VIEWPORT_BIT );
	gl::setViewport( Area( getWindowWidth() * 0.0f, 0, getWindowWidth() * 0.5f, getWindowHeight() ) );
	gl::pushMatrices();
	gl::setMatrices( mOverview.getCamera() );
	render();
	// visualize the (lens-shifted) main camera's frustum in cyan
	gl::color( Color(0, 1, 1) );
	gl::drawFrustum( mCamera );
	gl::popMatrices();
	glPopAttrib();

	// draw what the camera sees in the right half of the window
	glPushAttrib( GL_VIEWPORT_BIT );
	gl::setViewport( Area( getWindowWidth() * 0.5f, 0, getWindowWidth() * 1.0f, getWindowHeight() ) );
	gl::pushMatrices();
	gl::setMatrices( mCamera );
	render();
	gl::popMatrices();
	glPopAttrib();

	//
	gl::disableDepthWrite();
	gl::disableDepthRead();

	// draw separator between the two halves
	gl::color( Color(0.25f, 0.25f, 0.25f) );
	gl::drawLine( Vec2f( getWindowWidth() * 0.5f, 0.0f ), Vec2f( getWindowWidth() * 0.5f, getWindowHeight() ) );

	// draw info overlay (current lens shift + usage instructions)
	gl::enableAlphaBlending();
	gl::drawString( (boost::format("Lens Shift X: %02.2f\nLens Shift Y: %02.2f\n\nUse cursor keys to adjust lens shift,\nuse mouse to control overview camera") % mCamera.getLensShiftHorizontal() % mCamera.getLensShiftVertical() ).str(), Vec2f( 10, 10 ), Color::white(), mFont );
	gl::drawString( "Overview of the scene", Vec2f( 10, getWindowHeight() - 28 ), Color::white(), mFont );
	gl::drawString( "View from the camera", Vec2f( 0.5f * getWindowWidth() + 10, getWindowHeight() - 28 ), Color::white(), mFont );
	gl::disableAlphaBlending();
}
void HexagonMirrorApp::draw()
{
	// clear the window
	gl::clear();

	// activate our camera
	gl::pushMatrices();
	gl::setMatrices( mCamera.getCamera() );

	// set render states
	gl::enable( GL_CULL_FACE );
	gl::enableDepthRead();
	gl::enableDepthWrite();
	gl::color( Color::white() );

	// only draw when all GPU resources exist
	if( mVboMesh && mShaderInstanced && mBuffer )
	{
		// bind webcam image to texture unit 0
		if( mWebcamTexture )
			mWebcamTexture.bind(0);

		// bind the shader, which will do all the hard work for us
		mShaderInstanced.bind();
		mShaderInstanced.uniform( "texture", 0 );
		// per-instance UV scale: each hexagon samples 1/(3*INSTANCES_PER_ROW) of the image
		mShaderInstanced.uniform( "scale", Vec2f( 1.0f / (3.0f * INSTANCES_PER_ROW), 1.0f / (3.0f * INSTANCES_PER_ROW) ) );

		// bind the buffer containing the model matrix for each instance,
		// this will allow us to pass this information as a vertex shader attribute.
		// See: initializeBuffer()
		glBindVertexArray(mVAO);

		// we do all positioning in the shader, and therefore we only need
		// a single draw call to render all instances.
		drawInstanced( mVboMesh, NUM_INSTANCES );

		// make sure our VBO is no longer bound
		mVboMesh.unbindBuffers();
		// unbind vertex array object containing our buffer
		glBindVertexArray(0);

		// unbind shader
		mShaderInstanced.unbind();

		if( mWebcamTexture )
			mWebcamTexture.unbind();
	}

	// reset render states
	gl::disableDepthWrite();
	gl::disableDepthRead();
	gl::disable( GL_CULL_FACE );

	// restore 2D drawing
	gl::popMatrices();
}
void AudioObjApp::draw() { gl::clear( Color( 0, 0, 0 ) ); gl::enableAlphaBlending(); gl::enableDepthRead(); gl::enableDepthWrite(); gl::pushMatrices(); gl::setMatrices( mMayaCam.getCamera() ); if ( mFeature && mFeatureTex ) { mShader->bind(); mFeatureTex.enableAndBind(); mShader->uniform( "dataTex", 0 ); mShader->uniform( "texWidth", (float)mFeatureTex.getWidth() ); mShader->uniform( "texHeight", (float)mFeatureTex.getHeight() ); mShader->uniform( "soundDataSize", (float)mFeature->getSize() ); mShader->uniform( "spread", mFeatureSpread ); mShader->uniform( "spreadOffset", mFeatureSpreadOffset ); mShader->uniform( "time", (float)getElapsedSeconds() ); mShader->uniform( "tintColor", mObjColor ); } if ( mRenderWireframe ) gl::enableWireframe(); gl::color( Color(1.0f, 0.0f, 0.0f ) ); if ( mVbo ) gl::draw( mVbo ); if ( mRenderWireframe ) gl::disableWireframe(); mShader->unbind(); mFeatureTex.unbind(); gl::color( Color::white() ); // gl::drawCoordinateFrame(); gl::popMatrices(); gl::disableDepthRead(); gl::disableDepthWrite(); gl::setMatricesWindow( getWindowSize() ); ciXtractReceiver::drawData( mFeature, Rectf( 15, getWindowHeight() - 150, 255, getWindowHeight() - 35 ) ); gl::draw( mFeatureSurf ); mParams->draw(); }
void MarionetteZooApp::draw()
{
	// clear out the window with a muted blue background
	gl::clear( Colorf( 0.392, 0.392, 0.784 ) );

	gl::setViewport( getWindowBounds() );
	gl::setMatrices( mMayaCam.getCamera() );
	gl::enableDepthRead();
	gl::enableDepthWrite();

	// physics world debug rendering, isolated in its own matrix scope
	ci::gl::pushMatrices();
	mBulletWorld->draw();
	ci::gl::popMatrices();

	// the models render themselves against the current camera
	mModelManager->draw( mMayaCam.getCamera() );

	// world-axis gizmo for orientation
	ci::gl::drawCoordinateFrame( 5.f );

	mParams.draw();
}
void wellingtonModelApp::mouseDrag(MouseEvent event)
{
	// keep hover tracking up to date while dragging
	mouseMove(event);

	// A plain drag rotates the arcball; holding alt routes the
	// drag to the maya-style camera instead.
	if( !event.isAltDown() )
		mArcball.mouseDrag( event.getPos() );
	else
		mMayaCam.mouseDrag( event.getPos(), event.isLeftDown(), event.isMiddleDown(), event.isRightDown() );
}
void HiKinectApp::draw3D()
{
	// Render the kinect-driven grid mesh through the UI camera, with
	// optional wireframe and fixed-function (GL 1.x) lighting/material state.
	gl::setMatrices( mCamUI.getCamera() );

	if (mWireframe)
		gl::enableWireframe();
	else
		gl::disableWireframe();

	if (mLighting) {
		glEnable( GL_LIGHTING );
		glEnable( GL_LIGHT0 );
	}

	// directional light along +Z (w == 0 makes the position a direction)
	// GLfloat light_position[] = { mMousePos.x, mMousePos.y, -275.0f, 0.0f };
	GLfloat light_position[] = { 0, 0, 1.0f, 0.0f };
	glLightfv( GL_LIGHT0, GL_POSITION, light_position );

	// front-face material channels, each driven by its own toggle
	if( DIFFUSE ){
		ci::ColorA color( CM_RGB, 1.0f, 1.0f, 1.0f, 1.0f );
		glMaterialfv( GL_FRONT, GL_DIFFUSE, color );
	}
	else {
		glMaterialfv( GL_FRONT, GL_DIFFUSE, no_mat );
	}

	if( AMBIENT )
		glMaterialfv( GL_FRONT, GL_AMBIENT, mat_ambient );
	else
		glMaterialfv( GL_FRONT, GL_AMBIENT, no_mat );

	if( SPECULAR ){
		glMaterialfv( GL_FRONT, GL_SPECULAR, mat_specular );
		glMaterialfv( GL_FRONT, GL_SHININESS, mat_shininess );
	}
	else {
		glMaterialfv( GL_FRONT, GL_SPECULAR, no_mat );
		glMaterialfv( GL_FRONT, GL_SHININESS, no_shininess );
	}

	if( EMISSIVE )
		glMaterialfv( GL_FRONT, GL_EMISSION, mat_emission );
	else
		glMaterialfv( GL_FRONT, GL_EMISSION, no_mat );

	// texture units: 0 = depth, 1 = FBO color attachment, 2 = color camera
	if (mDepthTexture)
		mDepthTexture.bind(0);
	mFbo.bindTexture(1);
	if (mColorTexture)
		mColorTexture.bind(2);

	// mouse X position sweeps the mesh draw parameter across [0, 1]
	mGridMesh.draw( lmap(mMousePos.x, 0.0f, (float)getWindowWidth(), 0.0f, 1.0f) );

	if (mLighting) {
		glDisable( GL_LIGHTING );
		glDisable( GL_LIGHT0 );
	}

	// NOTE(review): wireframe is switched off unconditionally here even when
	// mWireframe is true -- confirm later passes expect filled polygons
	gl::disableWireframe();
}
void GizmoSampleApp::setup() { // Create a reference to our gizmo object mGizmo = Gizmo::create( getWindowSize() ); // Create the cam interface CameraPersp cam; cam.setEyePoint( Vec3f( 0.0f, 300.0f, 500.0f ) ); cam.setPerspective(50, getWindowWidth() / (float) getWindowHeight(), 1, 10000 ); cam.setCenterOfInterestPoint( Vec3f::zero() ); mCamUI.setCurrentCam( cam ); }
void godComplexApp::draw()
{
	gl::enableDepthRead();
	gl::enableDepthWrite();
	gl::enableAlphaBlending();

	// clear out the window with black
	gl::clear( Color( 0, 0, 0 ) );
	gl::setMatrices( mMayaCam.getCamera());

	// water simulation layer (guarded: module may not exist)
	if(drawWater == true){
		if(mWaterModule != NULL){
			gl::pushMatrices();
			mWaterModule->draw(0);
			gl::popMatrices();
		}
	}

	// textured city mesh, drawn as a scaled/offset wireframe
	if(drawMesh == true){
		gl::pushMatrices();
		myImage.enableAndBind();
		// gl::rotate( mArcball.getQuat() ); //NOTE: for debugging
		gl::scale(Vec3f(0.035,0.035,0.035));
		glLineWidth(0.2f);
		gl::enableWireframe();
		// nudge/tilt the mesh into place (values hand-tuned)
		gl::translate(Vec3f(280.0, 0.0, -180.0));
		gl::rotate(Vec3f(-10.0, -10.0, 0.0));
		gl::draw(mVbo);
		gl::disableWireframe();
		myImage.unbind();
		gl::popMatrices();
	}

	// flow-field overlay in its own matrix scope
	gl::pushMatrices();
	mFlowField->draw();
	gl::popMatrices();

	/*
	glPushMatrix();
	glColor4f(1.0, 0.0, 0.0, 1.0);
	gl::drawSphere(Vec3f(userIncr1, userIncr2, userIncr3), 30.0, 12.0);
	glPopMatrix();
	*/
}
void PointCloudApp::draw() { gl::clear(Color(0.25f, 0.1f, 0.15f)); //gl::enableAdditiveBlending(); gl::enableDepthRead(); gl::enableDepthWrite(); gl::setMatrices(mMayaCam.getCamera()); gl::ScopedTextureBind cTexture(mTexRgb); gl::pointSize(getWindowWidth()/mDepthDims.x); mDrawObj->draw(); }
void TessellationSampleApp::draw()
{
	// Raw-GL tessellation demo: bypasses Cinder's gl:: wrappers and drives
	// the program/uniforms with direct GL calls.

	// clear out the window with black
	// gl::clear( Color( 0, 0, 0 ) );
	// gl::setMatricesWindow( getWindowWidth(), getWindowHeight() );
	// gl::translate( getWindowCenter() );
	// gl::ScopedGlslProg glslProg( mGlsl );
	// mGlsl->uniform( "uNumSides", mNumSides );
	// mGlsl->uniform( "uRadius", mRadius );
	// mBatch->draw();

	//glEnable(GL_DEPTH_TEST);
	//glEnable(GL_CULL_FACE);

	glUseProgram(shaderProgram);

	// tessellation levels + simple lighting uniforms
	glUniform1f( tessellationInnerLoc, mTessellationInner );
	glUniform1f( tessellationOuterLoc, mTessellationOuter );
	glUniform3f(ambientColorLoc, 0.04f, 0.04f, 0.04f);
	glUniform3f(diffuseColorLoc, 0, 0.75, 0.75);
	glUniform3f(lightPosLoc, 0.25, 0.25, 1);

	//gl::setMatrices( mMayaCam.getCamera() );
	// matrices are uploaded manually from the maya-cam's camera
	glUniformMatrix4fv(projectionMatLoc, 1, GL_FALSE, mMayaCam.getCamera().getProjectionMatrix().m);
	glUniformMatrix4fv(modelViewMatLoc, 1, GL_FALSE, mMayaCam.getCamera().getViewMatrix().m);
	glUniformMatrix4fv(normalMatLoc, 1, GL_FALSE, mNormalMatrix.m);

	glClearColor(0.7f, 0.6f, 0.5f, 1.0f);
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	//glPatchParameteri(GL_PATCH_VERTICES, 3); // triggers a crash for some reason...

	// wireframe so the tessellation pattern is visible
	// NOTE(review): polygon mode is never restored to GL_FILL afterwards --
	// confirm nothing else in the frame expects filled polygons
	glPolygonMode( GL_FRONT_AND_BACK, GL_LINE );
	//glDrawArrays(GL_PATCHES, 0, 4);
	glDrawElements(GL_PATCHES, mIndexCount, GL_UNSIGNED_INT, 0);

	glUseProgram( 0 );
}
void StereoscopicRenderingApp::resize( ResizeEvent event )
{
	// make sure the camera's aspect ratio remains correct
	mCamera.setAspectRatio( event.getAspectRatio() );
	mMayaCam.setCurrentCam( mCamera );

	// create/resize the FBO's required for anaglyph rendering:
	// one per eye, multisampled (8x) with linear mag filtering
	gl::Fbo::Format fmt;
	fmt.setMagFilter( GL_LINEAR );
	fmt.setSamples(8);

	mAnaglyphLeft = gl::Fbo( event.getWidth(), event.getHeight(), fmt );
	mAnaglyphRight = gl::Fbo( event.getWidth(), event.getHeight(), fmt );
}
void PointCloudApp::setup()
{
	// window / frame-loop configuration
	getWindow()->setSize(1280, 720);
	setFrameRate(60);

	// camera: eye at the origin looking down +Z; the 100..2000 clip range
	// presumably matches the depth sensor's usable range (units unclear --
	// TODO(review): confirm against setupRSSDK())
	mCamera.setPerspective(45.0f, getWindowAspectRatio(), 100.0f, 2000.0f);
	mCamera.lookAt(vec3(0, 0, 0), vec3(0, 0, 1), vec3(0, 1, 0));
	mCamera.setCenterOfInterestPoint(vec3(0, 0, 750.0));
	mMayaCam.setCurrentCam(mCamera);

	// sensor + geometry initialization
	setupRSSDK();
	setupMesh();

	// run exit() when the app's cleanup signal fires
	getSignalCleanup().connect(std::bind(&PointCloudApp::exit, this));
}
void MarionetteZooApp::setupParams() { mndl::params::PInterfaceGl::load( "params.xml" ); mParams = mndl::params::PInterfaceGl( "Parameters", Vec2i( 230, 550 ), Vec2i( 50, 50 ) ); mParams.addPersistentSizeAndPosition(); mFps = 0; mParams.addParam( "Fps", &mFps, "", true ); mParams.addSeparator(); mParams.addText( "Camera" ); mParams.addPersistentParam( "Lock camera (l)", &mCameraLock, false ); mParams.addPersistentParam( "Fov", &mCameraFov, 45.f, "min=20 max=180 step=.1" ); mParams.addPersistentParam( "Eye", &mCameraEyePoint, Vec3f( 0.0f, -20.0f, 0.0f ) ); mParams.addPersistentParam( "Center of Interest", &mCameraCenterOfInterestPoint, Vec3f( 0.0f, 0.0f, 0.1f ) ); mParams.addButton( "Reset camera", [ & ]() { mCameraCenterOfInterestPoint = Vec3f( 0.0f, 0.0f, 0.1f ); mCameraFov = 45.f; mCameraEyePoint = Vec3f( 0.0f, -20.0f, 0.0f ); CameraPersp cam = mMayaCam.getCamera(); cam.setPerspective( mCameraFov, getWindowAspectRatio(), 0.1f, 1000.0f ); cam.setEyePoint( mCameraEyePoint ); cam.setCenterOfInterestPoint( mCameraCenterOfInterestPoint ); mMayaCam.setCurrentCam( cam ); } ); mParams.addSeparator(); mModelTypes = ModelFileManager::getSingleton().getModelTypes(); mParams.addPersistentParam( "Model", mModelTypes, &mModelTypeId, 0 ); if ( mModelTypeId > mModelTypes.size() ) mModelTypeId = 0; mParams.addButton( "Test model", std::bind( &MarionetteZooApp::testModel, this ) ); }
void reflection_animationApp::draw() { // BACKGROUND gl::clear( Color( 0.0025f, 0.0025f, 0.0025f ) ); gl::enableDepthWrite( false ); gl::enableDepthRead( false ); glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ); // RENDER MODE switch (m_renderMode) { case ALPHA: gl::enableAlphaBlending(); break; case ADDITIVE: gl::enableAdditiveBlending(); break; default: gl::enableAlphaBlending(); break; } // DRAW // camera //m_eye = Vec3f( 0.0f, 0.0f, m_cameraDistance ); //m_camPers.lookAt( m_eye, m_center, m_up ); // matrices gl::setMatrices( m_mayaCam.getCamera() ); // draw pyramid m_structure.draw(); // GUI if (m_recording) { Surface frame = copyWindowSurface(); m_movieWriter.addFrame( frame ); } if(m_showInterface) params::InterfaceGl::draw(); }
void FastTrailsApp::setup()
{
	// initialize camera: 60 degree fov, looking from -Z back at the origin
	CameraPersp cam( getWindowWidth(), getWindowHeight(), 60.0f, 0.1f, 500.0f );
	cam.setEyePoint( Vec3f(0, 0, -100.0f) );
	cam.setCenterOfInterestPoint( Vec3f::zero() );
	mCamera.setCurrentCam( cam );

	// load texture (best-effort: log and continue without it on failure)
	try { mTexture = gl::Texture( loadImage( loadAsset("gradient.png") ) ); }
	catch( const std::exception &e ) { console() << e.what() << std::endl; }

	// create VBO mesh: a triangle strip with dynamic positions but
	// static indices and texture coordinates
	gl::VboMesh::Layout layout;
	layout.setDynamicPositions();
	layout.setStaticIndices();
	layout.setStaticTexCoords2d();

	mVboMesh = gl::VboMesh( TRAIL_LENGTH, TRAIL_LENGTH, layout, GL_TRIANGLE_STRIP );

	// observation: indices and texture coordinates never change
	std::vector< uint32_t > indices;
	indices.reserve( TRAIL_LENGTH );
	std::vector< Vec2f > texcoords;
	texcoords.reserve( TRAIL_LENGTH );

	for( size_t i=0; i<TRAIL_LENGTH; ++i ) {
		indices.push_back( i );

		// u runs 0..1 along the trail (two strip vertices per segment),
		// v alternates 0/1 between the two edges of the strip
		float x = math<float>::floor( i * 0.5f ) / ( TRAIL_LENGTH * 0.5f );
		float y = float( i % 2 );
		texcoords.push_back( Vec2f( x, y ) );
	}

	// create index and texture coordinate buffers
	mVboMesh.bufferIndices( indices );
	mVboMesh.bufferTexCoords2d( 0, texcoords );

	// clear our trail buffer
	mTrail.clear();

	// initialize time and angle
	mTime = getElapsedSeconds();
	mAngle= 0.0f;

	// disable vertical sync, so we can see the actual frame rate
	gl::disableVerticalSync();
}
void TerrainApp::draw()
{
	// Render the terrain mesh with fixed-function lighting, then the UI.
	gl::clear( Color::black() );
	gl::setMatrices( mMayaCam.getCamera() );

	gl::enable( GL_LIGHTING );
	gl::enableDepthRead();
	gl::enableDepthWrite();

	// apply the surface material before drawing the mesh
	mMaterial.apply();
	gl::draw( mTriMesh );

	gl::disable( GL_LIGHTING );

	params::InterfaceGl::draw();
}
void PhysicallyBasedShadingApp::setup() { // build our test model mSphere = gl::VboMesh::create( geom::Sphere().subdivisions( 32 ) ); // prepare the Camera ui auto cam = CameraPersp(); cam.setPerspective( 50.0f, getWindowAspectRatio(), 1.0f, 1000.0f ); cam.setEyePoint( vec3( -37.653, 40.849, -0.187 ) ); cam.setOrientation( quat( -0.643, 0.298, 0.640, 0.297 ) ); mMayaCam.setCurrentCam( cam ); // load and compile the shader and make sure they compile fine try { mShader = gl::GlslProg::create( gl::GlslProg::Format().vertex( loadAsset( "PBR.vert" ) ).fragment( loadAsset( "PBR.frag" ) ) ); } catch( gl::GlslProgCompileExc exc ){ CI_LOG_E( exc.what() ); } // create a shader for rendering the light mColorShader = gl::getStockShader( gl::ShaderDef().color() ); // set the initial parameters and setup the ui mRoughness = 1.0f; mMetallic = 1.0f; mSpecular = 1.0f; mLightRadius = 4.0f; mLightColor = Color::white(); mBaseColor = Color( 1.0f, 0.0f, 0.0f ); mTime = 0.0f; mAnimateLight = true; mFStop = 2.0f; mGamma = 2.2f; mFocalLength = 36.0f; mSensorSize = 35.0f; mFocalLengthPreset = mPrevFocalLengthPreset = 3; mSensorSizePreset = mPrevSensorSizePreset = 4; mFStopPreset = mPrevFStopPreset = 2; setupParams(); #if defined( CINDER_MSW ) mFont = Font( "Arial Bold", 12 ); #else mFont = Font( "Arial-BoldMT", 12 ); #endif }
void cApp::setup(){
	// window at half the render resolution
	setWindowPos( 0, 0 );
	setWindowSize( mW*0.5, mH*0.5 );

	// exporter renders at mScale x resolution, frames 0..2999
	mExp.setup( mW*mScale, mH*mScale,0, 2999, GL_RGB, mt::getRenderPath() );

	// perlin noise + RNG seeding
	mPln.setOctaves(4);
	mPln.setSeed(1332);
	randSeed( mt::getSeed() );

	// build the strange agents; one placeholder point per agent point,
	// laid out along the X axis at count*scale spacing
	int count = 0;
	for( int i=0; i<100; i++){
		StrangeAgent sa;
		sa.setRandom();
		mSAs.push_back( sa );

		for(int j=0; j<sa.points.size(); j++){
			mPlnPts.push_back( Vec3f(count*scale,0,0) );
			count++;
		}
	}
	total = count;

	// NOTE(review): 'if( 1 )' permanently selects the perspective camera;
	// the ortho branch below is dead code kept for quick switching
	if( 1 ){
		CameraPersp cam;
		cam.setNearClip(0.1);
		cam.setFarClip(1000000);
		cam.setFov(60);
		cam.setEyePoint( Vec3f(0,0,-30 ) );
		cam.setCenterOfInterestPoint( Vec3f(0,0,0) );
		cam.setAspectRatio( (float)mW/mH );
		mCamUi.setCurrentCam(cam);
	}else{
		ortho.setNearClip(0.1);
		ortho.setFarClip(1000000);
		ortho.setEyePoint( Vec3f(0,0,-7 ) );
		ortho.setCenterOfInterestPoint( Vec3f(0,0,0) );
		ortho.setAspectRatio( (float)mW/mH );
	}

#ifdef RENDER
	mExp.startRender();
#endif
}
void ObjLoaderApp::setup() { ObjLoader loader( loadResource( RES_CUBE_OBJ )->createStream() ); loader.load( &mMesh ); mVBO = gl::VboMesh( mMesh ); mTexture = gl::Texture( loadImage( loadResource( RES_IMAGE ) ) ); mShader = gl::GlslProg( loadResource( RES_SHADER_VERT ), loadResource( RES_SHADER_FRAG ) ); CameraPersp initialCam; initialCam.setPerspective( 45.0f, getWindowAspectRatio(), 0.1, 10000 ); mMayaCam.setCurrentCam( initialCam ); mTexture.bind(); mShader.bind(); mShader.uniform( "tex0", 0 ); }
void CameraLensShiftTestApp::setup()
{
	// overview camera: watches the scene from the side (x = -50) so the
	// main camera's frustum can be visualized in the left viewport
	CameraPersp cam( getWindowWidth() * 0.5f, getWindowHeight(), 40.0f, 0.1f, 1000.0f );
	cam.setEyePoint( Vec3f(-50.0f, 0.0f, 0.0f) );
	cam.setCenterOfInterestPoint( Vec3f::zero() );
	mOverview.setCurrentCam(cam);

	// set camera to size of half the window
	// (short 2.5..20 clip range keeps the drawn frustum compact)
	mCamera = CameraPersp( getWindowWidth() * 0.5f, getWindowHeight(), 40.0f, 2.5f, 20.0f );
	mCamera.setEyePoint( Vec3f(0.0f, 0.0f, -15.0f) );
	mCamera.setCenterOfInterestPoint( Vec3f::zero() );
	//
	mFont = Font("Tahoma", 18);

	// start from the camera's current lens shift
	mLensShift = mCamera.getLensShift();
}
void _TBOX_PREFIX_App::draw() { // clear out the window with black gl::clear( Color( 0, 0, 0 ) ); // set up the camera gl::pushMatrices(); gl::setMatrices( mMayaCam.getCamera() ); Vec3f size = 5.0f * Vec3f::one(); gl::drawColorCube( 0.5f * size, size ); gl::drawCoordinateFrame( 6.0f ); gl::popMatrices(); // Draw the interface mParams.draw(); }
void godComplexApp::setup()
{
	// animation / toggle state
	waterPrevTime = 0.0f;
	drawWater = true;
	drawMesh = true;
	mDrawFlowField = true;
	userIncr1 = 0.0f;
	userIncr2 = 0.0f;
	userIncr3 = 0.0f;
	//setFullScreen(true);
	//glEnable(GL_LIGHTING);
	//glEnable(GL_LIGHT0);

	// city texture + OBJ mesh, uploaded into a VBO
	myImage = gl::Texture(loadImage(loadResource( RES_WELLINGTON_IMG_ALPHA )));
	ObjLoader loader(loadResource(RES_WELLINGTON_OBJ));
	loader.load(&mMesh);
	mVbo = gl::VboMesh(mMesh);

	// camera placed 30 units above the origin looking straight down.
	// NOTE(review): the up vector (yAxis) is parallel to the view
	// direction here -- lookAt may be degenerate; confirm intended.
	CameraPersp initialCam;
	initialCam.setPerspective( 77.5f, getWindowAspectRatio(), 5.0f, 3000.0f ); //TODO: get correct camera persp from C4D
	Vec3f mEye = Vec3f(0.0f, 30.0f, 0.0f);
	Vec3f mCenter = Vec3f::zero();
	Vec3f mUp = Vec3f::yAxis();
	initialCam.lookAt( mEye, mCenter, mUp ); //NOTE: new camera
	// initialCam.lookAt(Vec3f(0, 43, 0), Vec3f(0, 0, 0), Vec3f(0, -1, 0)); //NOTE: orginal camera
	mMayaCam.setCurrentCam( initialCam );

	// simulation modules
	// NOTE(review): raw new with no visible delete -- consider smart pointers
	mWaterModule = new WaterModule();
	mWaterModule->setup();
	mFlowField = new VectorFlowField();
	mFlowField->setup();
}
void cApp::draw(){
	// render the frame into the exporter using the UI camera
	mExp.begin( mCamUi.getCamera() );
	{
		gl::clear( Colorf(0,0,0) );

		// agent points -- disabled debug branch
		if(0){
			glPushMatrix();
			glTranslatef( 5, 0, 0);
			gl::color(1, 1, 1);
			glBegin( GL_POINTS );
			for( int i=0; i<mSAs.size(); i++ ){
				for (int j=0; j<mSAs[i].points.size(); j++) {
					// presumably Cinder's Vec3f overload of glVertex3f -- confirm
					glVertex3f( mSAs[i].points[j] );
				}
			}
			glEnd();
			glPopMatrix();
		}

		// perlin points, centered on the X axis, semi-transparent blue
		glPushMatrix();
		glTranslatef( -total*scale*0.5, 0, 0);
		gl::color(0, 0, 1, 0.35);
		glBegin( GL_POINTS );
		for( int i=0; i<mPlnPts.size(); i++ ){
			glVertex3f( mPlnPts[i] );
		}
		glEnd();
		glPopMatrix();

		// red reference line along the X axis
		glPushMatrix();
		glColor3f(1, 0, 0);
		glBegin( GL_LINES );
		glVertex3f(-100, 0, 0);
		glVertex3f(100, 0, 0);
		glEnd();
		glPopMatrix();
	}
	mExp.end();

	// composite the exporter's output to the window
	gl::clear( Colorf(1,1,1) );
	gl::color( Colorf(1,1,1) );
	mExp.draw();
}
void HiKinectApp::setup()
{
	// Kinect device + capture state
	console() << "There are " << Kinect::getNumDevices() << " Kinects connected." << std::endl;
	mKinect = Kinect( Kinect::Device() );
	mKinectReady = false;
	mKinectIR = false;

	// normal-map generation shader + FBO render target
	mNormalShader = gl::GlslProg( loadResource( "normal_vert.glsl" ), loadResource( "normal_frag.glsl" ) );
	mNormalStrength = 20.0f;
	gl::Fbo::Format format;
	mFbo = gl::Fbo( CAPTURE_WIDTH, CAPTURE_HEIGHT, format );

	// contour extraction at the kinect's native 640x480
	// NOTE(review): raw new with no visible delete -- consider smart pointers
	mSilhouetteDetector = new SilhouetteDetector(640,480);
	mContours = new vector<vector<cv::Point> >();
	mDoNormalMap = false;

	// tweak UI
	mParams = params::InterfaceGl( "Parameters", Vec2i( 300, 200 ) );
	mParams.addParam( "NormalStrength", &mNormalStrength, "min=1.0 max=1000.0 step=1.0 keyIncr=z keyDecr=Z" );
	mParams.addParam( "Depth Scale", &mGridMesh.mDepthScale, "min=1.0 max=2000.0" );
	mParams.addParam( "Depth Offset", &mGridMesh.mDepthOffset, "min=0.0 max=1000.0" );
	mParams.addParam( "Shader Displacement", &mGridMesh.mShaderDisplacement );
	mParams.addParam( "Depth Max", &mGridMesh.mDepthMax, "min=0.0 max=1.0 step=0.01" );
	mParams.addParam( "Generate Normal Map", &mDoNormalMap );

	// 160x120 grid sampling a 640x480 capture
	mGridMesh.init(160, 120, 640, 480, false, true);

	// interaction + render toggles
	mIsMouseDown = false;
	mWireframe = true;
	DIFFUSE = true;
	AMBIENT = false;
	SPECULAR = false;
	EMISSIVE = false;

	// UI camera looking at the origin from z = 750
	mCamera.setEyePoint( Vec3f(0.0f, 0.0f, 750.0f));
	mCamera.setCenterOfInterestPoint( Vec3f::zero() );
	mCamera.setPerspective( 60, getWindowAspectRatio(), 1, 2000 );
	mCamUI.setCurrentCam( mCamera );

	gl::enableDepthWrite();
	gl::enableDepthRead();
}
void ssaoApp::update() { mCam = mMayaCam.getCamera(); mDeferredRenderer.mCam = &mCam; float base = getElapsedSeconds(); vector<Light_PS*>::iterator it = mDeferredRenderer.getCubeLightsRef()->begin(); for(it; it != mDeferredRenderer.getCubeLightsRef()->end(); ++it) { Vec3f v = (*it)->getPos(); v.y = (40 + (sin(base) * 30.f)); (*it)->setPos(v); base += 1.f; } }