void QuickTimeSampleApp::loadMovieFile( const fs::path &moviePath )
{
    try {
        // load up the movie, set it to loop, and begin playing
        mMovie = qtime::MovieGl::create( moviePath );
        mMovie->setLoop();
        mMovie->play();

        // create a texture for showing some info about the movie
        TextLayout infoText;
        infoText.clear( ColorA( 0.2f, 0.2f, 0.2f, 0.5f ) );
        infoText.setColor( Color::white() );
        infoText.addCenteredLine( moviePath.filename().string() );
        infoText.addLine( toString( mMovie->getWidth() ) + " x " + toString( mMovie->getHeight() ) + " pixels" );
        infoText.addLine( toString( mMovie->getDuration() ) + " seconds" );
        infoText.addLine( toString( mMovie->getNumFrames() ) + " frames" );
        infoText.addLine( toString( mMovie->getFramerate() ) + " fps" );
        infoText.setBorder( 4, 2 );
        mInfoTexture = gl::Texture::create( infoText.render( true ) );
    }
    catch( ci::Exception &exc ) {
        console() << "Exception caught trying to load the movie from path: " << moviePath << ", what: " << exc.what() << std::endl;
        mMovie.reset();
        mInfoTexture.reset();
    }

    mFrameTexture.reset();
}
void TextParticlesApp::update()
{
    if( ! mActive )
        return;

    // Update particles on the GPU
    gl::ScopedGlslProg prog( mUpdateProg );
    gl::ScopedState rasterizer( GL_RASTERIZER_DISCARD, true );  // turn off fragment stage

    mPerlin3dTex->bind( 0 );
    mUpdateProg->uniform( "uPerlinTex", 0 );
    mUpdateProg->uniform( "uStep", mStep.value() );
    mUpdateProg->uniform( "uDampingSpeed", mDampingSpeed );
    mUpdateProg->uniform( "uNoiseOffset", mNoiseOffset );
    mUpdateProg->uniform( "uEndColor", mEndColor );

    // Bind the source data (Attributes refer to specific buffers).
    gl::ScopedVao source( mAttributes[mSourceIndex] );
    // Bind destination as buffer base.
    gl::bindBufferBase( GL_TRANSFORM_FEEDBACK_BUFFER, 0, mParticleBuffer[mDestinationIndex] );
    gl::beginTransformFeedback( GL_POINTS );

    // Draw source into destination, performing our vertex transformations.
    gl::drawArrays( GL_POINTS, 0, mTextParticleCount );

    gl::endTransformFeedback();

    mPerlin3dTex->unbind();

    // Swap source and destination for next loop
    std::swap( mSourceIndex, mDestinationIndex );
}
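// Companion render pass (a minimal sketch, not part of the sample above): after update()
// swaps the indices, mSourceIndex names the VAO whose buffer was just written by transform
// feedback, so the render program draws from it. mRenderProg and mCam are assumed members
// that the sample does not show.
void TextParticlesApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    gl::setMatrices( mCam );                          // assumed camera member
    gl::ScopedGlslProg render( mRenderProg );         // assumed point-rendering program
    gl::ScopedVao vao( mAttributes[mSourceIndex] );
    gl::context()->setDefaultShaderVars();
    gl::drawArrays( GL_POINTS, 0, mTextParticleCount );
}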
void NormalGetterApp::normalize( gl::TextureRef _tex )
{
    {
        gl::ScopedMatrices push;
        gl::ScopedFramebuffer fbo( mOutputFbo );
        gl::clear();
        ci::gl::setMatricesWindow( mOutputFbo->getSize() );
        ci::gl::ScopedViewport view( ci::vec2( 0 ), mOutputFbo->getSize() );
        gl::ScopedGlslProg mGlsl( mNormalGlsl );
        gl::ScopedTextureBind tex0( _tex );

        mNormalGlsl->uniform( "uSampler", 0 );
        mNormalGlsl->uniform( "u_textureSize", vec2( _tex->getWidth(), _tex->getHeight() ) );
        mNormalGlsl->uniform( "bias", bias );
        mNormalGlsl->uniform( "invertR", float( invertR ? -1.0 : 1.0 ) );
        mNormalGlsl->uniform( "invertG", float( invertG ? -1.0 : 1.0 ) );

        gl::drawSolidRect( Rectf( vec2( 0 ), _tex->getSize() ) );
    }

    if( pushFramesToBuffer ) {
        mPreprocessedImages->pushFront( std::make_pair( mOutputFbo->getColorTexture()->createSource(), currentFrame ) );
        if( currentFrame == mMovie->getNumFrames() ) {
            pushFramesToBuffer = false;
            mMovie->setLoop( true );
            mMovie->seekToStart();
        }
        currentFrame++;
    }
}
void FaceOff::update()
{
#ifdef QUICKTIME_ENABLED
    if( MOVIE_MODE ) {
        if( ! mMovie ) {
            fs::path moviePath = getAssetPath( MOVIE_PATH );
            try {
                // load up the movie, set it to loop, and begin playing
                mMovie = qtime::MovieSurface::create( moviePath );
                mMovie->setLoop();
                mMovie->play();
                mOfflineFaceTex.reset();
            }
            catch( ci::Exception &exc ) {
                console() << "Exception caught trying to load the movie from path: " << MOVIE_PATH << ", what: " << exc.what() << std::endl;
                mMovie.reset();
            }
        }
        else {
            if( mMovie->checkNewFrame() ) {
                auto surface = mMovie->getSurface();
                if( ! mOfflineFaceTex ) {
                    mOfflineFaceTex = gl::Texture2d::create( *surface, gl::Texture::Format().loadTopDown() );
                }
                else {
                    mOfflineFaceTex->update( *surface );
                }
            }
        }
    }
    else {
        mMovie.reset();
        mOfflineFaceTex = mPhotoTex;
    }
#endif

    if( mDeviceId != DEVICE_ID ) {
        mDeviceId = DEVICE_ID;
        mCapture.setup( CAM_W, CAM_H, mDevices[DEVICE_ID] );
        mDoesCaptureNeedsInit = true;
    }

    if( mCapture.isBackCamera )
        mCapture.flip = false;
    else
        mCapture.flip = CAM_FLIP;
}
void ciApp::setup()
{
    setWindowSize( 1280, 720 );
    setFrameRate( 60.f );

    int maxVertUniformsVect;
    glGetIntegerv( GL_MAX_VERTEX_UNIFORM_VECTORS, &maxVertUniformsVect );

    mSize = 0;
    mSizePrev = -1;
    mSizeMax = 17;
    mAmplifier = 1.f;
    mExposure = 1.f;
    mGamma = 2.2f;
    printf( "max uniform: %i, %i\n", maxVertUniformsVect, mSizeMax );

    mParams = params::InterfaceGl::create( getWindow(), "App parameters", ivec2( 250, 300 ) );
    mParams->setPosition( ivec2( 20, 250 ) );

    mTexture = gl::Texture::create( loadImage( loadFile( data_path + "demo.png" ) ) );
    mFbo = gl::Fbo::create( mTexture->getWidth(), mTexture->getHeight(), gl::Fbo::Format().colorTexture() );

    //mShader.setup( "filterGaussianBlur" );
    filter = hb::GlslFilter::create( mTexture->getSize() );
    filter->setParams( mParams );

    vector_blur.setup( getWindowSize() );
    vector_blur.setParams( mParams );

    spout_receiver = hb::Receiver::create( "Spout DX11 Sender" );
    //spout_receiver = hbSpoutReceiver::create( "KidsLandSea" );
    spout_sender = hb::Sender::create( "cinder_spout", mFbo->getWidth(), mFbo->getHeight() );

#if 0
    auto ctx = audio::Context::master();

    // The InputDeviceNode is platform-specific, so you create it using a special method on the Context:
    mInputDeviceNode = ctx->createInputDeviceNode();

    // By providing an FFT size double that of the window size, we 'zero-pad' the analysis data, which gives
    // an increase in resolution of the resulting spectrum data.
    auto monitorFormat = audio::MonitorSpectralNode::Format().fftSize( 2048 ).windowSize( 1024 );
    mMonitorSpectralNode = ctx->makeNode( new audio::MonitorSpectralNode( monitorFormat ) );

    mInputDeviceNode >> mMonitorSpectralNode;

    // InputDeviceNode (and all InputNode subclasses) need to be enable()'d to process audio. So does the Context:
    mInputDeviceNode->enable();
    ctx->enable();
#endif
}
void QuickTimeSampleApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    gl::enableAlphaBlending();

    if( mFrameTexture ) {
        Rectf centeredRect = Rectf( mFrameTexture->getBounds() ).getCenteredFit( getWindowBounds(), true );
        gl::draw( mFrameTexture, centeredRect );
    }

    if( mInfoTexture ) {
        gl::draw( mInfoTexture, vec2( 20, getWindowHeight() - 20 - mInfoTexture->getHeight() ) );
    }
}
void ShaderToyApp::bindShader( gl::GlslProgRef shader )
{
    // Nothing to bind if we don't have a shader.
    if( ! shader )
        return;

    // Bind the shader.
    shader->bind();

    // Make sure it was successful by checking for errors.
    GLenum err = glGetError();
    if( err != GL_NO_ERROR )
        fatal( "Failed to bind the shader!\n\nYour driver may not properly support shared contexts. Make sure you use the latest driver version and a proper GPU." );

    // Calculate shader parameters.
    Vec3f iResolution( Vec2f( getWindowSize() ), 1.f );
    float iGlobalTime = (float)getElapsedSeconds();
    float iChannelTime0 = (float)getElapsedSeconds();
    float iChannelTime1 = (float)getElapsedSeconds();
    float iChannelTime2 = (float)getElapsedSeconds();
    float iChannelTime3 = (float)getElapsedSeconds();
    Vec3f iChannelResolution0 = mChannel0 ? Vec3f( mChannel0->getSize(), 1.f ) : Vec3f::one();
    Vec3f iChannelResolution1 = mChannel1 ? Vec3f( mChannel1->getSize(), 1.f ) : Vec3f::one();
    Vec3f iChannelResolution2 = mChannel2 ? Vec3f( mChannel2->getSize(), 1.f ) : Vec3f::one();
    Vec3f iChannelResolution3 = mChannel3 ? Vec3f( mChannel3->getSize(), 1.f ) : Vec3f::one();

    time_t now = time( 0 );
    tm* t = gmtime( &now );
    Vec4f iDate( float( t->tm_year + 1900 ), float( t->tm_mon + 1 ), float( t->tm_mday ),
                 float( t->tm_hour * 3600 + t->tm_min * 60 + t->tm_sec ) );

    // Set shader uniforms.
    shader->uniform( "iResolution", iResolution );
    shader->uniform( "iGlobalTime", iGlobalTime );
    shader->uniform( "iChannelTime[0]", iChannelTime0 );
    shader->uniform( "iChannelTime[1]", iChannelTime1 );
    shader->uniform( "iChannelTime[2]", iChannelTime2 );
    shader->uniform( "iChannelTime[3]", iChannelTime3 );
    shader->uniform( "iChannelResolution[0]", iChannelResolution0 );
    shader->uniform( "iChannelResolution[1]", iChannelResolution1 );
    shader->uniform( "iChannelResolution[2]", iChannelResolution2 );
    shader->uniform( "iChannelResolution[3]", iChannelResolution3 );
    shader->uniform( "iMouse", mMouse );
    shader->uniform( "iChannel0", 0 );
    shader->uniform( "iChannel1", 1 );
    shader->uniform( "iChannel2", 2 );
    shader->uniform( "iChannel3", 3 );
    shader->uniform( "iDate", iDate );
}
void FlickrTestMTApp::draw()
{
    gl::clear( Color( 0.1f, 0.1f, 0.2f ) );

    if( mLastTexture ) {
        gl::color( 1, 1, 1, 1.0f - mFade );
        Rectf textureBounds = mLastTexture->getBounds();
        Rectf drawBounds = textureBounds.getCenteredFit( getWindowBounds(), true );
        gl::draw( mLastTexture, drawBounds );
    }
    if( mTexture ) {
        gl::color( 1, 1, 1, mFade );
        Rectf textureBounds = mTexture->getBounds();
        Rectf drawBounds = textureBounds.getCenteredFit( getWindowBounds(), true );
        gl::draw( mTexture, drawBounds );
    }
}
void camerasApp::draw()
{
    gl::clear( Color( 0, 0.1f, 0.2f ) );

    // draw the cube
    gl::pushMatrices();
    gl::translate( getWindowCenter() );
    gl::rotate( mArcball.getQuat() );
    if( mTexture ) {
        mTexture->enableAndBind();
        gl::drawCube( Vec3f::zero(), Vec3f( 320, 320, 320 ) );
        mTexture->unbind();
    }
    gl::popMatrices();
}
void HighDynamicRangeApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );

    gl::ScopedGlslProg shaderScp( mShader );
    gl::ScopedTextureBind texBindScp( mHdrTexture );
    mShader->uniform( "uExposure", mExposure );
    gl::drawSolidRect( mHdrTexture->getBounds() );
}
void ShaderToyApp::setUniforms()
{
    auto shader = gl::context()->getGlslProg();
    if( ! shader )
        return;

    // Calculate shader parameters.
    vec3 iResolution( vec2( getWindowSize() ), 1 );
    float iGlobalTime = (float)getElapsedSeconds();
    float iChannelTime0 = (float)getElapsedSeconds();
    float iChannelTime1 = (float)getElapsedSeconds();
    float iChannelTime2 = (float)getElapsedSeconds();
    float iChannelTime3 = (float)getElapsedSeconds();
    vec3 iChannelResolution0 = mChannel0 ? vec3( mChannel0->getSize(), 1 ) : vec3( 1 );
    vec3 iChannelResolution1 = mChannel1 ? vec3( mChannel1->getSize(), 1 ) : vec3( 1 );
    vec3 iChannelResolution2 = mChannel2 ? vec3( mChannel2->getSize(), 1 ) : vec3( 1 );
    vec3 iChannelResolution3 = mChannel3 ? vec3( mChannel3->getSize(), 1 ) : vec3( 1 );

    time_t now = time( 0 );
    tm* t = gmtime( &now );
    vec4 iDate( float( t->tm_year + 1900 ), float( t->tm_mon + 1 ), float( t->tm_mday ),
                float( t->tm_hour * 3600 + t->tm_min * 60 + t->tm_sec ) );

    // Set shader uniforms.
    shader->uniform( "iResolution", iResolution );
    shader->uniform( "iGlobalTime", iGlobalTime );
    shader->uniform( "iChannelTime[0]", iChannelTime0 );
    shader->uniform( "iChannelTime[1]", iChannelTime1 );
    shader->uniform( "iChannelTime[2]", iChannelTime2 );
    shader->uniform( "iChannelTime[3]", iChannelTime3 );
    shader->uniform( "iChannelResolution[0]", iChannelResolution0 );
    shader->uniform( "iChannelResolution[1]", iChannelResolution1 );
    shader->uniform( "iChannelResolution[2]", iChannelResolution2 );
    shader->uniform( "iChannelResolution[3]", iChannelResolution3 );
    shader->uniform( "iMouse", mMouse );
    shader->uniform( "iChannel0", 0 );
    shader->uniform( "iChannel1", 1 );
    shader->uniform( "iChannel2", 2 );
    shader->uniform( "iChannel3", 3 );
    shader->uniform( "iDate", iDate );
}
void InstascopeApp::updateMirrors( vector<TrianglePiece> *vec )
{
    if( ! mMirrorTexture )
        return;

    vec2 mSamplePt1( -0.5, -( sin( M_PI / 3 ) / 3 ) );
    vec2 mSamplePt2( mSamplePt1.x + 1, mSamplePt1.y );
    vec2 mSamplePt3( mSamplePt1.x + ( cos( M_PI / 3 ) ), mSamplePt1.y + ( sin( M_PI / 3 ) ) );

    mat3 mtrx( 1.0f );
    mtrx = glm::translate( mtrx, mSamplePt.value() );
    mtrx = glm::scale( mtrx, vec2( mSampleSize ) );
    mtrx = glm::rotate( mtrx, float( ( getElapsedFrames() * 4 ) / 2 * M_PI ) );

    mSamplePt1 = vec2( mtrx * vec3( mSamplePt1, 1.0 ) );
    mSamplePt2 = vec2( mtrx * vec3( mSamplePt2, 1.0 ) );
    mSamplePt3 = vec2( mtrx * vec3( mSamplePt3, 1.0 ) );

    mSamplePt1 /= mMirrorTexture->getSize();
    mSamplePt2 /= mMirrorTexture->getSize();
    mSamplePt3 /= mMirrorTexture->getSize();

    // loop through all the pieces and pass along the current texture and its coordinates
    int outCount = 0;
    int inCount = 0;
    for( int i = 0; i < vec->size(); i++ ) {
        (*vec)[i].update( mMirrorTexture, mSamplePt1, mSamplePt2, mSamplePt3 );
        if( (*vec)[i].isOut() )
            outCount++;
        if( (*vec)[i].isIn() )
            inCount++;
    }

    // if all are out, then make a new mirror grid
    if( outCount > 0 && outCount == mTriPieces.size() ) {
        mirrorOut();
    }
    // if all the pieces are in
    if( inCount > 0 && inCount == mTriPieces.size() && ! mPiecesIn ) {
        mPiecesIn = true;
        mirrorIn();
    }
}
void AsyncTextureLoadingApp::setup()
{
    // Enable alpha blending in case our image supports it.
    gl::enableAlphaBlending();

    // Load the image in a separate thread and return the ImageSourceRef.
    auto asyncLoad = []( DataSourceRef dataSource ) {
        ImageSourceRef imageSource = loadImage( dataSource );
        return imageSource;
    };

    // The second callback is executed in the main thread, so any OpenGL resources can be created here.
    auto textureCreation = [this]( ImageSourceRef imageSource ) {
        mTexture = gl::Texture::create( imageSource );
        // It's ok to do that in the main thread:
        setWindowSize( mTexture->getWidth(), mTexture->getHeight() );
    };

    // Use the templated version if you want to pass an object from the loading thread to the main thread.
    // Because we are providing the load function with two callbacks, there's no need to specify
    // Options().asynchronous() as we would with only one callback.
    AssetManager::load<ImageSourceRef>( "cinder_logo_alpha.png", asyncLoad, textureCreation );

    // The following does exactly the same but is shorter.
    /*
    AssetManager::load<ImageSourceRef>( "cinder_logo_alpha.png",
        // Load the image in a separate thread and return the ImageSourceRef.
        [this]( DataSourceRef dataSource ) {
            ImageSourceRef imageSource = loadImage( dataSource );
            return imageSource;
        },
        // The second callback is executed in the main thread, so any OpenGL resources can be created here.
        [this]( ImageSourceRef imageSource ) {
            mTexture = gl::Texture::create( imageSource );
        } );
    */
}
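// A minimal companion draw() sketch (assumes the member names used in setup() above):
// mTexture stays null until the main-thread callback has run, so guard before drawing.
void AsyncTextureLoadingApp::draw()
{
    gl::clear();
    if( mTexture )
        gl::draw( mTexture, getWindowBounds() );
}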
void ImageFileBasicApp::draw()
{
    gl::clear( Color( 0.5f, 0.5f, 0.5f ) );
    gl::enableAlphaBlending();

    if( mTexture ) {
        Rectf destRect = Rectf( mTexture->getBounds() ).getCenteredFit( getWindowBounds(), true ).scaledCentered( 0.85f );
        gl::draw( mTexture, destRect );
    }

    m_params->draw();
}
void InstascopeApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    gl::enableAlphaBlending( PREMULT );

    if( mBgTexture )
        gl::draw( mBgTexture, Rectf( mBgTexture->getBounds() ).getCenteredFit( getWindowBounds(), true ) );

    drawMirrors( &mTriPieces );
    mTextRibbon->draw();
}
void RotatingCubeApp::draw()
{
    gl::clear();
    gl::setMatrices( mCam );

    gl::ScopedModelMatrix modelScope;
    gl::multModelMatrix( mCubeRotation );

    mTexture->bind();
    mBatch->draw();
}
void Clone::maskedBlur( gl::TextureRef tex, gl::TextureRef mask, gl::FboRef result )
{
    gl::ScopedTextureBind t2( mask, 2 );
    gl::ScopedGlslProg glsl( mMaskBlurShader );

    // horizontal pass into the intermediate buffer
    {
        gl::ScopedFramebuffer fbo( mBufferFbo );
        gl::clear( ColorA::black(), false );
        gl::ScopedTextureBind t1( tex, 1 );
        mMaskBlurShader->uniform( "direction", vec2( 1, 0 ) );
        gl::drawSolidRect( tex->getBounds() );
    }
    // vertical pass into the result buffer
    {
        gl::ScopedFramebuffer fbo( result );
        gl::clear( ColorA::black(), false );
        gl::ScopedTextureBind t1( mBufferFbo->getColorTexture(), 1 );
        mMaskBlurShader->uniform( "direction", vec2( 0, 1 ) );
        gl::drawSolidRect( tex->getBounds() );
    }
}
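// Setup sketch for the two-pass blur above (illustrative only; the real setup is not shown).
// maskedBlur() binds the image on texture unit 1 and the mask on unit 2, so the shader's
// samplers would be pointed at those units; the sampler names and FBO sizes here are assumptions.
//
// mBufferFbo  = gl::Fbo::create( width, height );   // intermediate target for the horizontal pass
// mSrcBlurFbo = gl::Fbo::create( width, height );
// mDstBlurFbo = gl::Fbo::create( width, height );
// mMaskBlurShader->uniform( "tex",  1 );
// mMaskBlurShader->uniform( "mask", 2 );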
void NormalMappingBasicApp::setup()
{
    mCam.lookAt( vec3( 3, 2, 4 ), vec3( 0 ) );

    mDiffuseTex = gl::Texture::create( loadImage( loadAsset( "diffuseMap.jpg" ) ), gl::Texture::Format().mipmap() );
    mDiffuseTex->bind();
    mNormalTex = gl::Texture::create( loadImage( loadAsset( "normalMap.png" ) ), gl::Texture::Format().mipmap() );
    mNormalTex->bind( 1 );

#if defined( CINDER_GL_ES )
    mGlsl = gl::GlslProg::create( loadAsset( "shader_es2.vert" ), loadAsset( "shader_es2.frag" ) );
#else
    mGlsl = gl::GlslProg::create( loadAsset( "shader.vert" ), loadAsset( "shader.frag" ) );
#endif

    mBatch = gl::Batch::create( geom::Cube() >> geom::Transform( scale( vec3( 1.5f ) ) ), mGlsl );

    gl::ScopedGlslProg glslScp( mGlsl );
    mGlsl->uniform( "uDiffuseMap", 0 );
    mGlsl->uniform( "uNormalMap", 1 );
    mGlsl->uniform( "uLightLocViewSpace", vec3( 0, 0, 1 ) );

    gl::enableDepthWrite();
    gl::enableDepthRead();
}
void Emitter::iterateListExist()
{
    gl::enable( GL_TEXTURE_2D );
    particleImg->bind();

    for( list<Particle>::iterator it = particles.begin(); it != particles.end(); ) {
        if( ! it->ISDEAD ) {
            it->exist();
            ++it;
        }
        else {
            it = particles.erase( it );
        }
    }
}
void ArcballTestApp::draw()
{
    CameraPersp &cam = ( mUsingCameraUi ) ? mDebugCam : mCam;
    gl::clear( Color( 0, 0.0f, 0.15f ) );
    gl::setMatrices( cam );

    // draw the earth
    gl::enableDepthRead();
    gl::enableDepthWrite();
    gl::translate( mEarthSphere.getCenter() );
    gl::rotate( mArcball.getQuat() );
    mEarthTex->bind();
    mEarth->draw();

    // draw constraint axis
    if( mArcball.isUsingConstraint() ) {
        gl::setMatrices( cam );
        gl::color( 1, 1, 0 );
        gl::translate( mEarthSphere.getCenter() );
        gl::rotate( glm::rotation( vec3( 0, 1, 0 ), mArcball.getConstraintAxis() ) );
        mConstraintAxis->draw();
    }

    gl::disableDepthRead();

    // draw from vector marker
    gl::setMatrices( cam );
    gl::color( 0, 1, 0.25f );
    gl::translate( mEarthSphere.getCenter() + mArcball.getFromVector() * mEarthSphere.getRadius() );
    mMarker->draw();

    // draw to vector marker
    gl::setMatrices( cam );
    gl::color( 1, 0.5f, 0.25f );
    gl::translate( mEarthSphere.getCenter() + mArcball.getToVector() * mEarthSphere.getRadius() );
    mMarker->draw();

    // draw the elliptical axes
    gl::setMatricesWindow( getWindowSize() );
    gl::color( 1, 0, 0 );
    vec2 center, axisA, axisB;
    mCam.calcScreenProjection( mEarthSphere, getWindowSize(), &center, &axisA, &axisB );
    gl::drawLine( center - axisA, center + axisA );
    gl::drawLine( center - axisB, center + axisB );
}
void InstancedTeapotsApp::setup()
{
    mCam.lookAt( vec3( 0, CAMERA_Y_RANGE.first, 0 ), vec3( 0 ) );

    mTexture = gl::Texture::create( loadImage( loadAsset( "texture.jpg" ) ), gl::Texture::Format().mipmap() );
#if ! defined( CINDER_GL_ES )
    mGlsl = gl::GlslProg::create( loadAsset( "shader.vert" ), loadAsset( "shader.frag" ) );
#elif defined( CINDER_GL_ES_3 )
    mGlsl = gl::GlslProg::create( loadAsset( "shader_es3.vert" ), loadAsset( "shader_es3.frag" ) );
#else
    mGlsl = gl::GlslProg::create( loadAsset( "shader_es2.vert" ), loadAsset( "shader_es2.frag" ) );
#endif

    gl::VboMeshRef mesh = gl::VboMesh::create( geom::Teapot().subdivisions( 4 ) );

    // create an array of initial per-instance positions laid out in a 2D grid
    std::vector<vec3> positions;
    for( size_t potX = 0; potX < NUM_INSTANCES_X; ++potX ) {
        for( size_t potY = 0; potY < NUM_INSTANCES_Y; ++potY ) {
            float instanceX = potX / (float)NUM_INSTANCES_X - 0.5f;
            float instanceY = potY / (float)NUM_INSTANCES_Y - 0.5f;
            positions.push_back( vec3( instanceX * vec3( DRAW_SCALE, 0, 0 ) + instanceY * vec3( 0, 0, DRAW_SCALE ) ) );
        }
    }

    // create the VBO which will contain per-instance (rather than per-vertex) data
    mInstanceDataVbo = gl::Vbo::create( GL_ARRAY_BUFFER, positions.size() * sizeof( vec3 ), positions.data(), GL_DYNAMIC_DRAW );

    // we need a geom::BufferLayout to describe this data as mapping to the CUSTOM_0 semantic,
    // and the 1 (rather than 0) as the last param indicates per-instance (rather than per-vertex)
    geom::BufferLayout instanceDataLayout;
    instanceDataLayout.append( geom::Attrib::CUSTOM_0, 3, 0, 0, 1 /* per instance */ );

    // now add it to the VboMesh we already made of the Teapot
    mesh->appendVbo( instanceDataLayout, mInstanceDataVbo );

    // and finally, build our batch, mapping our CUSTOM_0 attribute to the "vInstancePosition" GLSL vertex attribute
    mBatch = gl::Batch::create( mesh, mGlsl, { { geom::Attrib::CUSTOM_0, "vInstancePosition" } } );

    gl::enableDepthWrite();
    gl::enableDepthRead();

    mTexture->bind();
}
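// A hedged sketch of the corresponding draw(): gl::Batch::drawInstanced() issues a single
// instanced draw call, and the per-instance CUSTOM_0 attribute set up above feeds
// "vInstancePosition" for each teapot. NUM_INSTANCES_X/Y are the same constants used in setup().
void InstancedTeapotsApp::draw()
{
    gl::clear( Color::black() );
    gl::setMatrices( mCam );
    mBatch->drawInstanced( NUM_INSTANCES_X * NUM_INSTANCES_Y );
}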
void ImageFileBasicApp::keyDown( KeyEvent event )
{
    if( event.getChar() == 'o' ) {
        fs::path path = getOpenFilePath( "", ImageIo::getLoadExtensions() );
        if( ! path.empty() )
            mTexture = gl::Texture::create( loadImage( path ) );
    }
    else if( event.getChar() == 's' ) {
        fs::path path = getSaveFilePath();
        if( ! path.empty() ) {
            Surface s8( mTexture->createSource() );
            writeImage( writeFile( path ), s8 );
        }
    }
    else if( event.getChar() == 'f' ) {
        setFullScreen( ! isFullScreen() ); // Works
    }
}
void ObjLoaderApp::setup()
{
#if defined( CINDER_GL_ES )
    mGlsl = gl::GlslProg::create( loadAsset( "shader_es2.vert" ), loadAsset( "shader_es2.frag" ) );
#else
    mGlsl = gl::GlslProg::create( loadAsset( "shader.vert" ), loadAsset( "shader.frag" ) );
#endif
    mGlsl->uniform( "uTex0", 0 );

    mCam.setPerspective( 45.0f, getWindowAspectRatio(), 0.1, 10000 );
    mCamUi = CameraUi( &mCam );

    mCheckerTexture = gl::Texture::create( ip::checkerboard( 512, 512, 32 ) );
    mCheckerTexture->bind( 0 );

    loadObj( loadResource( RES_8LBS_OBJ ) );

    mArcball = Arcball( &mCam, mBoundingSphere );
}
void Clone::update( gl::TextureRef src, gl::TextureRef dst, gl::TextureRef mask )
{
    mMaskBlurShader->uniform( "strength", mStrength );
    maskedBlur( src, mask, mSrcBlurFbo );
    maskedBlur( dst, mask, mDstBlurFbo );

    {
        gl::ScopedFramebuffer fbo( mBufferFbo );
        gl::ScopedBlendAlpha blend;

        gl::ScopedGlslProg glslTexOnly( gl::getStockShader( gl::ShaderDef().texture() ) );
        gl::draw( dst );

        gl::ScopedGlslProg glsl( mCloneShader );
        gl::ScopedTextureBind t1( src, 1 );
        gl::ScopedTextureBind t2( mSrcBlurFbo->getColorTexture(), 2 );
        gl::ScopedTextureBind t3( mDstBlurFbo->getColorTexture(), 3 );
        gl::drawSolidRect( src->getBounds() );
    }
}
void ShaderToyApp::draw()
{
    // Bind textures.
    if( mChannel0 ) mChannel0->bind( 0 );
    if( mChannel1 ) mChannel1->bind( 1 );
    if( mChannel2 ) mChannel2->bind( 2 );
    if( mChannel3 ) mChannel3->bind( 3 );

    // Render the current shader to a frame buffer.
    if( mShaderCurrent && mBufferCurrent ) {
        gl::ScopedFramebuffer fbo( mBufferCurrent );

        // Bind shader.
        gl::ScopedGlslProg shader( mShaderCurrent );
        setUniforms();

        // Clear buffer and draw full screen quad (flipped).
        gl::clear();
        gl::drawSolidRect( Rectf( 0, (float)getWindowHeight(), (float)getWindowWidth(), 0 ) );
    }

    // Render the next shader to a frame buffer.
    if( mShaderNext && mBufferNext ) {
        gl::ScopedFramebuffer fbo( mBufferNext );

        // Bind shader.
        gl::ScopedGlslProg shader( mShaderNext );
        setUniforms();

        // Clear buffer and draw full screen quad (flipped).
        gl::clear();
        gl::drawSolidRect( Rectf( 0, (float)getWindowHeight(), (float)getWindowWidth(), 0 ) );
    }

    // Perform a cross-fade between the two shaders.
    double time = getElapsedSeconds() - mTransitionTime;
    double fade = math<double>::clamp( time / mTransitionDuration, 0.0, 1.0 );

    if( fade <= 0.0 ) {
        // Transition has not yet started. Keep drawing current buffer.
        gl::draw( mBufferCurrent->getColorTexture(), getWindowBounds() );
    }
    else if( fade < 1.0 ) {
        // Transition is in progress.
        // Use a transition shader to avoid having to draw one buffer on top of another.
        gl::ScopedTextureBind tex0( mBufferCurrent->getColorTexture(), 0 );
        gl::ScopedTextureBind tex1( mBufferNext->getColorTexture(), 1 );

        gl::ScopedGlslProg shader( mShaderTransition );
        mShaderTransition->uniform( "iSrc", 0 );
        mShaderTransition->uniform( "iDst", 1 );
        mShaderTransition->uniform( "iFade", (float)fade );

        gl::drawSolidRect( getWindowBounds() );
    }
    else if( mShaderNext ) {
        // Transition is done. Swap shaders.
        gl::draw( mBufferNext->getColorTexture(), getWindowBounds() );

        mShaderCurrent = mShaderNext;
        mShaderNext.reset();

        mPathCurrent = mPathNext;
        mPathNext.clear();

        getWindow()->setTitle( std::string( "ShaderToyApp - Showing " ) + mPathCurrent.filename().string() );
    }
    else {
        // No transition in progress.
        gl::draw( mBufferCurrent->getColorTexture(), getWindowBounds() );
    }
}
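// A hedged sketch of how the cross-fade above might be kicked off once a replacement shader
// has compiled (member names mirror the ones used in draw(); the local variables and the
// duration value are illustrative):
//
// mShaderNext = newlyCompiledShader;
// mPathNext   = newlyCompiledPath;
// mTransitionTime     = getElapsedSeconds();
// mTransitionDuration = 2.0;   // seconds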
void MeshViewApp::draw()
{
    // Clear the window
    gl::clear();
    gl::color( Color::white() );

    if( isInitialized() ) {
        // Get ready to draw in 3D
        gl::pushMatrices();
        gl::setMatrices( m_camera );

        gl::enableDepthRead();
        gl::enableDepthWrite();

        // Bind textures
        if( m_texDiffuse )  m_texDiffuse->enableAndBind();
        if( m_texNormal )   m_texNormal->bind( 1 );
        if( m_texSpecular ) m_texSpecular->bind( 2 );
        if( m_texAO )       m_texAO->bind( 3 );
        if( m_texEmissive ) m_texEmissive->bind( 4 );

        // Bind shader
        m_shader->bind();
        m_shader->uniform( "texDiffuse", 0 );
        m_shader->uniform( "texNormal", 1 );
        m_shader->uniform( "texSpecular", 2 );
        m_shader->uniform( "texAO", 3 );
        m_shader->uniform( "texEmissive", 4 );
        m_shader->uniform( "texDiffusePower", m_texDiffusePower );
        m_shader->uniform( "texNormalPower", m_texNormalPower );
        m_shader->uniform( "texSpecularPower", m_texSpecularPower );
        m_shader->uniform( "texAOPower", m_texAOPower );
        m_shader->uniform( "texEmissivePower", m_texEmissivePower );
        m_shader->uniform( "diffuseEnabled", m_diffuseEnabled );
        m_shader->uniform( "normalEnabled", m_normalEnabled );
        m_shader->uniform( "specularEnabled", m_specularEnabled );
        m_shader->uniform( "aoEnabled", m_aoEnabled );
        m_shader->uniform( "emissiveEnabled", m_emissiveEnabled );
        m_shader->uniform( "material.Ka", m_matAmbient );
        m_shader->uniform( "material.Kd", m_matDiffuse );
        m_shader->uniform( "material.Ks", m_matSpecular );
        m_shader->uniform( "material.Shininess", m_matShininess );
        m_shader->uniform( "gamma", m_gamma );

        // Enable lights
        m_light1->enable();
        m_light2->enable();

        // Render model
        gl::pushModelView();
        gl::multModelView( m_matrix );
        m_assimpLoader.draw();
        gl::popModelView();

        // Disable lights
        m_light1->disable();
        m_light2->disable();

        // Unbind shader
        m_shader->unbind();

        // Unbind textures
        gl::disable( m_texDiffuse->getTarget() );

        // Disable 3D rendering
        gl::disableDepthWrite();
        gl::disableDepthRead();

        // Restore matrices
        gl::popMatrices();

        // Enable 2D rendering
        gl::setMatricesWindow( getWindowSize() );
        gl::setViewport( getWindowBounds() );

        // Render parameter window
        if( m_params )
            m_params->draw();
    }

    // Render debug information
    Debug::get().draw( ColorAf::white() );
}
void ImageSequenceLoaderApp::setupAE()
{
    // The selected AV layer's source must be an image sequence.
    loader_.load( getSourcePath() );
    texture_.reset();
}
void MotionTrackingTestApp::draw()
{
    // gl::setViewport( getWindowBounds() );

    // clear out the window
    gl::clear( Color( 1, 1, 1 ) );

    // if( mSurface ){
    //     if( mTexture ){
    //         mTexture->update( mSurface );
    //     } else {
    //         mTexture = gl::Texture::create( mSurface );
    //     }
    //     gl::draw( mTexture, mTexture->getBounds(), getWindowBounds() );
    // }

    if( mSurfaceDepth ) {
        if( mTextureDepth ) {
            mTextureDepth->update( Channel32f( mSurfaceDepth ) );
        }
        else {
            mTextureDepth = gl::Texture::create( Channel32f( mSurfaceDepth ) );
        }
        gl::color( Color::white() );
        gl::draw( mTextureDepth, mTextureDepth->getBounds() );
    }

    gl::pushMatrices();
    gl::translate( Vec2f( 320, 0 ) );
    if( mSurfaceBlur ) {
        if( mTextureDepth ) {
            mTextureDepth->update( Channel32f( mSurfaceBlur ) );
        }
        else {
            mTextureDepth = gl::Texture::create( Channel32f( mSurfaceBlur ) );
        }
        gl::draw( mTextureDepth, mTextureDepth->getBounds() );
    }

    gl::translate( Vec2f( 0, 240 ) );
    if( mSurfaceSubtract ) {
        if( mTextureDepth ) {
            mTextureDepth->update( Channel32f( mSurfaceSubtract ) );
        }
        else {
            mTextureDepth = gl::Texture::create( Channel32f( mSurfaceSubtract ) );
        }
        gl::draw( mTextureDepth, mTextureDepth->getBounds() );
    }

    gl::translate( Vec2f( -320, 0 ) );
    for( ContourVector::iterator iter = mContours.begin(); iter != mContours.end(); ++iter ) {
        glBegin( GL_LINE_LOOP );
        for( vector<cv::Point>::iterator pt = iter->begin(); pt != iter->end(); ++pt ) {
            gl::color( Color( 1.0f, 0.0f, 0.0f ) );
            gl::vertex( fromOcv( *pt ) );
        }
        glEnd();
    }

    gl::translate( Vec2f( 0, 240 ) );
    for( int i = 0; i < mTrackedShapes.size(); i++ ) {
        glBegin( GL_POINTS );
        for( int j = 0; j < mTrackedShapes[i].hull.size(); j++ ) {
            gl::color( Color( 1.0f, 0.0f, 0.0f ) );
            gl::vertex( fromOcv( mTrackedShapes[i].hull[j] ) );
        }
        glEnd();
    }
    gl::popMatrices();

    mParams->draw();
}
void ProceduralAnimApp::draw()
{
    // clear out the window
    gl::clear( Color( 0.4f, 0.5f, 0.6f ) );

    gl::setMatrices( mMayaCam.getCamera() );

    gl::enableDepthRead();

    gl::Light light( gl::Light::DIRECTIONAL, 0 );
    light.setAmbient( Color::white() );
    light.setDiffuse( Color::white() );
    light.setSpecular( Color::white() );
    light.lookAt( mLightPos, Vec3f::zero() );
    light.update( mMayaCam.getCamera() );
    light.enable();

    gl::pushMatrices();
    float e = (float)getElapsedSeconds();
    gl::translate( math<float>::sin( 1.3f * e ), math<float>::cos( 2.7f * e ) );

    gl::enable( GL_LIGHTING );
    gl::enable( GL_NORMALIZE );

    if( mEnableWireframe )
        gl::enableWireframe();
    if( mDrawMesh ) {
        SkinningRenderer::draw( mSkinnedVboBird );
    }
    if( mEnableWireframe )
        gl::disableWireframe();

    gl::disable( GL_LIGHTING );
    gl::disable( GL_NORMALIZE );

    if( mDrawSkeleton ) {
        SkinningRenderer::draw( mSkinnedVboBird->getSkeleton() );
    }
    if( mDrawLabels ) {
        SkinningRenderer::drawLabels( mSkinnedVboBird->getSkeleton(), mMayaCam.getCamera() );
    }
    gl::popMatrices();

    Vec3f mRight, mUp;
    mMayaCam.getCamera().getBillboardVectors( &mRight, &mUp );

    // gl::disableDepthRead();
    gl::disableDepthWrite();
    gl::enableAlphaBlending();

    dustParticleTex->enableAndBind();
    mDustShader->bind();
    mDustShader->uniform( "tex", 0 );
    for( const auto &d : mDust ) {
        mDustShader->uniform( "transparency", 1.0f - math<float>::abs( d.z ) / ( SCENE_SIZE / 2.0f ) );
        gl::drawBillboard( d, Vec2f( 60.f, 60.f ), 0.0f, mRight, mUp );
    }
    mDustShader->unbind();
    dustParticleTex->unbind();

    mParams.draw();
}
void Emitter::render()
{
    emitterImg->bind();
    renderImage( loc, 150, myColor, 1.0 );
}