void rayMarcherApp::draw()
{
    glClearColor( 0, 0, 0, 0 );
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
    glEnable( GL_DEPTH_TEST );
    glEnable( GL_LIGHTING );
    glDepthMask( GL_TRUE );
    glDisable( GL_TEXTURE_2D );

    gl::setMatrices( mMayaCam.getCamera() );
    mMarcher.renderSceneGL();

    gl::setMatricesWindow( getWindowSize() );

    // draw as much of the texture as we've rendered
    glDisable( GL_LIGHTING );
    glDepthMask( GL_TRUE );
    glDisable( GL_DEPTH_TEST );
    glColor3f( 1, 1, 1 );
    mImageTexture.enableAndBind();
    glBegin( GL_QUADS );
        glTexCoord2f( mImageTexture.getLeft(), mImageTexture.getTop() );
        glVertex2f( 0, 0 );
        glTexCoord2f( mImageTexture.getLeft(), mImageTexture.getBottom() * mCurrentLine / mImageTexture.getHeight() );
        glVertex2f( 0, mCurrentLine );
        glTexCoord2f( mImageTexture.getRight(), mImageTexture.getBottom() * mCurrentLine / mImageTexture.getHeight() );
        glVertex2f( mImageTexture.getWidth(), mCurrentLine );
        glTexCoord2f( mImageTexture.getRight(), mImageTexture.getTop() );
        glVertex2f( mImageTexture.getWidth(), 0 );
    glEnd();
}
void ImageRetargetingApp::drawSeamCarvingWindow()
{
    gl::clear( Color( 0.f, 0.f, 0.f ) );

    switch( seamCarvingState ) {
        case SeamCarvingState::ShowImage:
            if( seamCarvedTexture ) {
                gl::draw( seamCarvedTexture );
            }
            break;
        case SeamCarvingState::ShowGradient:
            if( gradientTexture ) {
                gl::draw( gradientTexture );
            }
            break;
        case SeamCarvingState::SeamCarving:
            if( seamCarvedTexture ) {
                int dw = seamCarver->newWidth  - seamCarvedImage.getWidth();
                int dh = seamCarver->newHeight - seamCarvedImage.getHeight();
                if( dw < 0 && dh < 0 ) {
                    // both dimensions still too large; currently handled the same as the width-only case
                    seamCarvingWindow->setSize( seamCarvedTexture.getWidth() - 1, seamCarvedTexture.getHeight() );
                    seamCarvedImage = seamCarver->deleteVerticalSeam( seamCarvedImage );
                }
                else if( dw < 0 ) {
                    seamCarvingWindow->setSize( seamCarvedTexture.getWidth() - 1, seamCarvedTexture.getHeight() );
                    seamCarvedImage = seamCarver->deleteVerticalSeam( seamCarvedImage );
                }
                else if( dh < 0 ) {
                    seamCarvingWindow->setSize( seamCarvedTexture.getWidth(), seamCarvedTexture.getHeight() - 1 );
                    seamCarvedImage = seamCarver->deleteHorizontalSeam( seamCarvedImage );
                }
                // TODO: add seam insertion for enlarging
                else if( dw >= 0 && dh >= 0 ) {
                    seamCarvingState = SeamCarvingState::ShowImage;
                    seamCarver->stopCarveTimer();
                }
                seamCarvedTexture = gl::Texture( seamCarvedImage );
                gl::draw( seamCarvedTexture );
            }
            break;
        case SeamCarvingState::Undefined:
            break;
    }
    seamCarvingParams->draw();
}
void PhotoBoothApp::touchesBegan( TouchEvent event )
{
    TouchEvent::Touch touch = event.getTouches().front();
    Vec2f cameraButtonTargetPos = Vec2f( mCameraButtonPos.value() );
    float touchX = touch.getX() / DISPLAY_SCALE;
    float touchY = touch.getY() / DISPLAY_SCALE;

    switch( mCurrentState ) {
        case STATE_PREVIEW:
            // see if the camera icon has been tapped (touch coordinates are reversed for landscape mode)
            cameraButtonTargetPos.x += mCameraButtonTexture.getWidth() / 2.0f;
            cameraButtonTargetPos.y += mCameraButtonTexture.getHeight() / 2.0f;
            if( cameraButtonTargetPos.distance( Vec2f( touchX, touchY ) ) < (mCameraButtonTexture.getWidth() * 2) ) {
                mCountDownStartTime = getElapsedSeconds();
                mCurrentState = STATE_COUNT_DOWN;
            }
            break;

        case STATE_COUNT_DOWN:
            // stub..
            break;

        case STATE_ACCEPT:
            // only look for touches near the bottom of the screen
            if( touchY > 1280 ) {
                // just split the screen in half; no need to do precise hit detection for the save/cancel buttons
                if( touchX > width / 2 ) {
                    ip::flipVertical( &mCameraSurface );
                    cinder::cocoa::SafeUiImage img = cocoa::createUiImage( mCameraSurface );

                    // Call into Objective-C to do the upload via Cocoa
                    FileSender::sendFile( img );

                    timeline().apply( &mPreviewTexturePos, Vec2f( 0, -height ), 1.0f, EaseInCubic() );
                }
                else {
                    timeline().apply( &mPreviewTexturePos, Vec2f( 0, height ), 1.0f, EaseInBack() );
                }

                mCurrentState = STATE_PREVIEW;
                timeline().apply( &mDarkBgAlpha, 0.0f, 1.0f, EaseInCubic() );

                // Hide buttons
                timeline().apply( &mDiscardPos, Vec2f( 100, height + 100 ), 1.0f, EaseInCubic() );
                timeline().apply( &mSavePos, Vec2f( width - 700, height + 100 ), 1.0f, EaseInCubic() );
            }
            break;
    }
}
void AudioObjApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    gl::enableAlphaBlending();
    gl::enableDepthRead();
    gl::enableDepthWrite();

    gl::pushMatrices();
    gl::setMatrices( mMayaCam.getCamera() );

    if ( mFeature && mFeatureTex ) {
        mShader->bind();
        mFeatureTex.enableAndBind();
        mShader->uniform( "dataTex",       0 );
        mShader->uniform( "texWidth",      (float)mFeatureTex.getWidth() );
        mShader->uniform( "texHeight",     (float)mFeatureTex.getHeight() );
        mShader->uniform( "soundDataSize", (float)mFeature->getSize() );
        mShader->uniform( "spread",        mFeatureSpread );
        mShader->uniform( "spreadOffset",  mFeatureSpreadOffset );
        mShader->uniform( "time",          (float)getElapsedSeconds() );
        mShader->uniform( "tintColor",     mObjColor );
    }

    if ( mRenderWireframe )
        gl::enableWireframe();

    gl::color( Color( 1.0f, 0.0f, 0.0f ) );

    if ( mVbo )
        gl::draw( mVbo );

    if ( mRenderWireframe )
        gl::disableWireframe();

    if ( mFeature && mFeatureTex ) {    // only unbind what was actually bound above
        mShader->unbind();
        mFeatureTex.unbind();
    }

    gl::color( Color::white() );
    // gl::drawCoordinateFrame();

    gl::popMatrices();

    gl::disableDepthRead();
    gl::disableDepthWrite();

    gl::setMatricesWindow( getWindowSize() );

    ciXtractReceiver::drawData( mFeature, Rectf( 15, getWindowHeight() - 150, 255, getWindowHeight() - 35 ) );

    gl::draw( mFeatureSurf );

    mParams->draw();
}
void TextTestApp::draw()
{
    // this pair of lines is the standard way to clear the screen in OpenGL
    glClearColor( 0.1f, 0.1f, 0.1f, 1.0f );
    glClear( GL_COLOR_BUFFER_BIT );

    gl::setMatricesWindow( getWindowSize() );
    gl::enableAlphaBlending( PREMULT );

    gl::color( Color::white() );
    gl::draw( mTexture, Vec2f( 10, 10 ) );
    gl::draw( mSimpleTexture, Vec2f( 10, getWindowHeight() - mSimpleTexture.getHeight() - 5 ) );
}
void WaterSimApp::drawInfo()
{
    const int LOGO_OFFSET_X = 15, LOGO_OFFSET_Y = 20;

    glEnable( GL_BLEND );
    glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
    gl::setMatricesWindow( getWindowSize() );
    glEnable( GL_TEXTURE_2D );
    mInfo.bind();
    glColor4f( 1.0f, 1.0f, 1.0f, 1.0f );

    Vec2f center( getWindowWidth() - mInfo.getWidth() / 2.0f - LOGO_OFFSET_X, mInfo.getHeight() / 2.0f + LOGO_OFFSET_Y );
    float halfWidth = mInfo.getWidth() / 2.0f, halfHeight = mInfo.getHeight() / 2.0f;
    glTranslatef( center.x, center.y, 0 );

    glBegin( GL_QUADS );
        glTexCoord2f( 0, 0 );
        glVertex2f( -halfWidth, -halfHeight );
        glTexCoord2f( 1.0, 0.0f );
        glVertex2f( halfWidth, -halfHeight );
        glTexCoord2f( 1.0f, 1.0f );
        glVertex2f( halfWidth, halfHeight );
        glTexCoord2f( 0.0f, 1.0f );
        glVertex2f( -halfWidth, halfHeight );
    glEnd();

    glDisable( GL_TEXTURE_2D );
}
void QuickTimeSampleApp::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    gl::enableAlphaBlending();

    if( mFrameTexture ) {
        Rectf centeredRect = Rectf( mFrameTexture.getBounds() ).getCenteredFit( getWindowBounds(), true );
        gl::draw( mFrameTexture, centeredRect );
    }

    if( mInfoTexture ) {
        glDisable( GL_TEXTURE_RECTANGLE_ARB );
        gl::draw( mInfoTexture, Vec2f( 20, getWindowHeight() - 20 - mInfoTexture.getHeight() ) );
    }
}
void PhotoBoothApp::setup()
{
    // Start camera.
    try {
        vector<Capture::DeviceRef> devices( Capture::getDevices() );

        // Look for a camera called "Front Camera"
        for( vector<Capture::DeviceRef>::const_iterator deviceIt = devices.begin(); deviceIt != devices.end(); ++deviceIt ) {
            Capture::DeviceRef device = *deviceIt;
            if( device->getName() == "Front Camera" ) {
                mCapture = Capture( CAM_WIDTH, CAM_HEIGHT, device );
                mCapture.start();
            }
        }
    }
    catch( ... ) {
        console() << "Failed to initialize camera" << std::endl;
    }

    // Load textures
    mConfirmMessage      = loadImage( loadResource( "assets/confirm_message.png" ) );
    mSaveTexture         = loadImage( loadResource( "assets/save.png" ) );
    mDiscardTexture      = loadImage( loadResource( "assets/discard.png" ) );
    mCameraButtonTexture = loadImage( loadResource( "assets/camera.png" ) );
    mIntroTexture        = loadImage( loadResource( "assets/attract.png" ) );
    mLightBg             = loadImage( loadResource( "assets/bkg_light.png" ) );
    mDarkBg              = loadImage( loadResource( "assets/bkg_dark.png" ) );
    mNumberBg            = loadImage( loadResource( "assets/countdown_bkg.png" ) );
    mNumberProgress      = loadImage( loadResource( "assets/countdown_progress.png" ) );

    mNumberTextures.push_back( loadImage( loadResource( "assets/countdown_5.png" ) ) );
    mNumberTextures.push_back( loadImage( loadResource( "assets/countdown_4.png" ) ) );
    mNumberTextures.push_back( loadImage( loadResource( "assets/countdown_3.png" ) ) );
    mNumberTextures.push_back( loadImage( loadResource( "assets/countdown_2.png" ) ) );
    mNumberTextures.push_back( loadImage( loadResource( "assets/countdown_1.png" ) ) );

    width  = getWindowWidth()  / DISPLAY_SCALE;
    height = getWindowHeight() / DISPLAY_SCALE;

    mCurrentState = STATE_PREVIEW;

    mDiscardPos      = Vec2f( 100, height + 100 );
    mSavePos         = Vec2f( width - 700, height + 100 );
    mCameraButtonPos = Vec2f( width / 2 - mCameraButtonTexture.getWidth() / 2, 650 - mCameraButtonTexture.getHeight() / 2 );
}
void MovieBasicApp::draw()
{
    // clear out the window with black
    gl::clear( ColorA::black() );
    gl::enableAlphaBlending();

    if( mFrameTexture ) {
        Rectf centeredRect = Rectf( mFrameTexture.getBounds() ).getCenteredFit( getWindowBounds(), true );
        gl::draw( mFrameTexture, centeredRect );
    }

    if( mInfoTexture ) {
        // error
        // glDisable( GL_TEXTURE_RECTANGLE_ARB );
        gl::draw( mInfoTexture, Vec2f( 20, getWindowHeight() - 20 - mInfoTexture.getHeight() ) );
    }
}
void QuickTimePlayer::draw()
{
    gl::clear( Color( 0, 0, 0 ) );
    gl::enableAlphaBlending();

    if( mFrameTexture ) {
        Rectf centeredRect = Rectf( mFrameTexture.getBounds() ).getCenteredFit( getWindowBounds(), true );
        gl::draw( mFrameTexture, centeredRect );

        if( bInitialized ) {
            spoutsender.SendTexture( mFrameTexture.getId(), mFrameTexture.getTarget(), g_Width, g_Height, false );
        }
    }

    if( mInfoTexture ) {
        glDisable( GL_TEXTURE_RECTANGLE_ARB );
        gl::draw( mInfoTexture, Vec2f( 20, getWindowHeight() - 20 - mInfoTexture.getHeight() ) );
    }
}
void WaterSimApp::drawArrow()
{
    glEnable( GL_BLEND );
    glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
    gl::setMatricesWindow( getWindowSize() );
    glEnable( GL_TEXTURE_2D );
    mArrow.bind();
    glColor4f( 0.5f, 0.0f, 0.0f, 1.0f );

    Vec2f center( getWindowWidth() / 2.0f, getWindowHeight() / 2.0f );
    float halfWidth = mArrow.getWidth() / 2.0f, halfHeight = mArrow.getHeight() / 2.0f;
    glTranslatef( center.x, center.y, 0 );
    glRotatef( toDegrees( math<float>::atan2( -mGravityVector.y, mGravityVector.x ) ) + 90.0f, 0, 0, 1 );

    glBegin( GL_QUADS );
        glTexCoord2f( 0, 0 );
        glVertex2f( -halfWidth, -halfHeight );
        glTexCoord2f( 1.0, 0.0f );
        glVertex2f( halfWidth, -halfHeight );
        glTexCoord2f( 1.0f, 1.0f );
        glVertex2f( halfWidth, halfHeight );
        glTexCoord2f( 0.0f, 1.0f );
        glVertex2f( -halfWidth, halfHeight );
    glEnd();

    glDisable( GL_TEXTURE_2D );
}
void ardroneApp::draw()
{
    renderFbo.bindFramebuffer();

    gl::clear( Color( 0, 0, 0 ) );
    gl::color( Color::white() );

    if ( mFrameTexture ) {
        Rectf centeredRect = Rectf( mFrameTexture.getBounds() ).getCenteredFit( getWindowBounds(), true );
        gl::draw( mFrameTexture, centeredRect );
    }

    renderFbo.blitToScreen( renderFbo.getBounds(), getWindowBounds() );
    mSyphonServer.publishTexture( renderFbo.getTexture(), false );
    renderFbo.unbindFramebuffer(); // return rendering to the window's own frame buffer

    if( mInfoTexture ) {
        glDisable( GL_TEXTURE_RECTANGLE_ARB );
        gl::draw( mInfoTexture, Vec2f( 5, getWindowHeight() - 5 - mInfoTexture.getHeight() ) );
    }
}
void qbConfig::setRenderTexture( gl::Texture & frame )
{
    if ( ! mRenderTexFbo ) {
        int h = (int) ( QBCFG_THUMB_SIZE.x * ( frame.getHeight() / (float) frame.getWidth() ) );
        mRenderTexFbo = gl::Fbo( QBCFG_THUMB_SIZE.x, h, true, true, false );
        CHECK_GL_ERROR;
        mRenderTexFbo.getTexture().setFlipped( true );
        mTextureControl->var = &( mRenderTexFbo.getTexture() );
    }

    // Draw to FBO
    glEnable( GL_TEXTURE_2D );
    glDisable( GL_DEPTH_TEST );
    glDisable( GL_LIGHTING );
    gl::disableAlphaBlending();
    gl::setMatricesWindow( mRenderTexFbo.getTexture().getSize() );
    gl::setViewport( mRenderTexFbo.getTexture().getBounds() );
    mRenderTexFbo.bindFramebuffer();
    gl::color( ColorA::white() );
    gl::draw( frame, mRenderTexFbo.getBounds() );
    mRenderTexFbo.unbindFramebuffer();
    mTextureControl->refresh();
}
void PhotoBoothApp::draw()
{
    gl::enableAlphaBlending();
    gl::clear();
    gl::color( 1, 1, 1, 1 );
    glDepthMask( GL_FALSE );

    // Set up the view for landscape mode.
    gl::setMatricesWindow( width * DISPLAY_SCALE, height * DISPLAY_SCALE );
    gl::scale( DISPLAY_SCALE, DISPLAY_SCALE );

    // draw the live camera preview
    if( mCameraTexture ) {
        // make sure the camera texture isn't flipped vertically
        mCameraTexture.setFlipped( false );
        // draw the texture mirrored.
        gl::draw( mCameraSurface, Rectf( width, 0, 0, height ) );
    }

    // draw "idle" stuff (text and images overlaid on the live camera preview)
    if( mCurrentState == STATE_PREVIEW ) {
        gl::color( 1, 1, 1, 0.75f );
        gl::draw( mLightBg, Rectf( 0, 0, width, height ) );

        gl::color( 1, 1, 1, 1 );
        gl::draw( mIntroTexture, Rectf( 0, 0, width, height ) );
        gl::draw( mCameraButtonTexture, mCameraButtonPos.value() - Vec2f( 0, abs( sin( getElapsedSeconds() * 3 ) * 30 ) ) );
        //gl::draw(mCameraButtonTexture, Rectf(mCameraButtonPos.value().x,mCameraButtonPos.value().y, mCameraButtonTexture.getWidth() * 0.5 + mCameraButtonPos.value().x, mCameraButtonTexture.getHeight() * 0.5 + mCameraButtonPos.value().y) );// mCameraButtonPos.value() );
    }

    // Draw the preview image with dark background.
    if( mPreviewTexture ) {
        // draw background image and prompt text.
        if( mDarkBgAlpha > 0 ) {
            gl::color( 1, 1, 1, mDarkBgAlpha );
            gl::draw( mDarkBg, Vec2f::zero() );
            gl::draw( mConfirmMessage, Vec2f( width / 2 - mConfirmMessage.getWidth() / 2, 125 - mConfirmMessage.getHeight() / 2 ) );
        }

        float aspect      = mPreviewTexture.getAspectRatio();
        float imageHeight = height - 500; // margins are keyed off of a hard-coded height; this is not very multi-resolution friendly.
        float imageWidth  = imageHeight * aspect;
        float marginX     = (width - imageWidth) / 2;
        float marginY     = (height - imageHeight) / 2;

        gl::draw( mPreviewTexture, Rectf( marginX, marginY, width - marginX, height - marginY ) + mPreviewTexturePos );

        // Draw semi-transparent pillar boxes to show how the square version of the image will look.
        if( mCurrentState == STATE_ACCEPT ) {
            float pillarWidth = (imageWidth - imageHeight) / 2;
            gl::color( 0, 0, 0, 0.35f );
            gl::drawSolidRect( Rectf( marginX, height - marginY, pillarWidth + marginX, marginY ) );
            gl::drawSolidRect( Rectf( width - marginX - pillarWidth, height - marginY, width - marginX, marginY ) );
        }
    }

    // draw the "flash"
    if( mCurrentState == STATE_ACCEPT ) {
        gl::color( 1, 1, 1, mCameraFlash );
        gl::drawSolidRect( Rectf( 0, 0, width, height ) );
    }

    // draw count-down timer
    if( mCurrentState == STATE_COUNT_DOWN ) {
        // draw dark circle background
        Vec2f centerPos = Vec2f( width / 2 - mNumberBg.getWidth() / 2, height / 2 - mNumberBg.getHeight() / 2 );
        gl::draw( mNumberBg, centerPos );

        // background ring that "fills up" for each second.
        gl::draw( mNumberProgress,
                  Area( 0, mNumberProgress.getHeight() * mCountDownFractional, mNumberProgress.getWidth(), mNumberProgress.getHeight() ),
                  Rectf( centerPos.x, centerPos.y + mNumberProgress.getHeight() * mCountDownFractional,
                         mNumberBg.getWidth() + centerPos.x, centerPos.y + mNumberBg.getHeight() ) );

        // Draw number.
        gl::enableAdditiveBlending();
        gl::draw( mNumberTextures[mCountDownNumber], centerPos );
    }

    gl::color( 1, 1, 1, 1 );
    gl::draw( mSaveTexture, mSavePos );
    gl::draw( mDiscardTexture, mDiscardPos );
}
void ImageRetargetingApp::resetWindowOriginalSize( WindowRef window )
{
    window->setSize( originalTexture.getWidth(), originalTexture.getHeight() );
}
void WaterSimApp::drawLogo()
{
    const int LOGO_OFFSET_X = 35, LOGO_OFFSET_Y = 20;

    glEnable( GL_BLEND );
    glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
    gl::setMatricesWindow( getWindowSize() );
    glEnable( GL_TEXTURE_2D );
    mLogo.bind();

    if( mRenderInfo )
        glColor4f( 1.0f, 1.0f, 1.0f, 1.0f );
    else
        glColor4f( 1.0f, 1.0f, 1.0f, 0.1f );

    glBegin( GL_QUADS );
        glTexCoord2f( 0, 0 );
        glVertex2f( getWindowWidth() - mLogo.getWidth() - LOGO_OFFSET_X, getWindowHeight() - mLogo.getHeight() - LOGO_OFFSET_Y );
        glTexCoord2f( 1.0, 0.0f );
        glVertex2f( getWindowWidth() - LOGO_OFFSET_X, getWindowHeight() - mLogo.getHeight() - LOGO_OFFSET_Y );
        glTexCoord2f( 1.0f, 1.0f );
        glVertex2f( getWindowWidth() - LOGO_OFFSET_X, getWindowHeight() - LOGO_OFFSET_Y );
        glTexCoord2f( 0.0f, 1.0f );
        glVertex2f( getWindowWidth() - mLogo.getWidth() - LOGO_OFFSET_X, getWindowHeight() - LOGO_OFFSET_Y );
    glEnd();

    glDisable( GL_TEXTURE_2D );
}
void ForelleVisualAppApp::draw()
{
    // clear out the window with black
    gl::clear( Color( 0, 0, 0 ) );

    // draw our logo
    gl::draw( mLogo, Vec2i( 0, getWindowHeight() - mLogo.getHeight() ) );

    // update our client; kept together with draw
    client.update();

    //gl::color( Colorf(0.0f, 0.0f, 0.0f) );

    // draw the image from the client
    gl::draw( *client.getTexture(), Rectf( pos.x, pos.y, pos.x + (60 * scale), pos.y + (60 * scale) ) );

    if( drawGrid ) {
        // draw pixel grid
        gl::color( Colorf( 1.0f, 1.0f, 1.0f ) );
        gl::pushMatrices();
        gl::translate( pos );
        for( float i = 0; i <= 60 * scale; i += scale ) {
            gl::drawLine( Vec2f( i, 0 ), Vec2f( i, 60 * scale ) );
            gl::drawLine( Vec2f( 0, i ), Vec2f( 60 * scale, i ) );
        }
        gl::popMatrices();
    }

    if( updateCluster )
        controller.updateAndDrawClusters( clusters, *client.getSurface(), pos, scale );

    if( readPixels )
        controller.getData( clusters, data1, data2, data3, data4 );

    if( bAllOn ) {
        for( int i = 0; i < Const::MAX_DMX_CHANNELS; i++ ) {
            data1[i] = 255;
            data2[i] = 255;
            data3[i] = 255;
            data4[i] = 255;
        }
    }
    else if( bAllOff ) {
        for( int i = 0; i < Const::MAX_DMX_CHANNELS; i++ ) {
            data1[i] = 0;
            data2[i] = 0;
            data3[i] = 0;
            data4[i] = 0;
        }
    }

    if( selectedClusterOn && !clusters.empty() ) {
        for( int i = 0; i < Const::MAX_DMX_CHANNELS; i++ ) {
            data1[i] = 0;
            data2[i] = 0;
            data3[i] = 0;
            data4[i] = 0;

            int universe = *(*selectedCluster)->getUniverse();
            if( universe == 0 ) {
                (*selectedCluster)->getChannelData( data1 );
            }
            else if( universe == 1 ) {
                (*selectedCluster)->getChannelData( data2 );
            }
            else if( universe == 2 ) {
                (*selectedCluster)->getChannelData( data3 );
            }
            else if( universe == 3 ) {
                (*selectedCluster)->getChannelData( data4 );
            }
        }
    }

    if( sendData ) {
        node.sendDataAtPort( data1, 0 );
        node.sendDataAtPort( data2, 1 );
        node.sendDataAtPort( data3, 2 );
        node.sendDataAtPort( data4, 3 );
    }

    // Draw the interface
    clusterBar.draw();
    menueBar.draw();
}
void FaceController::prepareFaceToSave( TriMesh2d mesh, gl::Texture surf, float headScale )
{
    genericName = "surf_" + to_string( facesStoreVector.size() ) + ".png";

    Rectf rect = mesh.calcBoundingBox();
    vector<ci::Vec2f> savecords;
    savecords.clear();

    // store vertex positions relative to the mesh bounding box, normalized by the texture size
    for( int i = 0; i < mesh.getNumVertices(); i++ )
        savecords.push_back( Vec2f( ( mesh.getVertices()[i].x - rect.x1 ) / surf.getWidth(),
                                    ( mesh.getVertices()[i].y - rect.y1 ) / surf.getHeight() ) );

    FaceObject newface;
    newface.setPoints( savecords );
    newface.setTexName( genericName );
    newface.setTexture( surf );
    facesStoreVector.push_back( newface );

    writeImage( getAppPath() / FACE_STORAGE_FOLDER / genericName, surf );
}
void ARTestApp::update()
{
    ARMarkerInfo *marker_info;  // Pointer to array holding the details of detected markers.
    int           marker_num;   // Count of number of markers detected.
    int           j, k;

    // Grab a video frame.
#if defined( USE_AR_VIDEO )
    ARUint8 *image;
    if( (image = arVideoGetImage()) != NULL ) {
#else
    if( mCapture->checkNewFrame() ) {
#endif
#if defined( USE_AR_VIDEO )
        gARTImage = image;  // Save the fetched image.
        mTexture->enableAndBind();
#else
        const fli::Surface8u &surface( mCapture->getSurface() );
        mTexture->update( surface );
        gARTImage = const_cast<uint8_t*>( surface.getData() );
#endif
        gCallCountMarkerDetect++;   // Increment ARToolKit FPS counter.

        // Detect the markers in the video frame.
        if( arDetectMarker( gARTImage, gARTThreshhold, &marker_info, &marker_num ) < 0 ) {
            exit( -1 );
        }

        // check for known patterns
        for( int i = 0; i < objectnum; i++ ) {
            k = -1;
            for( j = 0; j < marker_num; j++ ) {
                if( object[i].id == marker_info[j].id ) {
                    /* you've found a pattern */
                    if( k == -1 )
                        k = j;
                    else /* make sure you have the best pattern (highest confidence factor) */
                        if( marker_info[k].cf < marker_info[j].cf )
                            k = j;
                }
            }
            if( k == -1 ) {
                object[i].visible = 0;
                continue;
            }

            /* calculate the transform for each marker */
            if( object[i].visible == 0 ) {
                arGetTransMat( &marker_info[k], object[i].marker_center, object[i].marker_width, object[i].trans );
            }
            else {
                arGetTransMatCont( &marker_info[k], object[i].trans, object[i].marker_center, object[i].marker_width, object[i].trans );
            }
            object[i].visible = 1;
        }
    }

    if( mLockedMode >= 0 ) {
        for( int i = 0; i < objectnum; i++ ) {
            object[i].visible = 0;
        }
        object[mLockedMode].visible = 1;
    }

    for( int mod = 0; mod < objectnum; ++mod )
        mModules[mod]->update( this, object[mod].visible );
}

void ARTestApp::draw()
{
    GLdouble p[16];
    GLdouble m[16];

    // Select correct buffer for this context, then clear the buffers for the new frame.
    glClearColor( 0, 0, 0, 1 );
    gl::enableDepthWrite();
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
    gl::disableDepthRead();
    gl::disableDepthWrite();
    gl::enableAlphaBlending();

    if( object[0].visible || object[1].visible || object[2].visible )
        mCurrentAlpha += ( 0.0f - mCurrentAlpha ) * 0.05f;
    else
        mCurrentAlpha += ( 1.0f - mCurrentAlpha ) * 0.05f;

    gl::setMatricesScreenOrtho( getWindowWidth(), getWindowHeight() );

    // draw the camera image centered
    glColor4f( 1, 1, 1, 1 ); //0.2f + mCurrentAlpha * 0.8f );
    float width = ( getWindowHeight() * ( mTexture->getWidth() / (float)mTexture->getHeight() ) );
    mTexture->draw( ( getWindowWidth() - width ) / 2.0f, 0, width, getWindowHeight() );
    glDisable( mTexture->getTarget() );

#if defined( USE_AR_VIDEO )
    arVideoCapNext();
    gARTImage = NULL;   // Image data is no longer valid after calling arVideoCapNext().
#endif

    // Projection transformation.
    arglCameraFrustumRH( &gARTCparam, VIEW_DISTANCE_MIN, VIEW_DISTANCE_MAX, p );
    glMatrixMode( GL_PROJECTION );
    glLoadMatrixd( p );

    // Calculate the camera position relative to the marker.
    // Replace VIEW_SCALEFACTOR with 1.0 to make one drawing unit equal to 1.0 ARToolKit units (usually millimeters).
    for( int mod = 0; mod < objectnum; ++mod ) {
        if( object[mod].visible ) {
            arglCameraViewRH( object[mod].trans, m, VIEW_SCALEFACTOR );
            glMatrixMode( GL_MODELVIEW );
            glLoadMatrixd( m );

            fli::Matrix44d mvd( m );
            mModules[mod]->draw( this, mvd * Vec4d( 0, 0, 0, 1 ) );
        }
    }
}