void CMultiTexturingTerrainSceneNode::render() {
    //CTerrainSceneNode::render();
    //I learned this meshbuffer trick from Viz_Fuerte's "Simple but useful projects"
    if (!SceneManager->isCulled(this)) {
        /*setVisible(true);
        OnRegisterSceneNode();
        updateAbsolutePosition();
        setVisible(false);*/

        // Reset the world transformation before drawing the passes.
        SceneManager->getVideoDriver()->setTransform(video::ETS_WORLD, core::IdentityMatrix);

        for (u32 i = 0; i < array_Passes.size(); i++) {
            // Bind the splat map and its three detail textures for this pass.
            setMaterialTexture(0, array_Passes[i]->splat_texture);
            setMaterialTexture(1, array_Passes[i]->red_texture);
            setMaterialTexture(2, array_Passes[i]->green_texture);
            setMaterialTexture(3, array_Passes[i]->blue_texture);

            video::SMaterial material = getMaterial(0);
            material.MaterialType = (video::E_MATERIAL_TYPE)shaderMaterial;
            material.MaterialTypeParam = video::pack_textureBlendFunc(video::EBF_DST_COLOR, video::EBF_ONE);

            SceneManager->getVideoDriver()->setMaterial(material);
            SceneManager->getVideoDriver()->drawMeshBuffer(getRenderBuffer());
        }
    }
}
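Each entry in array_Passes bundles one splat map with three detail textures; the loop above rebinds them and redraws the terrain once per pass, so the blend function accumulates the layers in the framebuffer. The element type is not shown in this excerpt, so the struct below is only a plausible sketch with an invented name:

// Hypothetical per-pass record matching the fields the render loop reads.
struct SSplatPass {
    video::ITexture* splat_texture;  // RGB weight map: each channel selects a detail layer
    video::ITexture* red_texture;    // detail texture weighted by the splat map's red channel
    video::ITexture* green_texture;  // detail texture weighted by green
    video::ITexture* blue_texture;   // detail texture weighted by blue
};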
void DeferredLightingEffect::copyBack(RenderArgs* args) {
    auto textureCache = DependencyManager::get<TextureCache>();
    QSize framebufferSize = textureCache->getFrameBufferSize();

    auto freeFBO = DependencyManager::get<GlowEffect>()->getFreeFramebuffer();

    //freeFBO->release();
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    
    glDisable(GL_CULL_FACE);
    
    // now transfer the lit region to the primary fbo
    glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_CONSTANT_ALPHA, GL_ONE);
    glColorMask(true, true, true, false);
    
    auto primaryFBO = gpu::GLBackend::getFramebufferID(textureCache->getPrimaryFramebuffer());
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, primaryFBO);

    //primaryFBO->bind();
    
    glBindTexture(GL_TEXTURE_2D, gpu::GLBackend::getTextureID(freeFBO->getRenderBuffer(0)));
    glEnable(GL_TEXTURE_2D);
    
    glPushMatrix();
    glLoadIdentity();
    
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    
    int viewport[4];
    glGetIntegerv(GL_VIEWPORT, viewport);
    const int VIEWPORT_X_INDEX = 0;
    const int VIEWPORT_Y_INDEX = 1;
    const int VIEWPORT_WIDTH_INDEX = 2;
    const int VIEWPORT_HEIGHT_INDEX = 3;

    float sMin = viewport[VIEWPORT_X_INDEX] / (float)framebufferSize.width();
    float sWidth = viewport[VIEWPORT_WIDTH_INDEX] / (float)framebufferSize.width();
    float tMin = viewport[VIEWPORT_Y_INDEX] / (float)framebufferSize.height();
    float tHeight = viewport[VIEWPORT_HEIGHT_INDEX] / (float)framebufferSize.height();

    renderFullscreenQuad(sMin, sMin + sWidth, tMin, tMin + tHeight);
    
    glBindTexture(GL_TEXTURE_2D, 0);
    glDisable(GL_TEXTURE_2D);
    
    glColorMask(true, true, true, true);
    glEnable(GL_LIGHTING);
    glEnable(GL_COLOR_MATERIAL);
    glEnable(GL_DEPTH_TEST);
    glDepthMask(true);
    
    glPopMatrix();
    
    glMatrixMode(GL_MODELVIEW);
    glPopMatrix();
}
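renderFullscreenQuad() is not shown in this excerpt. A minimal fixed-function sketch consistent with how copyBack() calls it: with identity modelview and projection matrices already loaded, it draws a clip-space quad and maps the given texture-coordinate window (the lit region of the viewport) onto it.

// Sketch only: draws a full-screen quad in normalized device coordinates,
// sampling the bound texture between the given s/t bounds.
static void renderFullscreenQuad(float sMin, float sMax, float tMin, float tMax) {
    glBegin(GL_QUADS);
    glTexCoord2f(sMin, tMin); glVertex2f(-1.0f, -1.0f);
    glTexCoord2f(sMax, tMin); glVertex2f( 1.0f, -1.0f);
    glTexCoord2f(sMax, tMax); glVertex2f( 1.0f,  1.0f);
    glTexCoord2f(sMin, tMax); glVertex2f(-1.0f,  1.0f);
    glEnd();
}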
Example #3
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
JNIEXPORT void JNICALL Java_com_qualcomm_fastcvdemo_apis_imageTransformation_Affine_update
(
   JNIEnv*     env,
   jobject     obj,
   jbyteArray  img,
   jint        w,
   jint        h
)
{
	jbyte*            jimgData = NULL;
	jboolean          isCopy = 0;
	uint32_t*         curCornerPtr = 0;
	uint8_t*          renderBuffer;
	uint64_t          time;
	float             timeMs;

	// Allocate the buffer once here if it's not allocated already
	if( affineState.affineImgBuf == NULL)
	{
		int frameSize = w*h*3/2;
		affineState.affineImgBuf = (uint8_t *)fcvMemAlloc(frameSize, 16);
		if( affineState.affineImgBuf == NULL )
		{
			EPRINTF("Allocate affineImgBuf failed");
		}
		else
		{
			memset(affineState.affineImgBuf, 0, w*h);           // Y plane: black
			memset(affineState.affineImgBuf+(w*h), 128, w*h/2); // UV plane: neutral chroma
		}
	}

	// Get data from JNI
	jimgData = env->GetByteArrayElements( img, &isCopy );

	renderBuffer = getRenderBuffer( w, h );

	lockRenderBuffer();

	time = util.getTimeMicroSeconds();

	uint8_t* pJimgData    = (uint8_t*)jimgData;

	// Check if camera image data is not aligned: FastCV requires 128-bit
	// (16-byte) aligned buffers, and casting through uintptr_t keeps the
	// test valid for 64-bit pointers.
	if( (uintptr_t)jimgData & 0xF )
	{
		// Allow for rescale if dimensions changed.
		if( w != (int)affineState.alignedImgWidth ||
			h != (int)affineState.alignedImgHeight )
		{
			if( affineState.alignedImgBuf != NULL )
			{
				DPRINTF( "%s %d Creating aligned for preview\n",
						__FILE__, __LINE__ );
				fcvMemFree( affineState.alignedImgBuf );
				affineState.alignedImgBuf = NULL;
			}
		}

		// Allocate buffer for aligned data if necessary.
		if( affineState.alignedImgBuf == NULL )
		{
			affineState.alignedImgWidth = w;
			affineState.alignedImgHeight = h;
			affineState.alignedImgBuf = (uint8_t*)fcvMemAlloc( w*h*3/2, 16 );
		}

		memcpy( affineState.alignedImgBuf, jimgData, w*h*3/2 );
		pJimgData = affineState.alignedImgBuf;
	}

	// Perform FastCV Function processing
	switch( affineState.affineType )
	{
		// Both affine variants run the same path here; the variant is
		// dispatched on affineState.affineType further down the call chain.
		case AFFINE_U8:
		case AFFINE_8X8:
			updateAffine( (uint8_t*)pJimgData, w, h, affineState.affineImgBuf);
			colorConvertYUV420ToRGB565Renderer(affineState.affineImgBuf, w, h, (uint32_t*)renderBuffer );
			break;

		case NO_AFFINE:
		default:
			colorConvertYUV420ToRGB565Renderer(pJimgData, w, h, (uint32_t*)renderBuffer );
			break;
	}

	// Update image
	timeMs = ( util.getTimeMicroSeconds() - time ) / 1000.f;
	util.setProcessTime((util.getProcessTime()*(29.f/30.f))+(float)(timeMs/30.f));

	unlockRenderBuffer();

	// Let JNI know we don't need data anymore
	env->ReleaseByteArrayElements( img, jimgData, JNI_ABORT );
}
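The align-then-copy dance above recurs in several of these handlers: FastCV kernels want 128-bit (16-byte) aligned input, and a JNI byte array carries no such guarantee. A small helper capturing the same logic (the dimension-change reallocation of the original is omitted for brevity); ensureAligned16() is a name invented here, not part of the demo or of FastCV:

#include <cstdint>
#include <cstring>
#include "fastcv.h"

// Returns a 16-byte-aligned view of a YUV420 frame, copying into a cached
// scratch buffer only when the source pointer is misaligned.
static uint8_t* ensureAligned16(uint8_t* src, uint8_t*& scratch, int w, int h) {
    if (((uintptr_t)src & 0xF) == 0) {
        return src;                       // already aligned, use in place
    }
    const int frameSize = w * h * 3 / 2;  // YUV420: full-res Y plane + half-res UV
    if (scratch == NULL) {
        scratch = (uint8_t*)fcvMemAlloc(frameSize, 16);
    }
    memcpy(scratch, src, frameSize);
    return scratch;
}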
Example #4
        // Generate the mip chain for the linear depth texture; the occlusion
        // pass below samples from these mip levels.
        batch.generateTextureMips(_framebuffer->getLinearDepthTexture());
        
        // Occlusion pass
        batch.setFramebuffer(occlusionFBO);
        batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(1.0f));
        batch.setPipeline(occlusionPipeline);
        batch.setResourceTexture(AmbientOcclusionEffect_LinearDepthMapSlot, _framebuffer->getLinearDepthTexture());
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        
        if (_parametersBuffer->getBlurRadius() > 0) {
            // Blur 1st pass
            batch.setFramebuffer(occlusionBlurredFBO);
            batch.setPipeline(firstHBlurPipeline);
            batch.setResourceTexture(AmbientOcclusionEffect_OcclusionMapSlot, occlusionFBO->getRenderBuffer(0));
            batch.draw(gpu::TRIANGLE_STRIP, 4);

            // Blur 2nd pass
            batch.setFramebuffer(occlusionFBO);
            batch.setPipeline(lastVBlurPipeline);
            batch.setResourceTexture(AmbientOcclusionEffect_OcclusionMapSlot, occlusionBlurredFBO->getRenderBuffer(0));
            batch.draw(gpu::TRIANGLE_STRIP, 4);
        }
        
        
        batch.setResourceTexture(AmbientOcclusionEffect_LinearDepthMapSlot, nullptr);
        batch.setResourceTexture(AmbientOcclusionEffect_OcclusionMapSlot, nullptr);
        
        _gpuTimer.end(batch);
    });
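The two blur draws above form a separable blur: a horizontal pass renders into occlusionBlurredFBO, then a vertical pass renders back into occlusionFBO. Splitting a k-wide 2-D kernel into two 1-D passes costs roughly 2k taps per pixel instead of k², and the ping-pong between framebuffers is needed because a pass cannot read the texture it is currently rendering to. The same idea on the CPU, with invented names:

#include <algorithm>
#include <vector>

// Separable box blur of a W x H float image with radius r. The tmp buffer
// plays the role of occlusionBlurredFBO in the GPU version above.
static void separableBlur(std::vector<float>& img, int W, int H, int r) {
    std::vector<float> tmp(img.size());
    const float norm = 1.0f / (2 * r + 1);
    for (int y = 0; y < H; ++y)        // horizontal pass: img -> tmp
        for (int x = 0; x < W; ++x) {
            float sum = 0.0f;
            for (int k = -r; k <= r; ++k)
                sum += img[y * W + std::min(std::max(x + k, 0), W - 1)];
            tmp[y * W + x] = sum * norm;
        }
    for (int y = 0; y < H; ++y)        // vertical pass: tmp -> img
        for (int x = 0; x < W; ++x) {
            float sum = 0.0f;
            for (int k = -r; k <= r; ++k)
                sum += tmp[std::min(std::max(y + k, 0), H - 1) * W + x];
            img[y * W + x] = sum * norm;
        }
}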
Example #5
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
JNIEXPORT void JNICALL Java_com_qualcomm_fastcvdemo_apis_imageProcessing_Filter_update
(
   JNIEnv*     env,
   jobject     obj,
   jbyteArray  img,
   jint        w,
   jint        h
)
{
   jbyte*            jimgData = NULL;
   jboolean          isCopy = 0;
   uint32_t*         curCornerPtr = 0;
   uint8_t*          renderBuffer;
   uint64_t          time;
   float             timeMs;

   // Allocate the buffer once here if it's not allocated already
   if( filterState.filteredImgBuf == NULL)
   {
      int frameSize = w*h*3/2;
      filterState.filteredImgBuf = (uint8_t *)fcvMemAlloc(frameSize, 16);
      if( filterState.filteredImgBuf == NULL )
      {
         EPRINTF("Allocate filteredImgBuf failed");
      }
      else
      {
         memset(filterState.filteredImgBuf, 128, frameSize);
      }
   }

   // Get data from JNI
   jimgData = env->GetByteArrayElements( img, &isCopy );

   renderBuffer = getRenderBuffer( w, h );

   lockRenderBuffer();

   time = util.getTimeMicroSeconds();

   // jimgData might not be 128-bit aligned. fcvColorYUV420toRGB565u8()
   // and the other FastCV functions used here require 128-bit (16-byte)
   // aligned memory. If jimgData is not 128-bit aligned, the code below
   // allocates an aligned buffer and copies jimgData into it.

   uint8_t* pJimgData    = (uint8_t*)jimgData;
   uint8_t* pFilteringData = (uint8_t*)filterState.filteredImgBuf;

   // Check if camera image data is not aligned (uintptr_t keeps the test
   // valid for 64-bit pointers).
   if( (uintptr_t)jimgData & 0xF )
   {
      // Allow for rescale if dimensions changed.
      if( w != (int)filterState.imgWidth ||
          h != (int)filterState.imgHeight )
      {
         if( filterState.alignedImgBuf != NULL )
         {
            DPRINTF( "%s %d Creating aligned for preview\n",
               __FILE__, __LINE__ );
            fcvMemFree( filterState.alignedImgBuf );
            filterState.alignedImgBuf = NULL;
         }
      }

      // Allocate buffer for aligned data if necessary.
      if( filterState.alignedImgBuf == NULL )
      {
         filterState.imgWidth = w;
         filterState.imgHeight = h;
         filterState.alignedImgBuf = (uint8_t*)fcvMemAlloc( w*h*3/2, 16 );
      }

      memcpy( filterState.alignedImgBuf, jimgData, w*h*3/2 );
      pJimgData = filterState.alignedImgBuf;
   }
   else if( w != (int)filterState.imgWidth ||
            h != (int)filterState.imgHeight )
   {
      filterState.imgWidth = w;
      filterState.imgHeight = h;
   }

   // Perform FastCV Function processing
   updateFilter( (uint8_t*)pJimgData, w, h, (uint8_t*)pFilteringData );

   // Copy the image into our own buffer first to avoid corruption during
   // rendering. Note that the image can still be corrupted while we copy,
   // but we can't help that.

   colorConvertYUV420ToRGB565Renderer(pFilteringData,
                            w,
                            h,
                            (uint32_t*)renderBuffer );

   // Update image
   timeMs = ( util.getTimeMicroSeconds() - time ) / 1000.f;
   util.setProcessTime((util.getProcessTime()*(29.f/30.f))+(float)(timeMs/30.f));

   if( filterState.filterType == ENABLE_CANNY )
   {
      drawEdges(filterState.edgeImgBuf, filterState.edgeImgHeight, filterState.edgeImgWidth);
   }

   unlockRenderBuffer();

   // Let JNI know we don't need data anymore
   env->ReleaseByteArrayElements( img, jimgData, JNI_ABORT );
}
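The setProcessTime line that appears in each of these handlers is an exponential moving average with alpha = 1/30: each new frame time contributes about 3%, so the on-screen figure settles over roughly a second of 30 fps frames instead of jittering per frame. The same update, isolated:

// avg' = avg * (29/30) + sample * (1/30), i.e. an EMA with alpha = 1/30.
static float smoothProcessTime(float avg, float sampleMs) {
    const float ALPHA = 1.0f / 30.0f;
    return avg * (1.0f - ALPHA) + sampleMs * ALPHA;
}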
Example #6
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
JNIEXPORT void JNICALL Java_com_qualcomm_fastcvdemo_apis_imageProcessing_ImgDiff_update
(
   JNIEnv*     env,
   jobject     obj,
   jbyteArray  img,
   jint        w,
   jint        h
)
{
   jbyte*            jimgData = NULL;
   jboolean          isCopy = 0;
   uint32_t*         curCornerPtr = 0;
   uint8_t*          renderBuffer;
   uint64_t          time;
   float             timeMs;


   // Get data from JNI
   jimgData = env->GetByteArrayElements( img, &isCopy );

   DPRINTF("ImgDiff: update");

   renderBuffer = getRenderBuffer( w, h );

   lockRenderBuffer();

   time = util.getTimeMicroSeconds();

   // Allocate the buffer once here if it's not allocated already
   if( imgDiffState.referenceImgBuf == NULL)
   {
      imgDiffState.referenceImgBuf = (uint8_t *)fcvMemAlloc(w*h*3/2, 16);
      if( imgDiffState.referenceImgBuf == NULL )
      {
         EPRINTF("Allocate referenceImgBuf failed");
      }
   }

   // Allocate the buffer once here if it's not allocated already
   if( imgDiffState.diffImgBuf == NULL)
   {
      int frameSize = w*h*3/2;
      imgDiffState.diffImgBuf = (uint8_t *)fcvMemAlloc(frameSize, 16);
      if( imgDiffState.diffImgBuf == NULL )
      {
         EPRINTF("Allocate diffImgBuf failed");
      }
      else
      {
         memset(imgDiffState.diffImgBuf, 0, w*h);           // Y plane: black
         memset(imgDiffState.diffImgBuf+(w*h), 128, w*h/2); // UV plane: neutral chroma
      }
   }

   uint8_t* pJimgData    = (uint8_t*)jimgData;
   uint8_t* pDiffData    = (uint8_t*)imgDiffState.diffImgBuf;

   // jimgData might not be 128-bit aligned. fcvColorYUV420toRGB565u8()
   // and the other FastCV functions used here require 128-bit (16-byte)
   // aligned memory. If jimgData is not 128-bit aligned, the code below
   // allocates an aligned buffer and copies jimgData into it.

   // Check if camera image data is not aligned (uintptr_t keeps the test
   // valid for 64-bit pointers).
   if( (uintptr_t)jimgData & 0xF )
   {
      // Allow for rescale if dimensions changed.
      if( w != (int)imgDiffState.alignedImgWidth ||
          h != (int)imgDiffState.alignedImgHeight )
      {
         if( imgDiffState.alignedImgBuf != NULL )
         {
            DPRINTF( "%s %d Creating aligned for preview\n",
               __FILE__, __LINE__ );
            fcvMemFree( imgDiffState.alignedImgBuf );
            imgDiffState.alignedImgBuf = NULL;
         }
      }

      // Allocate buffer for aligned data if necessary.
      if( imgDiffState.alignedImgBuf == NULL )
      {
         imgDiffState.alignedImgWidth = w;
         imgDiffState.alignedImgHeight = h;
         imgDiffState.alignedImgBuf = (uint8_t*)fcvMemAlloc( w*h*3/2, 16 );
      }

      memcpy( imgDiffState.alignedImgBuf, jimgData, w*h*3/2 );
      pJimgData = imgDiffState.alignedImgBuf;
   }

   uint8_t* pPreviewData = pJimgData;

   //Handles reference frame state
   switch( imgDiffState.diffState )
   {
      case NO_REF_FRAME:
         break;

      case TAKE_REF_FRAME:
         DPRINTF("In take frame");
         // Only the luma (Y) plane is stored; the diff operates on luma alone.
         memcpy(imgDiffState.referenceImgBuf, pPreviewData, w*h);
         imgDiffState.diffState = HAS_REF_FRAME;
         DPRINTF("ImgDiff: taken frame");
         break;

      case HAS_REF_FRAME:
         updateDiff( imgDiffState.referenceImgBuf, pJimgData, w, h, pDiffData );
         pPreviewData = pDiffData;
         break;

      case NEED_RESET:
         imgDiffState.diffState = NO_REF_FRAME;
         break;

      default:
         break; 
   }

   colorConvertYUV420ToRGB565Renderer(pPreviewData,
                            w,
                            h,
                            (uint32_t*)renderBuffer );

   // Update image
   timeMs = ( util.getTimeMicroSeconds() - time ) / 1000.f;
   util.setProcessTime((util.getProcessTime()*(29.f/30.f))+(float)(timeMs/30.f));

   unlockRenderBuffer();

   // Let JNI know we don't need data anymore
   env->ReleaseByteArrayElements( img, jimgData, JNI_ABORT );
}
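updateDiff() is defined elsewhere in the demo, and only the luma plane is involved: the reference copy above stores w*h bytes of Y, and diffImgBuf's chroma stays at the neutral 128 written at allocation, so the preview renders as a grayscale difference image. A plausible one-liner in terms of FastCV's fcvImageDiffu8 (the demo's actual implementation may differ, and fcvImageDiffu8 itself expects aligned buffers):

#include "fastcv.h"

// Per-pixel difference of two Y planes (see the FastCV docs for the exact
// saturation semantics); chroma in dst is left untouched.
static void updateDiff(uint8_t* ref, uint8_t* cur, int w, int h, uint8_t* dst) {
    fcvImageDiffu8(ref, cur, (unsigned int)w, (unsigned int)h, dst);
}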