void CVCaptureAndroid::setFrame(const void* buffer, int bufferSize)
{
    // Dimensions the Java activity reports for the current camera frame.
    const int frameW = activity->getFrameWidth();
    const int frameH = activity->getFrameHeight();

    // A YUV420sp frame carries 1.5 bytes per pixel; any other size means the
    // raw camera buffer is not in the format we expect, so drop the frame.
    const int yuv420Size = frameW * frameH * 3 / 2;
    if (bufferSize != yuv420Size)
        return;

    // Resolution changed since the last frame: resize our internal buffers.
    if (frameW != this->width || frameH != this->height)
    {
        this->width  = frameW;
        this->height = frameH;
        reallocateBuffers();
    }

    // Stash the raw frame for the next grab.
    memcpy(frameYUV420next.ptr(), buffer, bufferSize);

    dataState        = HAS_NEW_FRAME_UNGRABBED;
    waitingNextFrame = false;
}
//! render
//! Builds one camera-facing quad (billboard) per particle into the shared
//! mesh buffer and draws the whole system with a single primitive call.
void CParticleSystemSceneNode::render()
{
	video::IVideoDriver* driver = SceneManager->getVideoDriver();
	ICameraSceneNode* camera = SceneManager->getActiveCamera();

	if (!camera || !driver)
		return;


#if 0
	// calculate vectors for letting particles look to camera
	core::vector3df view(camera->getTarget() - camera->getAbsolutePosition());
	view.normalize();

	view *= -1.0f;

#else

	// Read the camera axes straight out of the view matrix instead of
	// recomputing them from target/up vectors (the disabled branch above).
	const core::matrix4 &m = camera->getViewFrustum()->getTransform( video::ETS_VIEW );

	// Negated look direction; used as the vertex normal so the quads face
	// the camera for lighting purposes.
	const core::vector3df view ( -m[2], -m[6] , -m[10] );

#endif

	// reallocate arrays, if they are too small
	reallocateBuffers();

	// create particle vertex data
	s32 idx = 0;
	for (u32 i=0; i<Particles.size(); ++i)
	{
		const SParticle& particle = Particles[i];

		#if 0
			core::vector3df horizontal = camera->getUpVector().crossProduct(view);
			horizontal.normalize();
			horizontal *= 0.5f * particle.size.Width;

			core::vector3df vertical = horizontal.crossProduct(view);
			vertical.normalize();
			vertical *= 0.5f * particle.size.Height;

		#else
			// Per-particle half-extents along the camera's right/up axes,
			// taken directly from the view matrix (cheaper than the
			// cross-product variant above).
			f32 f;

			f = 0.5f * particle.size.Width;
			const core::vector3df horizontal ( m[0] * f, m[4] * f, m[8] * f );

			f = -0.5f * particle.size.Height;
			const core::vector3df vertical ( m[1] * f, m[5] * f, m[9] * f );
		#endif

		// Quad corners: centre +h+v, +h-v, -h-v, -h+v.
		Buffer->Vertices[0+idx].Pos = particle.pos + horizontal + vertical;
		Buffer->Vertices[0+idx].Color = particle.color;
		Buffer->Vertices[0+idx].Normal = view;

		Buffer->Vertices[1+idx].Pos = particle.pos + horizontal - vertical;
		Buffer->Vertices[1+idx].Color = particle.color;
		Buffer->Vertices[1+idx].Normal = view;

		Buffer->Vertices[2+idx].Pos = particle.pos - horizontal - vertical;
		Buffer->Vertices[2+idx].Color = particle.color;
		Buffer->Vertices[2+idx].Normal = view;

		Buffer->Vertices[3+idx].Pos = particle.pos - horizontal + vertical;
		Buffer->Vertices[3+idx].Color = particle.color;
		Buffer->Vertices[3+idx].Normal = view;

		idx +=4;
	}

	// render all
	// Particles may already be in world space (ParticlesAreGlobal); in that
	// case only the node's translation is applied, not its full transform.
	core::matrix4 mat;
	if (!ParticlesAreGlobal)
		mat.setTranslation(AbsoluteTransformation.getTranslation());
	driver->setTransform(video::ETS_WORLD, mat);

	driver->setMaterial(Buffer->Material);

	// 4 vertices / 2 triangles per particle.
	driver->drawVertexPrimitiveList(Buffer->getVertices(), Particles.size()*4,
		Buffer->getIndices(), Particles.size()*2, video::EVT_STANDARD, EPT_TRIANGLES,Buffer->getIndexType());

	// for debug purposes only:
	if ( DebugDataVisible & scene::EDS_BBOX )
	{
		driver->setTransform(video::ETS_WORLD, AbsoluteTransformation);
		video::SMaterial deb_m;
		deb_m.Lighting = false;
		driver->setMaterial(deb_m);
		driver->draw3DBox(Buffer->BoundingBox, video::SColor(0,255,255,255));
	}
}
int ExynosMPP::processM2M(hwc_layer_1_t &layer, int dst_format, hwc_frect_t *sourceCrop)
#endif
// Blits `layer` through a GScaler unit in memory-to-memory mode, converting
// it to dst_format into an internally owned destination buffer. The scaler
// may have to run twice (src -> mid -> dst) for conversions it cannot do in
// a single pass. Returns 0 on success or a negative error code; on failure
// all internally allocated buffers and pending fences are released.
// NOTE(review): the #endif above closes an #ifdef (outside this chunk) that
// selects an alternate signature when USES_VIRTUAL_DISPLAY is defined.
{
    ALOGV("configuring gscaler %u for memory-to-memory", AVAILABLE_GSC_UNITS[mIndex]);

    alloc_device_t* alloc_device = mDisplay->mAllocDevice;
    private_handle_t *src_handle = private_handle_t::dynamicCast(layer.handle);
    buffer_handle_t dst_buf;
    private_handle_t *dst_handle;
    buffer_handle_t mid_buf;
    private_handle_t *mid_handle;
    int ret = 0;
    int dstAlign; // NOTE(review): unused in the visible code path
#ifdef USES_VIRTUAL_DISPLAY
    bool need_gsc_op_twice = false;
#endif

    exynos_mpp_img src_img, dst_img;
    memset(&src_img, 0, sizeof(src_img));
    memset(&dst_img, 0, sizeof(dst_img));
    exynos_mpp_img mid_img;
    memset(&mid_img, 0, sizeof(mid_img));

    // Caller may pass NULL when it does not care about the effective crop.
    hwc_frect_t sourceCropTemp;
    if (!sourceCrop)
        sourceCrop = &sourceCropTemp;

    setupSource(src_img, layer);
    src_img.mem_type = GSC_MEM_DMABUF;

    // Decide whether this conversion needs two scaler passes (src->mid->dst).
#ifdef USES_VIRTUAL_DISPLAY
    need_gsc_op_twice = setupDoubleOperation(src_img, mid_img, layer) && isNeedBufferAlloc;
#else
    bool need_gsc_op_twice = setupDoubleOperation(src_img, mid_img, layer);
#endif

    setupM2MDestination(src_img, dst_img, dst_format, layer, sourceCrop);

#ifdef USES_VIRTUAL_DISPLAY
    // For a virtual display the destination geometry is dictated by the
    // virtual display rectangle rather than the layer's display frame.
    if (!isNeedBufferAlloc) {
        dst_img.x = mDisplay->mHwc->mVirtualDisplayRect.left;
        dst_img.y = mDisplay->mHwc->mVirtualDisplayRect.top;
        dst_img.w = mDisplay->mHwc->mVirtualDisplayRect.width;
        dst_img.h = mDisplay->mHwc->mVirtualDisplayRect.height;
    }
#endif

    ALOGV("source configuration:");
    dumpMPPImage(src_img);

    // Reconfigure the scaler only if source or destination geometry changed
    // since the previous frame.
    bool reconfigure = isSrcConfigChanged(src_img, mSrcConfig) ||
            isDstConfigChanged(dst_img, mDstConfig);
    bool realloc = true;

#ifdef USES_VIRTUAL_DISPLAY
    if (isNeedBufferAlloc) {
#endif
    /* ext_only and int_only changes */
    // Single-pass secure DRM: destination buffers can be reused unless the
    // DRM mode itself changed.
    if (!need_gsc_op_twice && getDrmMode(src_handle->flags) == SECURE_DRM) {
        if (dst_img.drmMode != mDstConfig.drmMode)
            realloc = true;
        else
            realloc = false;
    }

    if (reconfigure && realloc) {
        if (reallocateBuffers(src_handle, dst_img, mid_img, need_gsc_op_twice) < 0)
            goto err_alloc;

        mCurrentBuf = 0;
        mLastGSCLayerHandle = 0;
    }

    // Same source buffer and unchanged configuration: skip the blit and
    // reuse the previously produced destination buffer.
    if (!reconfigure && (mLastGSCLayerHandle == layer.handle)) {
        ALOGV("[USE] GSC_SKIP_DUPLICATE_FRAME_PROCESSING\n");
        if (layer.acquireFenceFd >= 0)
            close(layer.acquireFenceFd);

        layer.releaseFenceFd = -1;
        layer.acquireFenceFd = -1;
        mDstConfig.releaseFenceFd = -1;

        // Step back to the buffer written last frame.
        mCurrentBuf = (mCurrentBuf + NUM_GSC_DST_BUFS - 1) % NUM_GSC_DST_BUFS;
        if (mDstBufFence[mCurrentBuf] >= 0) {
            close (mDstBufFence[mCurrentBuf]);
            mDstBufFence[mCurrentBuf] = -1;
        }
        return 0;
    } else {
        mLastGSCLayerHandle = layer.handle;
    }
#ifdef USES_VIRTUAL_DISPLAY
    }
#endif

    // NOTE(review): presumably setupSource() transferred the layer's acquire
    // fence into src_img; cleared here so it is not closed twice — confirm.
    layer.acquireFenceFd = -1;
    if (need_gsc_op_twice) {
        // Hand the mid buffer's outstanding fence to the scaler and take
        // ownership away from our bookkeeping array.
        mid_img.acquireFenceFd = mMidBufFence[mCurrentBuf];
        mMidBufFence[mCurrentBuf] = -1;
        mid_buf = mMidBuffers[mCurrentBuf];
        mid_handle = private_handle_t::dynamicCast(mid_buf);

        mid_img.fw = mid_handle->stride;
        mid_img.fh = mid_handle->vstride;
        mid_img.yaddr = mid_handle->fd;
        // YCrCb formats store Cr before Cb, so the u/v plane fds are swapped.
        if (isFormatYCrCb(mid_handle->format)) {
            mid_img.uaddr = mid_handle->fd2;
            mid_img.vaddr = mid_handle->fd1;
        } else {
            mid_img.uaddr = mid_handle->fd1;
            mid_img.vaddr = mid_handle->fd2;
        }
        //mid_img.acquireFenceFd = -1;

        ALOGV("mid configuration:");
        dumpMPPImage(mid_img);
    }

    dst_buf = mDstBuffers[mCurrentBuf];
    dst_handle = private_handle_t::dynamicCast(dst_buf);

    dst_img.fw = dst_handle->stride;
    dst_img.fh = dst_handle->vstride;
    dst_img.yaddr = dst_handle->fd;
    dst_img.uaddr = dst_handle->fd1;
    dst_img.vaddr = dst_handle->fd2;
    dst_img.acquireFenceFd = mDstBufFence[mCurrentBuf];
    mDstBufFence[mCurrentBuf] = -1;

    ALOGV("destination configuration:");
    dumpMPPImage(dst_img);

    if ((int)dst_img.w != WIDTH(layer.displayFrame))
        ALOGV("padding %u x %u output to %u x %u and cropping to {%7.1f,%7.1f,%7.1f,%7.1f}",
                WIDTH(layer.displayFrame), HEIGHT(layer.displayFrame),
                dst_img.w, dst_img.h, sourceCrop->left, sourceCrop->top,
                sourceCrop->right, sourceCrop->bottom);

    if (mGscHandle) {
        ALOGV("reusing open gscaler %u", AVAILABLE_GSC_UNITS[mIndex]);
    } else {
        ALOGV("opening gscaler %u", AVAILABLE_GSC_UNITS[mIndex]);
        mGscHandle = createMPP(
                AVAILABLE_GSC_UNITS[mIndex], GSC_M2M_MODE, GSC_DUMMY, true);
        if (!mGscHandle) {
            ALOGE("failed to create gscaler handle");
            ret = -1;
            goto err_alloc;
        }
    }

    // Single pass: alias mid to the real destination so the src->mid run
    // below writes the final output directly.
    if (!need_gsc_op_twice)
        memcpy(&mid_img, &dst_img, sizeof(exynos_mpp_img));

    /* src -> mid or src->dest */
    if (reconfigure || need_gsc_op_twice) {
        ret = stopMPP(mGscHandle);
        if (ret < 0) {
            ALOGE("failed to stop gscaler %u", mIndex);
            goto err_gsc_config;
        }

        ret = setCSCProperty(mGscHandle, 0, !mid_img.narrowRgb, 1);
        ret = configMPP(mGscHandle, &src_img, &mid_img);
        if (ret < 0) {
            ALOGE("failed to configure gscaler %u", mIndex);
            goto err_gsc_config;
        }
    }

    ret = runMPP(mGscHandle, &src_img, &mid_img);
    if (ret < 0) {
        ALOGE("failed to run gscaler %u", mIndex);
        goto err_gsc_config;
    }

    /* mid -> dst */
    if (need_gsc_op_twice) {
        ret = stopMPP(mGscHandle);
        if (ret < 0) {
            ALOGE("failed to stop gscaler %u", mIndex);
            goto err_gsc_config;
        }

        // The first pass's release fence becomes the second pass's acquire
        // fence: don't read mid before the scaler finished writing it.
        mid_img.acquireFenceFd = mid_img.releaseFenceFd;

        ret = setCSCProperty(mGscHandle, 0, !dst_img.narrowRgb, 1);
        ret = configMPP(mGscHandle, &mid_img, &dst_img);
        if (ret < 0) {
            ALOGE("failed to configure gscaler %u", mIndex);
            goto err_gsc_config;
        }

        ret = runMPP(mGscHandle, &mid_img, &dst_img);
        if (ret < 0) {
            ALOGE("failed to run gscaler %u", mIndex);
             goto err_gsc_config;
        }
        // Keep the fence so the mid buffer is not reused before this pass
        // has finished reading it.
        mMidBufFence[mCurrentBuf] = mid_img.releaseFenceFd;
    }

    mSrcConfig = src_img;
    mMidConfig = mid_img;

    // mDstConfig tracks the image the final output was produced into; in the
    // single-pass case mid_img is the dst_img copy carrying the release fence.
    if (need_gsc_op_twice) {
        mDstConfig = dst_img;
    } else {
        mDstConfig = mid_img;
    }

    layer.releaseFenceFd = src_img.releaseFenceFd;

    return 0;

err_gsc_config:
    destroyMPP(mGscHandle);
    mGscHandle = NULL;
err_alloc:
    if (src_img.acquireFenceFd >= 0)
        close(src_img.acquireFenceFd);
#ifdef USES_VIRTUAL_DISPLAY
    if (isNeedBufferAlloc) {
#endif
    // Release every internally owned destination/mid buffer and any pending
    // fences so the next attempt starts from a clean slate.
    for (size_t i = 0; i < NUM_GSC_DST_BUFS; i++) {
       if (mDstBuffers[i]) {
           alloc_device->free(alloc_device, mDstBuffers[i]);
           mDstBuffers[i] = NULL;
       }
       if (mDstBufFence[i] >= 0) {
           close(mDstBufFence[i]);
           mDstBufFence[i] = -1;
       }
       if (mMidBuffers[i]) {
           alloc_device->free(alloc_device, mMidBuffers[i]);
           mMidBuffers[i] = NULL;
       }
       if (mMidBufFence[i] >= 0) {
           close(mMidBufFence[i]);
           mMidBufFence[i] = -1;
       }
    }
#ifdef USES_VIRTUAL_DISPLAY
    }
#endif
    // Forget the cached configurations so the next call reconfigures fully.
    memset(&mSrcConfig, 0, sizeof(mSrcConfig));
    memset(&mDstConfig, 0, sizeof(mDstConfig));
    memset(&mMidConfig, 0, sizeof(mMidConfig));
    return ret;
}
//! render
void CParticleSystemSceneNode::render()
{
	video::IVideoDriver* driver = SceneManager->getVideoDriver();
	ICameraSceneNode* camera = SceneManager->getActiveCamera();

	if (!camera || !driver)
		return;

	// calculate vectors for letting particles look to camera
	core::vector3df campos = camera->getAbsolutePosition();
	core::vector3df target = camera->getTarget();
	core::vector3df up = camera->getUpVector();
	core::vector3df view = target - campos;
	view.normalize();

	core::vector3df horizontal = up.crossProduct(view);
	horizontal.normalize();

	core::vector3df vertical = horizontal.crossProduct(view);
	vertical.normalize();

	horizontal *= 0.5f * ParticleSize.Width;
	vertical *= 0.5f * ParticleSize.Height;	

	view *= -1.0f;

	// reallocate arrays, if they are too small
	reallocateBuffers();

	// create particle vertex data
	for (u32 i=0; i<Particles.size(); ++i)
	{
		const SParticle& particle = Particles[i];

		s32 idx = i*4;

		Vertices[0+idx].Pos = particle.pos + horizontal + vertical;
		Vertices[0+idx].Color = particle.color;
		Vertices[0+idx].Normal = view;

		Vertices[1+idx].Pos = particle.pos + horizontal - vertical;
		Vertices[1+idx].Color = particle.color;
		Vertices[1+idx].Normal = view;

		Vertices[2+idx].Pos = particle.pos - horizontal - vertical;
		Vertices[2+idx].Color = particle.color;
		Vertices[2+idx].Normal = view;

		Vertices[3+idx].Pos = particle.pos - horizontal + vertical;
		Vertices[3+idx].Color = particle.color;
		Vertices[3+idx].Normal = view;
	}

	// render all 
	core::matrix4 mat;
	if (!ParticlesAreGlobal)
		mat.setTranslation(AbsoluteTransformation.getTranslation());
	driver->setTransform(video::ETS_WORLD, mat);
		

	driver->setMaterial(Material);

	driver->drawIndexedTriangleList(Vertices.pointer(), Particles.size()*4,
		Indices.pointer(), Particles.size()*2);

	// for debug purposes only:
	if (DebugDataVisible)
	{
		driver->setTransform(video::ETS_WORLD, AbsoluteTransformation);
		video::SMaterial m;
		m.Lighting = false;
		driver->setMaterial(m);
		driver->draw3DBox(Box, video::SColor(0,255,255,255));
	}
}
// render
// ISceneNode implement
void CGameStaticShadowSceneNode::render()
{
	video::IVideoDriver* driver = SceneManager->getVideoDriver();
	ICameraSceneNode* camera = SceneManager->getActiveCamera();

	if (!camera || !driver)
		return;

	const core::matrix4 &m = camera->getViewFrustum()->getTransform( video::ETS_VIEW );
	const core::vector3df view ( -m[2], -m[6] , -m[10] );

	// get shadow comp
	CShadowComponent* shadow = (CShadowComponent*)m_owner->getComponent(IObjectComponent::Shadow);
	if ( shadow == NULL )
		return;

	// make buffer by shadow pos
	std::vector<video::S3DVertex>& listShadow = shadow->getListShadow();
	int nShadow = (int)listShadow.size();

	if ( nShadow > 0 )
	{
		// create shadow mesh buffer
		reallocateBuffers(nShadow);
		
		s32 idx = 0;

		std::vector<video::S3DVertex>::iterator i = listShadow.begin(), end = listShadow.end();
		while (i != end)
		{
			core::vector3df pos = i->Pos;
			core::vector3df nor = i->Normal;
			float width		= 80.0f;
			float height	= 80.0f;

			// set texcoord
			Buffer->Vertices[0+idx].TCoords.set(0.0f, 0.0f);
			Buffer->Vertices[1+idx].TCoords.set(0.0f, 1.0f);
			Buffer->Vertices[2+idx].TCoords.set(1.0f, 1.0f);
			Buffer->Vertices[3+idx].TCoords.set(1.0f, 0.0f);
			
			// calc plane position
			f32 f = 0.5f * width;
			core::vector3df horizontal ( f, 0, 0 );

			f = -0.5f * height;
			core::vector3df vertical ( 0, 0, f );

			// rotate plane
			core::quaternion quaternion; 			
			quaternion.rotationFromTo( core::vector3df(0.0f,1.0f,0.0f), nor );

			core::matrix4 matrix = quaternion.getMatrix(); 
			matrix.rotateVect(horizontal); 
			matrix.rotateVect(vertical);


			// update buffer position
			Buffer->Vertices[0+idx].Pos = pos + horizontal + vertical;
			Buffer->Vertices[0+idx].Color = SColor(255,0,0,0);
			Buffer->Vertices[0+idx].Normal = nor;

			Buffer->Vertices[1+idx].Pos = pos + horizontal - vertical;
			Buffer->Vertices[1+idx].Color = SColor(255,0,0,0);
			Buffer->Vertices[1+idx].Normal = nor;

			Buffer->Vertices[2+idx].Pos = pos - horizontal - vertical;
			Buffer->Vertices[2+idx].Color = SColor(255,0,0,0);
			Buffer->Vertices[2+idx].Normal = nor;

			Buffer->Vertices[3+idx].Pos = pos - horizontal + vertical;
			Buffer->Vertices[3+idx].Color = SColor(255,0,0,0);
			Buffer->Vertices[3+idx].Normal = nor;

			idx += 4;

			Buffer->BoundingBox.addInternalPoint(pos);
			++i;
		}

		// render all
		core::matrix4 mat;
		driver->setTransform(video::ETS_WORLD, mat);

		// render 2 face on nonbillboard particle
		Buffer->Material.BackfaceCulling = false;
		Buffer->Material.FrontfaceCulling = false;
        
        Buffer->Material.Lighting = false;

		driver->setMaterial(Buffer->Material);

		driver->drawVertexPrimitiveList(
				Buffer->getVertices(), 
				nShadow*4,
				Buffer->getIndices(), 
				nShadow*2,
				video::EVT_STANDARD, EPT_TRIANGLES,
				Buffer->getIndexType()
			);
	}

	listShadow.clear();
}
// Draws all registered impostors: gathers the active ones (sorted by
// distance to the camera), fills one camera-facing quad per impostor into
// the shared atlas mesh buffers, trims unused quads, then draws each
// non-empty buffer (plus optional debug overlays).
void CImpostorSceneNode::render()
{
	// get camera pos
	ICameraSceneNode* cam=SceneManager->getActiveCamera();
	core::vector3df camPos = cam->getAbsolutePosition();
	// driver
	video::IVideoDriver* Driver = SceneManager->getVideoDriver();

	// all aboard the node sorter!
	NodeSorter.set_used(0);
	u32 i=0;
	for (; i < Impostors.size(); ++i)
	{
		core::aabbox3df bbox = Impostors[i].Node->getTransformedBoundingBox();
		// First impostor seeds this node's bounding box; the rest extend it.
		if (i==0)
			Box.reset(bbox.getCenter());

		Box.addInternalBox(bbox);

		// Only active impostors with an assigned atlas buffer get drawn.
		if (Impostors[i].IsActive && Impostors[i].BufferID != -1)
		{
			// debug data
			if (DebugDataVisible & EDS_BBOX_BUFFERS)
			{
				video::SMaterial material;
				material.Lighting=false;
				material.MaterialType = video::EMT_TRANSPARENT_ADD_COLOR;
				s32 col = Impostors[i].Color.getAlpha();
				material.DiffuseColor = video::SColor(255, col, col, col);

				Driver->setMaterial(material);
				Driver->setTransform(video::ETS_WORLD, Impostors[i].Node->getAbsoluteTransformation());
				Driver->draw3DBox(Impostors[i].Node->getBoundingBox(), Impostors[i].IsQueued ? video::SColor(255,255,0,0) : video::SColor(255,0,255,0) );
			}
			// todo: use real centre
			SNodeSorter n;
			n.NodeID = i;
			n.Position = bbox.getCenter();
			n.Distance = camPos.getDistanceFrom(n.Position);

			NodeSorter.push_back(n);
		}
	}
	// sort them
	NodeSorter.sort();

	// clear mesh buffers
	reallocateBuffers();

	// add to mesh buffers

	// fill buffers

	// Positions[b] = next free vertex index inside buffer b.
	Positions.set_used(Buffers.size());
	for (i=0; i < Positions.size(); ++i)
		Positions[i] = 0;

	for (i=0; i < NodeSorter.size(); ++i)
	{
		SNodeLink &Imp = Impostors[NodeSorter[i].NodeID];
		u32 no = Imp.BufferID;
		SMeshBuffer& Buffer = *(Buffers[no].MeshBuffer);
		u32 v = Positions[no];

		// tcoords
		// UV origin of this impostor's slot in the texture atlas:
		// d slots per atlas row, each `slot` fraction of the texture wide.
		f32 slot = f32(Buffers[no].SlotSize) / f32(TextureWidth);
		s32 d = TextureWidth / Buffers[no].SlotSize;
		f32 x = f32(Imp.SlotID % d) * slot;
		f32 y = f32(Imp.SlotID / d) * slot;


		// normal
		// Point the quad's normal back towards the camera.
		core::vector3df view(NodeSorter[i].Position - camPos);
		view.normalize();
		view *= -1.0f;

		// position
		// NOTE(review): the BilPos2/3/1/4 ordering is intentional to match
		// the TCoords assigned below — confirm against the slot renderer.
		Buffer.Vertices[0+v].Pos = Imp.BilPos2;
		Buffer.Vertices[1+v].Pos = Imp.BilPos3;
		Buffer.Vertices[2+v].Pos = Imp.BilPos1;
		Buffer.Vertices[3+v].Pos = Imp.BilPos4;

		// Billboard corners are stored in node-local space; move them to
		// world space with the node's transform.
		core::matrix4 matrix = Imp.Node->getAbsoluteTransformation();
		matrix.transformVect(Buffer.Vertices[0+v].Pos);
		matrix.transformVect(Buffer.Vertices[1+v].Pos);
		matrix.transformVect(Buffer.Vertices[2+v].Pos);
		matrix.transformVect(Buffer.Vertices[3+v].Pos);

		// colour
		video::SColor bilCol = Imp.Color;

		// Stress debug: red = queued for re-render, green = up to date.
		if (DebugDataVisible & EDS_IMPOSTOR_STRESS)
		{
			bilCol = Imp.IsQueued ? video::SColor(255,255,0,0) : video::SColor(255,0,255,0);
		}


		// everything else

		Buffer.Vertices[0+v].Normal = view;
		Buffer.Vertices[0+v].TCoords.set(x+slot, y+slot);
		Buffer.Vertices[0+v].Color = bilCol;

		Buffer.Vertices[1+v].Normal = view;
		Buffer.Vertices[1+v].TCoords.set(x+slot, y);
		Buffer.Vertices[1+v].Color = bilCol;

		Buffer.Vertices[2+v].Normal = view;
		Buffer.Vertices[2+v].TCoords.set(x, y);
		Buffer.Vertices[2+v].Color = bilCol;

		Buffer.Vertices[3+v].Normal = view;
		Buffer.Vertices[3+v].TCoords.set(x, y+slot);
		Buffer.Vertices[3+v].Color = bilCol;

		Positions[no] += 4;
	}
	// Shrink each buffer's used vertex/index range to the quads actually
	// written this frame (4 vertices / 6 indices per quad).
	for (i=0; i<Buffers.size(); ++i)
	{
		u32 quadCount = Positions[i] / 4;
		if (quadCount < Buffers[i].MeshBuffer->Vertices.size() / 4)
			Buffers[i].MeshBuffer->Vertices.set_used(Positions[i]);
		if (quadCount < Buffers[i].MeshBuffer->Indices.size() / 6)
			Buffers[i].MeshBuffer->Indices.set_used(quadCount*6);
	}

	// everyone get onboard the buffer sorter..
	// Skip buffers with no occupied slots; sort the rest for draw order.
	BufferSorter.set_used(0);
	for (i=0; i < Buffers.size(); ++i)
	{
		if (Buffers[i].FreeSlots == Buffers[i].Slots.size())
			continue;
		SBufferSorter bs;
		bs.BufferID = i;
		bs.ItemCount = Buffers[i].Slots.size() - Buffers[i].FreeSlots;
		bs.SlotSize = Buffers[i].SlotSize;
		BufferSorter.push_back(bs);
	}
	BufferSorter.sort();

	// set up the driver for drawing
	// Vertices were already transformed to world space above.
	core::matrix4 m; // identity
	Driver->setTransform(video::ETS_WORLD, m);


	//printf("Drawing %d buffers\n", BufferSorter.size());
	// and draw the buffers in order
	for (i=0; i<BufferSorter.size(); ++i)
	{
		Material.setTexture(0, Buffers[BufferSorter[i].BufferID].Texture);
		//Material.setTexture(0, WorkTexture);
		Driver->setMaterial(Material);
		Driver->drawMeshBuffer(Buffers[BufferSorter[i].BufferID].MeshBuffer);

		// Optional second pass: untextured wireframe overlay.
		if (DebugDataVisible & EDS_MESH_WIRE_OVERLAY)
		{
			Material.Wireframe = true;
			Material.setTexture(0, 0);
			Driver->setMaterial(Material);
			Driver->drawMeshBuffer(Buffers[BufferSorter[i].BufferID].MeshBuffer);

			Material.Wireframe = false;
		}
	}

	if (DebugDataVisible & EDS_BBOX)
	{
		video::SMaterial material;
		material.Lighting=false;
		material.DiffuseColor = video::SColor(255,0,0,255);

		Driver->setMaterial(material);
		Driver->draw3DBox(Box);
	}

}
Example #7
0
/*
 * Hook called by EGL to acquire a buffer. This call may block if no
 * buffers are available.
 *
 * The window holds a reference to the buffer between dequeueBuffer and
 * either queueBuffer or cancelBuffer, so clients only need their own
 * reference if they might use the buffer after queueing or canceling it.
 * Holding a reference to a buffer after queueing or canceling it is only
 * allowed if a specific buffer count has been set.
 *
 * The libsync fence file descriptor returned in the int pointed to by the
 * fenceFd argument will refer to the fence that must signal before the
 * dequeued buffer may be written to.  A value of -1 indicates that the
 * caller may access the buffer immediately without waiting on a fence.  If
 * a valid file descriptor is returned (i.e. any value except -1) then the
 * caller is responsible for closing the file descriptor.
 *
 * Returns 0 on success or -errno on error.
 */
int FbDevNativeWindow::dequeueBuffer(BaseNativeWindowBuffer** buffer, int *fenceFd)
{
    HYBRIS_TRACE_BEGIN("fbdev-platform", "dequeueBuffer", "");
    FbDevNativeWindowBuffer* fbnb=NULL;

    // _mutex guards the buffer list, busy flags and m_freeBufs for the whole
    // selection below.
    pthread_mutex_lock(&_mutex);

    // Buffer geometry/count changed; rebuild the list before handing
    // anything out.
    if (m_allocateBuffers)
        reallocateBuffers();

    HYBRIS_TRACE_BEGIN("fbdev-platform", "dequeueBuffer-wait", "");
#if defined(DEBUG)

    if (m_frontBuf)
        TRACE("Status: Has front buf %p", m_frontBuf);

    std::list<FbDevNativeWindowBuffer*>::iterator cit = m_bufList.begin();
    for (; cit != m_bufList.end(); ++cit)
    {
        TRACE("Status: Buffer %p with busy %i\n", (*cit), (*cit)->busy);
    }
#endif

    // Block until at least one buffer is free. NOTE(review): presumably
    // _cond is signalled by queueBuffer/cancelBuffer when a buffer is
    // returned — confirm against the rest of the class.
    while (m_freeBufs==0)
    {
        pthread_cond_wait(&_cond, &_mutex);
    }

    // m_freeBufs > 0, but the free buffer may still be the front buffer;
    // loop until a usable one turns up.
    while (1)
    {
        // Prefer any idle buffer that is not currently being scanned out.
        std::list<FbDevNativeWindowBuffer*>::iterator it = m_bufList.begin();
        for (; it != m_bufList.end(); ++it)
        {
            if (*it==m_frontBuf)
                continue;
            if ((*it)->busy==0)
            {
                TRACE("Found a free non-front buffer");
                break;
            }
        }

        if (it == m_bufList.end())
        {
#if ANDROID_VERSION_MAJOR<=4 && ANDROID_VERSION_MINOR<2
            /*
             * This is acceptable in case you are on a stack that calls lock() before starting to render into buffer
             * When you are using fences (>= 2) you'll be waiting on the fence to signal instead. 
             * 
             * This optimization allows eglSwapBuffers to return and you can begin to utilize the GPU for rendering. 
             * The actual lock() probably first comes at glFlush/eglSwapBuffers
            */
            if (m_frontBuf && m_frontBuf->busy == 0)
            {
                TRACE("Used front buffer as buffer");
                fbnb = m_frontBuf;
                break;
            }
#endif
            // have to wait once again
            pthread_cond_wait(&_cond, &_mutex);
            continue;
        }

        fbnb = *it;
        break;
    }

    HYBRIS_TRACE_END("fbdev-platform", "dequeueBuffer-wait", "");
    assert(fbnb!=NULL);
    // Claim the buffer while still holding the lock.
    fbnb->busy = 1;
    m_freeBufs--;

    *buffer = fbnb;
    // -1 per the window API contract: the caller may write immediately
    // without waiting on a fence.
    *fenceFd = -1;

    TRACE("%lu DONE --> %p", pthread_self(), fbnb);
    pthread_mutex_unlock(&_mutex);
    HYBRIS_TRACE_END("fbdev-platform", "dequeueBuffer", "");
    return 0;
}