Example #1
void Renderer::initFont(const Graphics::Surface *surface) {
	_font = createTexture(surface);
}
Example #2
/**
 * @brief Creates all the textures that are needed.
 * This function builds all the textures.
 */
void loadTexture(){

	//nature
	createTexture(&texGrass1,"./texture/grass.jpg");
	createTexture(&texGrass2,"./texture/grass2.jpg");
	createTexture(&texTree0,"./texture/tree0.png");
	createTexture(&texTree1,"./texture/tree1.png");
	createTexture(&texTree2,"./texture/tree2.png");
	createTexture(&texTree3,"./texture/tree3.png");

	//house section
	createTexture(&texBuild,"./texture/build.jpg");
	createTexture(&texParquet,"./texture/parquet.jpg");
	createTexture(&texDoor,"./texture/door.png");
	createTexture(&texRoof,"./texture/roof.jpg");
	createTexture(&texGround3,"./texture/ground3.jpg");
	createTexture(&texSchermatura,"./texture/schermatura.png");

	//external house stuff
	createTexture(&texSwim,"./texture/water.jpg");
	createTexture(&texFence,"./texture/fence.png");
	createTexture(&texHedge,"./texture/hedge.jpg");
	createTexture(&texGate,"./texture/gate.png");
	createTexture(&texStoneGround,"./texture/stoneground.jpg");
	createTexture(&texPath,"./texture/path.jpg");

	createTexture(&texSky,"./texture/sky2.jpg");

	//Miscellaneous
	createTexture(&texCrowBar,"./texture/crowbar2.png");
}
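For context: the createTexture(&texture, path) helper that this example calls repeatedly is not shown here. The following is a minimal sketch of what such a helper might look like, assuming OpenGL with the stb_image loader; the signature, the loader, and the filtering choices are assumptions for illustration, not code from the original project.

#include <cstdio>
#include <GL/gl.h>
#include "stb_image.h" // assumption: stb_image is used to decode the image files

// Hypothetical helper: decodes an image file and uploads it as an RGBA 2D texture.
void createTexture(GLuint *outTexture, const char *path) {
	int width = 0, height = 0, channels = 0;
	unsigned char *pixels = stbi_load(path, &width, &height, &channels, 4); // force RGBA
	if (!pixels) {
		fprintf(stderr, "Could not load texture '%s'\n", path);
		return;
	}

	glGenTextures(1, outTexture);
	glBindTexture(GL_TEXTURE_2D, *outTexture);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);

	stbi_image_free(pixels);
}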
Example #3
void keyboardInit()
{
	kbTexture = createTexture(keyboard_data.width, keyboard_data.height, keyboard_data.pixel_data);
}
Example #4
bool DeviceDirect3D::create()
{
	HMODULE libD3D11 = LoadLibrary("d3d11.dll");
	if(!libD3D11)
	{
		Logger() << "Could not load d3d11.dll, you probably do not have DirectX 11 installed.";
		return false;
	}

	HMODULE libCompiler43 = LoadLibrary("d3dcompiler_43.dll");
	if(!libCompiler43)
	{
		Logger() << "Could not load d3dcompiler_43.dll, try updating your DirectX";
		return false;
	}

	//Release handles
	FreeLibrary(libD3D11);
	FreeLibrary(libCompiler43);

	std::vector<IDXGIAdapter1*> adapters;
	if(!getAdapterHandle(&adapters))
	{
		return false;
	}

	UINT createDeviceFlags = 0;
#if defined(_DEBUG)
    createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

	const D3D_FEATURE_LEVEL featureLevels[] = { D3D_FEATURE_LEVEL_11_0 /*, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0*/ };
	DXGI_SWAP_CHAIN_DESC sd;
	
	ZeroMemory(&sd,sizeof(sd));
	
	const WindowSettings& ws = getWindow()->getWindowSettings();

	sd.BufferCount = 1;	
	sd.BufferDesc.Width = (UINT)ws.width;
	sd.BufferDesc.Height = (UINT)ws.height;
	sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; //_SRGB;
	sd.BufferDesc.RefreshRate.Numerator = 60;	
	sd.BufferDesc.RefreshRate.Denominator = 1;	
	sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT | DXGI_USAGE_UNORDERED_ACCESS;
	sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
	sd.OutputWindow = static_cast<WindowWinAPI*>(getWindow())->getHandle();
	sd.SampleDesc.Count = 1;
	sd.SampleDesc.Quality = 0;
	sd.Windowed = ws.fullscreen ? FALSE : TRUE;
	
	int selectedAdapterId = ws.gpu;
	IDXGIAdapter* selectedAdapter = nullptr;
	if(selectedAdapterId >= 0) 
	{
		if(selectedAdapterId < (int)adapters.size())
		{
			selectedAdapter = adapters[selectedAdapterId];
		} else {
			LOGFUNCERROR("Selected graphics card " << selectedAdapterId << " does not exist");
		}
	}

	HRESULT result = D3D11CreateDeviceAndSwapChain(selectedAdapter, selectedAdapter ? D3D_DRIVER_TYPE_UNKNOWN : D3D_DRIVER_TYPE_HARDWARE, 0, createDeviceFlags, featureLevels,
                        _countof(featureLevels), D3D11_SDK_VERSION, &sd, &swapChain, &device, &featureLevel, &context);

	if(result != S_OK)
	{
		if(result == DXGI_ERROR_UNSUPPORTED)
		{
			LOGFUNCERROR("Your videocard does not appear to support DirectX 11");
		} else {
			LOGERROR(result, "D3D11CreateDeviceAndSwapChain");
		}
		return false;
	}

	//D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;	
	/*result = device->CreateShaderResourceView(swapBackBuffer, 0, &swapBackBufferSRV);

	if(result != S_OK){
		LOGERROR(result, "ID3D11Device::CreateShaderResourceView");
		return false;
	}*/

	D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS dxHwOpt;
	result = device->CheckFeatureSupport(D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS, &dxHwOpt, sizeof(dxHwOpt));
	if(FAILED(result))
	{
		LOGERROR(result, "CheckFeatureSupport");
		return false;
	}
	if(!dxHwOpt.ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x)
	{
		Logger() << "ComputeShaders are not supported on this device";
		return false;
	}
	
	//Get the buffer from the swapchain
	result = swapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&swapBackBuffer);
	if(result != S_OK)
    {
		LOGERROR(result, "IDXGISwapChain::GetBuffer");
        return false;
    }

	//Create trace result texture/RT
	swapStaging = static_cast<TextureDirect3D*>(createTexture());
	swapStaging->create(TextureDimensions::Texture2D, TextureFormat::R8G8B8A8_UNORM, sd.BufferDesc.Width, sd.BufferDesc.Height, nullptr, TextureBinding::Staging, CPUAccess::Read);
	/*result = device->CreateRenderTargetView(traceResultTexture->getResource(), nullptr, &traceResultRT);
	if(FAILED(result))
	{
		LOGERROR(result, "CreateRenderTargetView");
		return false;
	}*/

	//Create the UAV for the trace result
	D3D11_UNORDERED_ACCESS_VIEW_DESC uavDesc;
	ZeroMemory(&uavDesc, sizeof(uavDesc));
	uavDesc.Format = sd.BufferDesc.Format;
	uavDesc.ViewDimension = D3D11_UAV_DIMENSION_TEXTURE2D;
	uavDesc.Buffer.FirstElement = 0; 
	uavDesc.Buffer.NumElements = sd.BufferDesc.Width * sd.BufferDesc.Height;

	result = device->CreateUnorderedAccessView(swapBackBuffer, &uavDesc, &uavSwapBuffer);
	if(FAILED(result))
	{
		LOGERROR(result, "CreateUnorderedAccessView");
		return false;
	}

	//Setup sampler
	D3D11_SAMPLER_DESC samplerDesc;
	ZeroMemory(&samplerDesc, sizeof(samplerDesc));
	samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR; //D3D11_FILTER_ANISOTROPIC;
	samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
	samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
	samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
	samplerDesc.MinLOD = 0;
	samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
	samplerDesc.MipLODBias = 0.0f;
	samplerDesc.MaxAnisotropy = 0;
	samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;

	ID3D11SamplerState* sampler;
	device->CreateSamplerState(&samplerDesc, &sampler);
	context->CSSetSamplers(0, 1, &sampler);
	sampler->Release();

	return true;
}
Example #5
//--------------------------------------------------------------------------------------------------
/// 
//--------------------------------------------------------------------------------------------------
bool PointSprites::onInitialize()
{
    ref<ModelBasicList> model = new ModelBasicList;

    bool useShaders = true;

    {
        GeometryBuilderDrawableGeo builder;
        GeometryUtils::createSphere(1, 10, 10, &builder);
        ref<DrawableGeo> geo = builder.drawableGeo();

        ref<Effect> eff = new Effect;

        if (useShaders)
        {
            cvf::ShaderProgramGenerator gen("SimpleHeadlight", cvf::ShaderSourceProvider::instance());
            gen.configureStandardHeadlightColor();
            ref<ShaderProgram> prog = gen.generate();
            eff->setShaderProgram(prog.p());
            eff->setUniform(new UniformFloat("u_color", Color4f(Color3::YELLOW)));
        }
        else
        {
            eff->setRenderState(new RenderStateMaterial_FF(RenderStateMaterial_FF::PURE_YELLOW));
        }

        ref<Part> part = new Part;
        part->setDrawable(geo.p());
        part->setEffect(eff.p());

        model->addPart(part.p());
    }

    {
        ref<Vec3fArray> vertices = new Vec3fArray;
        vertices->reserve(10);
        vertices->add(Vec3f(0, 0, 0));
        vertices->add(Vec3f(-3, 0, 0));
        vertices->add(Vec3f(3, 0, 0));

        ref<UIntArray> indices = new UIntArray(vertices->size());
        indices->setConsecutive(0);

        ref<PrimitiveSetIndexedUInt> primSet = new PrimitiveSetIndexedUInt(PT_POINTS);
        primSet->setIndices(indices.p());

        ref<DrawableGeo> geo = new DrawableGeo;
        geo->setVertexArray(vertices.p());
        geo->addPrimitiveSet(primSet.p());

        ref<Effect> eff = new Effect;

        if (useShaders)
        {
            bool useTextureSprite = true;

            cvf::ShaderProgramGenerator gen("PointSprites", cvf::ShaderSourceProvider::instance());
            gen.addVertexCode(ShaderSourceRepository::vs_DistanceScaledPoints);
            
            if (useTextureSprite)  gen.addFragmentCode(ShaderSourceRepository::src_TextureFromPointCoord);
            else                   gen.addFragmentCode(ShaderSourceRepository::src_Color);
            gen.addFragmentCode(ShaderSourceRepository::fs_CenterLitSpherePoints);

            ref<cvf::ShaderProgram> prog = gen.generate();
            eff->setShaderProgram(prog.p());
            eff->setUniform(new UniformFloat("u_pointRadius", 1.0f));

            if (useTextureSprite)
            {
                ref<Texture> tex = createTexture();
                ref<Sampler> sampler = new Sampler;
                sampler->setMinFilter(Sampler::LINEAR);
                sampler->setMagFilter(Sampler::NEAREST);
                sampler->setWrapModeS(Sampler::REPEAT);
                sampler->setWrapModeT(Sampler::REPEAT);
                ref<RenderStateTextureBindings> texBind = new RenderStateTextureBindings(tex.p(), sampler.p(), "u_texture2D");
                eff->setRenderState(texBind.p());
            }
            else
            {
                eff->setUniform(new UniformFloat("u_color", Color4f(Color3::RED)));
            }

            ref<RenderStatePoint> point = new RenderStatePoint(RenderStatePoint::PROGRAM_SIZE);
            point->enablePointSprite(true);
            eff->setRenderState(point.p());
        }
        else
        {
            eff->setRenderState(new RenderStateLighting_FF(false));
            eff->setRenderState(new RenderStateMaterial_FF(RenderStateMaterial_FF::PURE_MAGENTA));

            ref<RenderStatePoint> point = new RenderStatePoint(RenderStatePoint::FIXED_SIZE);
            point->enablePointSprite(true);
            point->setSize(600.0f);
            eff->setRenderState(point.p());
        }

        ref<Part> part = new Part;
        part->setDrawable(geo.p());
        part->setEffect(eff.p());

        model->addPart(part.p());
    }

    model->updateBoundingBoxesRecursive();
    m_renderSequence->firstRendering()->scene()->addModel(model.p());

    BoundingBox bb = model->boundingBox();
    if (bb.isValid())
    {
        m_camera->fitView(bb, -Vec3d::Z_AXIS, Vec3d::Y_AXIS);
        if (m_usePerspective)
        {
            m_camera->setProjectionAsPerspective(m_fovScale*40.0, m_nearPlane, m_camera->farPlane());
        }
        else
        {
            m_camera->setProjectionAsOrtho(m_fovScale*bb.extent().length(), m_nearPlane, m_camera->farPlane());
        }
    }

    return true;
}
Example #6
VkBool32 Example::buildTexture(const vkts::ICommandBuffersSP& cmdBuffer, vkts::IImageSP& stageImage, vkts::IDeviceMemorySP& stageDeviceMemoryImage)
{
	auto imageData = vkts::imageDataLoad(VKTS_TEXTURE_NAME);

	if (!imageData.get())
	{
		vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not load image data: '%s'", VKTS_TEXTURE_NAME);

		return VK_FALSE;
	}

	//

	VkFormatProperties formatProperties;

	vkGetPhysicalDeviceFormatProperties(physicalDevice->getPhysicalDevice(), imageData->getFormat(), &formatProperties);

	VkImageTiling imageTiling = VK_IMAGE_TILING_LINEAR;
	VkMemoryPropertyFlagBits memoryPropertyFlagBits = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

	// Check how to upload the image data.
	if (!(formatProperties.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
	{
		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Format not supported.");

			return VK_FALSE;
		}

		imageTiling = VK_IMAGE_TILING_OPTIMAL;
		memoryPropertyFlagBits = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
	}

	//

	if (!createTexture(image, deviceMemoryImage, imageData, imageTiling, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, memoryPropertyFlagBits))
	{
		vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not create image.");

		return VK_FALSE;
	}

	//

	VkImageMemoryBarrier imageMemoryBarrier;

	memset(&imageMemoryBarrier, 0, sizeof(VkImageMemoryBarrier));

	imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;

	imageMemoryBarrier.srcAccessMask = 0;			// Defined later.
	imageMemoryBarrier.dstAccessMask = 0;			// Defined later.
	imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;// Defined later.
	imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_UNDEFINED;// Defined later.
	imageMemoryBarrier.srcQueueFamilyIndex = 0;
	imageMemoryBarrier.dstQueueFamilyIndex = 0;
	imageMemoryBarrier.image = VK_NULL_HANDLE;		// Defined later.
	imageMemoryBarrier.subresourceRange = {	VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

	//

	imageMemoryBarrier.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
	imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
	imageMemoryBarrier.image = image->getImage();

	cmdBuffer->cmdPipelineBarrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);

	//

	// If the image is only accessible by the device ...
	if (memoryPropertyFlagBits == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
	{
		// ... create texture with host visibility. This texture contains the pixel data.
		if (!createTexture(stageImage, stageDeviceMemoryImage, imageData, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not create image.");

			return VK_FALSE;
		}

		// Prepare source image layout for copy.

		imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
		imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
		imageMemoryBarrier.image = stageImage->getImage();

		cmdBuffer->cmdPipelineBarrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);

		// Prepare target image layout for copy.

		imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
		imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
		imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
		imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
		imageMemoryBarrier.image = image->getImage();

		cmdBuffer->cmdPipelineBarrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);

		// Copy image data by command.

		VkImageCopy imageCopy;

		imageCopy.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};

		imageCopy.srcOffset = {	0, 0, 0};

		imageCopy.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
		imageCopy.dstOffset = {	0, 0, 0};
		imageCopy.extent = { imageData->getWidth(), imageData->getHeight(), imageData->getDepth()};

		vkCmdCopyImage(cmdBuffer->getCommandBuffer(), stageImage->getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image->getImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageCopy);

		// Switch back to original layout.
		imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
		imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
		imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
		imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

		cmdBuffer->cmdPipelineBarrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
	}

	//

	sampler = vkts::samplerCreate(device->getDevice(), 0, VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, 0.0f, VK_FALSE, 1.0f, VK_FALSE, VK_COMPARE_OP_NEVER, 0.0f, 0.0f, VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE, VK_FALSE);

	if (!sampler.get())
	{
		vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not create sampler.");

		return VK_FALSE;
	}

	imageView = vkts::imageViewCreate(device->getDevice(), 0, image->getImage(), VK_IMAGE_VIEW_TYPE_2D, image->getFormat(), { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }, { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 });

	if (!imageView.get())
	{
		vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not create image view.");

		return VK_FALSE;
	}

	return VK_TRUE;
}
Example #7
//-----------------------------------------------------------------------------
void QGLImageGpuWidget::useLinear(bool val)
{
  filter_linear_ = val;
  createTexture();
  this->update();
}
Example #8
//
// TextureManager::readAnimDefLump
//
// [RH] This uses a Hexen ANIMDEFS lump to define the animation sequences.
//
void TextureManager::readAnimDefLump()
{
	int lump = -1;
	
	while ((lump = W_FindLump("ANIMDEFS", lump)) != -1)
	{
		SC_OpenLumpNum(lump, "ANIMDEFS");

		while (SC_GetString())
		{
			if (SC_Compare("flat") || SC_Compare("texture"))
			{
				anim_t anim;

				Texture::TextureSourceType texture_type = Texture::TEX_WALLTEXTURE;
				if (SC_Compare("flat"))
					texture_type = Texture::TEX_FLAT;

				SC_MustGetString();
				anim.basepic = texturemanager.getHandle(sc_String, texture_type);

				anim.curframe = 0;
				anim.numframes = 0;
				memset(anim.speedmin, 1, anim_t::MAX_ANIM_FRAMES * sizeof(*anim.speedmin));
				memset(anim.speedmax, 1, anim_t::MAX_ANIM_FRAMES * sizeof(*anim.speedmax));

				while (SC_GetString())
				{
					if (!SC_Compare("pic"))
					{
						SC_UnGet();
						break;
					}

					if ((unsigned)anim.numframes == anim_t::MAX_ANIM_FRAMES)
						SC_ScriptError ("Animation has too many frames");

					byte min = 1, max = 1;
					
					SC_MustGetNumber();
					int frame = sc_Number;
					SC_MustGetString();
					if (SC_Compare("tics"))
					{
						SC_MustGetNumber();
						sc_Number = clamp(sc_Number, 0, 255);
						min = max = sc_Number;
					}
					else if (SC_Compare("rand"))
					{
						SC_MustGetNumber();
						min = MAX(sc_Number, 0);
						SC_MustGetNumber();
						max = MIN(sc_Number, 255);
						if (min > max)
							min = max = 1;
					}
					else
					{
						SC_ScriptError ("Must specify a duration for animation frame");
					}

					anim.speedmin[anim.numframes] = min;
					anim.speedmax[anim.numframes] = max;
					anim.framepic[anim.numframes] = frame + anim.basepic - 1;
					anim.numframes++;
				}

				anim.countdown = anim.speedmin[0];

				if (anim.basepic != TextureManager::NOT_FOUND_TEXTURE_HANDLE &&
					anim.basepic != TextureManager::NO_TEXTURE_HANDLE)
					mAnimDefs.push_back(anim);
			}
			else if (SC_Compare ("switch"))   // Don't support switchdef yet...
			{
				//P_ProcessSwitchDef ();
				SC_ScriptError("switchdef not supported.");
			}
			else if (SC_Compare("warp"))
			{
				SC_MustGetString();
				if (SC_Compare("flat") || SC_Compare("texture"))
				{

					Texture::TextureSourceType texture_type = Texture::TEX_WALLTEXTURE;
					if (SC_Compare("flat"))
						texture_type = Texture::TEX_FLAT;

					SC_MustGetString();

					texhandle_t texhandle = texturemanager.getHandle(sc_String, texture_type);
					if (texhandle == TextureManager::NOT_FOUND_TEXTURE_HANDLE ||
						texhandle == TextureManager::NO_TEXTURE_HANDLE)
						continue;

					warp_t warp;

					// backup the original texture
					warp.original_texture = getTexture(texhandle);

					int width = 1 << warp.original_texture->getWidthBits();
					int height = 1 << warp.original_texture->getHeightBits();

					// create a new texture of the same size for the warped image
					warp.warped_texture = createTexture(texhandle, width, height);

					mWarpDefs.push_back(warp);
				}
				else
				{
					SC_ScriptError(NULL, NULL);
				}
			}
		}
		SC_Close ();
	}
}
Example #9
bool FBO::init(int width, int height, GraphicsContext3D::Attributes attributes)
{
#if !USE(SHARED_TEXTURE_WEBGL)
    // 1. Allocate a graphic buffer
    sp<ISurfaceComposer> composer(ComposerService::getComposerService());
    m_graphicBufferAlloc = composer->createGraphicBufferAlloc();

    status_t error;

    PixelFormat format = attributes.alpha ? HAL_PIXEL_FORMAT_RGBA_8888 : HAL_PIXEL_FORMAT_RGBX_8888;

    m_grBuffer = m_graphicBufferAlloc->createGraphicBuffer(width, height, format,
                 GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_HW_TEXTURE, &error);
    if (error != NO_ERROR) {
        LOGWEBGL(" failed to allocate GraphicBuffer, error = %d", error);
        return false;
    }

    void *addr = 0;
    if (m_grBuffer->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, &addr) != NO_ERROR) {
        LOGWEBGL("  failed to lock the GraphicBuffer");
        return false;
    }
    // WebGL requires all buffers to be initialized to 0.
    memset(addr, 0, width * height * 4);
    m_grBuffer->unlock();

    ANativeWindowBuffer* clientBuf = m_grBuffer->getNativeBuffer();
    if (clientBuf->handle == 0) {
        LOGWEBGL(" empty handle in GraphicBuffer");
        return false;
    }

    // 2. Create an EGLImage from the graphic buffer
    const EGLint attrs[] = {
        EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
        EGL_NONE,                EGL_NONE
    };

    m_image = eglCreateImageKHR(m_dpy,
                                EGL_NO_CONTEXT,
                                EGL_NATIVE_BUFFER_ANDROID,
                                (EGLClientBuffer)clientBuf,
                                attrs);
    if (GraphicsContext3DInternal::checkEGLError("eglCreateImageKHR") != EGL_SUCCESS) {
        LOGWEBGL("eglCreateImageKHR() failed");
        return false;
    }
#endif
    // 3. Create a texture from the EGLImage
    m_texture = createTexture(m_image, width, height);

    if (m_texture == 0) {
        LOGWEBGL("createTexture() failed");
        return false;
    }

    // 4. Create the Framebuffer Object from the texture
    glGenFramebuffers(1, &m_fbo);

    if (attributes.depth) {
        glGenRenderbuffers(1, &m_depthBuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, m_depthBuffer);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, width, height);
        if (GraphicsContext3DInternal::checkGLError("glRenderbufferStorage") != GL_NO_ERROR)
            return false;
    }

    if (attributes.stencil) {
        glGenRenderbuffers(1, &m_stencilBuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, m_stencilBuffer);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_STENCIL_INDEX8, width, height);
        if (GraphicsContext3DInternal::checkGLError("glRenderbufferStorage") != GL_NO_ERROR)
            return false;
    }

    glBindRenderbuffer(GL_RENDERBUFFER, 0);

    glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_texture, 0);
    if (GraphicsContext3DInternal::checkGLError("glFramebufferTexture2D") != GL_NO_ERROR)
        return false;

    if (attributes.depth) {
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_depthBuffer);
        if (GraphicsContext3DInternal::checkGLError("glFramebufferRenderbuffer") != GL_NO_ERROR)
            return false;
    }

    if (attributes.stencil) {
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, m_stencilBuffer);
        if (GraphicsContext3DInternal::checkGLError("glFramebufferRenderbuffer") != GL_NO_ERROR)
            return false;
    }

    GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) {
        LOGWEBGL("Framebuffer incomplete: %d", status);
        return false;
    }

    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    return true;
}
Example #10
void Dataset3D::scalingChanged()
{
    glDeleteTextures( 1, &m_textureGLuint );
    createTexture();
}
Example #11
////////////////////////////////////////////////////////////////////////////////
// basic test of the cache's base functionality:
//  push, pop, set, canReuse & getters
static void test_cache(skiatest::Reporter* reporter, GrContext* context) {

    if (false) { // avoid bit rot, suppress warning
        createTexture(context);
    }
    GrClipMaskCache cache;

    cache.setContext(context);

    // check initial state
    check_empty_state(reporter, cache);

    // set the current state
    SkIRect bound1;
    bound1.set(0, 0, 100, 100);

    SkClipStack clip1(bound1);

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit;
    desc.fWidth = X_SIZE;
    desc.fHeight = Y_SIZE;
    desc.fConfig = kSkia8888_GrPixelConfig;

    cache.acquireMask(clip1.getTopmostGenID(), desc, bound1);

    GrTexture* texture1 = cache.getLastMask();
    REPORTER_ASSERT(reporter, texture1);
    if (NULL == texture1) {
        return;
    }

    // check that the set took
    check_state(reporter, cache, clip1, texture1, bound1);
    REPORTER_ASSERT(reporter, texture1->getRefCnt());

    // push the state
    cache.push();

    // verify that the pushed state is initially empty
    check_empty_state(reporter, cache);
    REPORTER_ASSERT(reporter, texture1->getRefCnt());

    // modify the new state
    SkIRect bound2;
    bound2.set(-10, -10, 10, 10);

    SkClipStack clip2(bound2);

    cache.acquireMask(clip2.getTopmostGenID(), desc, bound2);

    GrTexture* texture2 = cache.getLastMask();
    REPORTER_ASSERT(reporter, texture2);
    if (NULL == texture2) {
        return;
    }

    // check that the changes took
    check_state(reporter, cache, clip2, texture2, bound2);
    REPORTER_ASSERT(reporter, texture1->getRefCnt());
    REPORTER_ASSERT(reporter, texture2->getRefCnt());

    // check to make sure canReuse works
    REPORTER_ASSERT(reporter, cache.canReuse(clip2.getTopmostGenID(), bound2));
    REPORTER_ASSERT(reporter, !cache.canReuse(clip1.getTopmostGenID(), bound1));

    // pop the state
    cache.pop();

    // verify that the old state is restored
    check_state(reporter, cache, clip1, texture1, bound1);
    REPORTER_ASSERT(reporter, texture1->getRefCnt());

    // manually clear the state
    cache.reset();

    // verify it is now empty
    check_empty_state(reporter, cache);

    // pop again - so there is no state
    cache.pop();

#if !defined(SK_DEBUG)
    // verify that the getters don't crash
    // only do in release since it generates asserts in debug
    check_empty_state(reporter, cache);
#endif
}
Example #12
vkts::IImageDataSP Example::gatherImageData() const
{
	VkResult result;

	auto imageData = vkts::imageDataCreate(VKTS_IMAGE_NAME, VKTS_IMAGE_LENGTH, VKTS_IMAGE_LENGTH, 1, 1.0f, 0.0f, 0.0f, 1.0f, VK_IMAGE_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM);

	// Check if we can use a linear tiled image for staging.
	if (physicalDevice->isImageTilingAvailable(VK_IMAGE_TILING_LINEAR, imageData->getFormat(), imageData->getImageType(), 0, imageData->getExtent3D(), imageData->getMipLevels(), 1, VK_SAMPLE_COUNT_1_BIT, imageData->getSize()))
	{
		vkts::IImageSP stageImage;
		vkts::IDeviceMemorySP stageDeviceMemory;

		if (!createTexture(stageImage, stageDeviceMemory, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0))
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not create stage image and device memory.");

			return vkts::IImageDataSP();
		}

		//

		cmdBuffer->reset();


		result = cmdBuffer->beginCommandBuffer(0, VK_NULL_HANDLE, 0, VK_NULL_HANDLE, VK_FALSE, 0, 0);

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not begin command buffer.");

			return vkts::IImageDataSP();
		}


		VkImageSubresourceRange imageSubresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

		// Prepare stage image for final layout etc.
		stageImage->cmdPipelineBarrier(cmdBuffer->getCommandBuffer(), VK_ACCESS_HOST_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, imageSubresourceRange);


		VkImageCopy imageCopy;

		imageCopy.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
		imageCopy.srcOffset = {0, 0, 0};
		imageCopy.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
		imageCopy.dstOffset = {0, 0, 0};
		imageCopy.extent = { VKTS_IMAGE_LENGTH, VKTS_IMAGE_LENGTH, 1u };

		// Copy from device to host-visible image / memory.
		image->copyImage(cmdBuffer->getCommandBuffer(), stageImage->getImage(), stageImage->getAccessMask(), stageImage->getImageLayout(), imageCopy);


		result = cmdBuffer->endCommandBuffer();

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not end command buffer.");

			return vkts::IImageDataSP();
		}


		VkSubmitInfo submitInfo;

		memset(&submitInfo, 0, sizeof(VkSubmitInfo));

		submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;

		submitInfo.waitSemaphoreCount = 0;
		submitInfo.pWaitSemaphores = nullptr;
		submitInfo.commandBufferCount = 1;
		submitInfo.pCommandBuffers = cmdBuffer->getCommandBuffers();
		submitInfo.signalSemaphoreCount = 0;
		submitInfo.pSignalSemaphores = nullptr;

		result = queue->submit(1, &submitInfo, VK_NULL_HANDLE);

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not submit queue.");

			return vkts::IImageDataSP();
		}

		result = queue->waitIdle();

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not wait for idle queue.");

			return vkts::IImageDataSP();
		}

		//
		// Copy pixel data from device memory into image data memory.
		//

		VkImageSubresource imageSubresource;

		imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		imageSubresource.mipLevel = 0;
		imageSubresource.arrayLayer = 0;

		VkSubresourceLayout subresourceLayout;

		stageImage->getImageSubresourceLayout(subresourceLayout, imageSubresource);

		//

		result = stageDeviceMemory->mapMemory(0, stageDeviceMemory->getAllocationSize(), 0);

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not map memory.");

			return vkts::IImageDataSP();
		}

		imageData->upload(stageDeviceMemory->getMemory(), 0, subresourceLayout);

		stageDeviceMemory->unmapMemory();

		// Stage image and device memory are automatically destroyed.
	}
	else
	{
		// As an alternative, use the buffer.

		vkts::IBufferSP stageBuffer;
		vkts::IDeviceMemorySP stageDeviceMemory;

		VkBufferCreateInfo bufferCreateInfo;

        memset(&bufferCreateInfo, 0, sizeof(VkBufferCreateInfo));

        bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        bufferCreateInfo.size = imageData->getSize();
        bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
        bufferCreateInfo.flags = 0;
        bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        bufferCreateInfo.queueFamilyIndexCount = 0;
        bufferCreateInfo.pQueueFamilyIndices = nullptr;

        if (!createBuffer(stageBuffer, stageDeviceMemory, bufferCreateInfo, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
        {
    		vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not create buffer.");

			return vkts::IImageDataSP();
        }


		//

		cmdBuffer->reset();


		result = cmdBuffer->beginCommandBuffer(0, VK_NULL_HANDLE, 0, VK_NULL_HANDLE, VK_FALSE, 0, 0);

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not begin command buffer.");

			return vkts::IImageDataSP();
		}



		VkImageSubresourceRange imageSubresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };


		VkBufferImageCopy bufferImageCopy;

		bufferImageCopy.bufferOffset = 0;
		bufferImageCopy.bufferRowLength = VKTS_IMAGE_LENGTH;
		bufferImageCopy.bufferImageHeight = VKTS_IMAGE_LENGTH;
		bufferImageCopy.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
		bufferImageCopy.imageOffset = {0, 0, 0};
		bufferImageCopy.imageExtent = {VKTS_IMAGE_LENGTH, VKTS_IMAGE_LENGTH, 1};

		image->copyImageToBuffer(cmdBuffer->getCommandBuffer(), stageBuffer->getBuffer(), 1, &bufferImageCopy, imageSubresourceRange);


		result = cmdBuffer->endCommandBuffer();

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not end command buffer.");

			return vkts::IImageDataSP();
		}


		VkSubmitInfo submitInfo;

		memset(&submitInfo, 0, sizeof(VkSubmitInfo));

		submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;

		submitInfo.waitSemaphoreCount = 0;
		submitInfo.pWaitSemaphores = nullptr;
		submitInfo.commandBufferCount = 1;
		submitInfo.pCommandBuffers = cmdBuffer->getCommandBuffers();
		submitInfo.signalSemaphoreCount = 0;
		submitInfo.pSignalSemaphores = nullptr;

		result = queue->submit(1, &submitInfo, VK_NULL_HANDLE);

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not submit queue.");

			return vkts::IImageDataSP();
		}

		result = queue->waitIdle();

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not wait for idle queue.");

			return vkts::IImageDataSP();
		}

		//
		// Copy pixel data from device memory into image data memory.
		//

		VkSubresourceLayout subresourceLayout;

		subresourceLayout.offset = 0;
		subresourceLayout.size = stageBuffer->getSize();
		subresourceLayout.rowPitch = VKTS_IMAGE_LENGTH * 4 * sizeof(uint8_t);
		subresourceLayout.arrayPitch = VKTS_IMAGE_LENGTH * VKTS_IMAGE_LENGTH * 4 * sizeof(uint8_t);
		subresourceLayout.depthPitch = VKTS_IMAGE_LENGTH * VKTS_IMAGE_LENGTH * 4 * sizeof(uint8_t);

		result = stageDeviceMemory->mapMemory(0, stageDeviceMemory->getAllocationSize(), 0);

		if (result != VK_SUCCESS)
		{
			vkts::logPrint(VKTS_LOG_ERROR, "Example: Could not map memory.");

			return vkts::IImageDataSP();
		}

		imageData->upload(stageDeviceMemory->getMemory(), 0, subresourceLayout);

		stageDeviceMemory->unmapMemory();

		// Stage buffer and device memory are automatically destroyed.
	}

	//

	return imageData;
}
Example #13
bool DirectXTextureBuilder::createNewTexture(IND_Surface *pNewSurface,
        IND_Image       *pImage,
        int             pBlockSizeX,
        int             pBlockSizeY) {

	//pType and pQuality are the requested texture parameters, not the actual image type.

	bool success = false;

	// ----- Check IND_Type and IND_Quality of the image and choose a D3D source and destination format -----
	// ----- Source and destination DirectX format ------
	D3DFORMAT mSrcFormat, mDstFormat;
		
	getSourceAndDestinationFormat(pImage,&mSrcFormat,&mDstFormat);  
	
	// ----- Cutting blocks -----

	// ----- Obtaining info in order to store the image -----

	INFO_SURFACE mI;
	_cutter->fillInfoSurface(pImage, &mI, pBlockSizeX, pBlockSizeY);

	// Fill attributes
	pNewSurface->_surface->_attributes._type			 = mI._type;
	pNewSurface->_surface->_attributes._quality			 = mI._quality;
	pNewSurface->_surface->_attributes._blocksX          = mI._blocksX;
	pNewSurface->_surface->_attributes._blocksY          = mI._blocksY;
	pNewSurface->_surface->_attributes._spareX           = mI._spareX;
	pNewSurface->_surface->_attributes._spareY           = mI._spareY;
	pNewSurface->_surface->_attributes._numBlocks        = mI._blocksX * mI._blocksY;
	pNewSurface->_surface->_attributes._numTextures      = mI._blocksX * mI._blocksY;
	pNewSurface->_surface->_attributes._isHaveGrid       = 0;
	pNewSurface->_surface->_attributes._widthBlock       = mI._widthBlock;
	pNewSurface->_surface->_attributes._heightBlock      = mI._heightBlock;
	pNewSurface->_surface->_attributes._width            = mI._widthImage;
	pNewSurface->_surface->_attributes._height           = mI._heightImage;
	pNewSurface->_surface->_attributes._isHaveSurface    = 1;

	// Allocate space for the vertex buffer
	// This buffer will be used for drawing the IND_Surface using DrawPrimitiveUp
	pNewSurface->_surface->_vertexArray = new CUSTOMVERTEX2D [mI._numVertices];

	// Each block needs a texture. We use an array of textures to store them.
	pNewSurface->_surface->_texturesArray = new TEXTURE [mI._blocksX * mI._blocksY];

	// Current position of the vertex
	int mPosX = 0;
	int mPosY = mI._heightImage;
	int mPosZ = 0;

	// Position in which we are storing a vertex
	int mPosVer = 0;

	// Position in which we are storing a texture
	int mCont = 0;

	// Image pointer
	BYTE *mPtrBlock = pImage->getPointer();

	// Vars
	int mActualWidthBlockX (0);
	int mActualHeightBlockY (0);
	float mActualU (0.0f);
	float mActualV (0.0f);
	int mActualSpareX (0);
	int mActualSpareY (0);
	int mSrcBytespp = pImage->getBytespp();

	// ----- Cutting blocks -----

	// We iterate the blocks starting from the lower row
	// We MUST draw the blocks in this order, because the image starts drawing from the lower-left corner
	//LOOP - All blocks (Y coords)
	for (int i = mI._blocksY; i > 0; i--) {
		//LOOP - All blocks (X coords)
		for (int j = 1; j < mI._blocksX + 1; j++) {
			// ----- Vertices position of the block -----

			// There are 4 types of blocks: the ones of the right column, the ones of the upper row,
			// the one of the upper-right corner and the rest of blocks.
			// Depending on the block, we store the vertices one way or another.

			// Normal block
			if (i != 1 && j !=  mI._blocksX) {
				mActualWidthBlockX  = mI._widthBlock;
				mActualHeightBlockY = mI._heightBlock;
				mActualU            = 1.0f;
				mActualV            = 1.0f;
				mActualSpareX       = 0;
				mActualSpareY       = 0;
			}

			// The ones of the right column
			if (i != 1 && j ==  mI._blocksX) {
				mActualWidthBlockX  = mI._widthSpareImage;
				mActualHeightBlockY = mI._heightBlock;
				mActualU            = (float) mI._widthSpareImage / mI._widthBlock;
				mActualV            = 1.0f;
				mActualSpareX       = mI._spareX;
				mActualSpareY       = 0;
			}

			// The ones of the upper row
			if (i == 1 && j !=  mI._blocksX) {
				mActualWidthBlockX  = mI._widthBlock;
				mActualHeightBlockY = mI._heightSpareImage;
				mActualU            = 1.0f;
				mActualV            = (float) mI._heightSpareImage / mI._heightBlock;
				mActualSpareX       = 0;
				mActualSpareY       = mI._spareY;
			}

			// The one of the upper-right corner
			if (i == 1 && j ==  mI._blocksX) {
				mActualWidthBlockX  = mI._widthSpareImage;
				mActualHeightBlockY = mI._heightSpareImage;
				mActualU            = (float) mI._widthSpareImage / mI._widthBlock;
				mActualV            = (float) mI._heightSpareImage / mI._heightBlock;
				mActualSpareX       = mI._spareX;
				mActualSpareY       = mI._spareY;
			}

			// ----- Block creation (using the position, uv coordinates and texture) -----

			// We push into the buffer the 4 vertices of the block
			push4Vertices(pNewSurface->_surface->_vertexArray,           // Pointer to the buffer
			              mPosVer,                                    // Position in which we are storing a vertex
			              mPosX,                                      // x
			              mPosY,                                      // y
			              mPosZ,                                      // z
			              mActualWidthBlockX,                         // Block width
			              mActualHeightBlockY,                        // Block height
			              mActualU,                                   // U mapping coordinate
			              mActualV);                                  // V mapping coordinate

			// Cuts a block from the image (bitmap)
			BYTE *mTempBlock = 0;
			_cutter->cutBlock(mPtrBlock,
			                  mI._widthImage,
			                  mI._widthBlock,
			                  mI._heightBlock,
			                  mActualSpareX,
			                  mActualSpareY,
			                  mSrcBytespp,
			                  &mTempBlock);

			// We create a texture using the cut bitmap block
			pNewSurface->_surface->_texturesArray [mCont]._texture = createTexture(mTempBlock,
			        mI._widthBlock,
			        mI._heightBlock,
			        mSrcBytespp,
			        mSrcFormat,
			        mDstFormat);

			// Free the cut bitmap block
			DISPOSEARRAY(mTempBlock);

			// ----- Advance -----

			// Advance the position by 4 vertices (we have already stored a quad)
			mPosVer += 4;

			// Increase the texture counter (we have already stored one texture)
			mCont++;

			// ----- Column change -----

			// We point to the next block (memory and screen)
			mPosX += mI._widthBlock;
			mPtrBlock += mI._widthBlock * mSrcBytespp;
		}//LOOP END - All blocks (X coords)

		// ----- Row change -----

		// We point to the next block  (memory and screen)
		mPosX = 0;
		mPtrBlock -= mI._spareX * mSrcBytespp;

		// If this block is in the last row, we take the spare areas into account.
		if (i == 1) {
			mPosY -= mI._spareY;
			mPtrBlock += (mI._widthImage * mSrcBytespp) * (mI._spareY - 1);
		} else {
			mPosY -= mI._heightBlock;
			mPtrBlock += (mI._widthImage * mSrcBytespp) * (mI._heightBlock - 1);
		}
	}//LOOP END - All blocks (Y coords)

	success = true;

	return success;
}
Example #14
void SDLFontGL::drawTextGL(TextGraphicsInfo_t & graphicsInfo,
													 int nX, int nY, const char * text) {
	if (!GlyphCache) createTexture();

	int fnt = graphicsInfo.font;
	int fg = graphicsInfo.fg;
	int bg = graphicsInfo.bg;
	int blink = graphicsInfo.blink;

	assert(fnt >= 0 && fnt < nFonts);
	assert(fg >= 0 && fg < nCols);
	assert(bg >= 0 && bg < nCols);
	assert(fnts && cols && GlyphCache);

	unsigned len = strlen(text);

	// Ensure we have the needed font/slots:
	ensureCacheLine(fnt, graphicsInfo.slot1);
	ensureCacheLine(fnt, graphicsInfo.slot2);

	const int stride = 12; // GL_TRIANGLE_STRIP 2*6

	drawBackground(bg, nX, nY, len);

	if (blink) return;

	GLfloat *tex = &texValues[stride*numChars];
	GLfloat *vtx = &vtxValues[stride*numChars];
	GLfloat *clrs = &colorValues[2*stride*numChars];
	numChars += len;

	float x_scale = ((float)nWidth) / (float)texW;
	float y_scale = ((float)nHeight) / (float)texH;
	GLfloat texCopy[] = {
		0.0, 0.0,
		0.0, y_scale,
		x_scale, 0.0,
		0.0, y_scale,
		x_scale, 0.0,
		x_scale, y_scale
	};
	GLfloat vtxCopy[] = {
		nX, nY,
		nX, nY + nHeight,
		nX + nWidth, nY,
		nX, nY + nHeight,
		nX + nWidth, nY,
		nX + nWidth, nY + nHeight
	};
	SDL_Color fgc = cols[fg];
	GLfloat colorCopy[] = {
			((float)fgc.r)/255.f,
			((float)fgc.g)/255.f,
			((float)fgc.b)/255.f,
			1.f,
			((float)fgc.r)/255.f,
			((float)fgc.g)/255.f,
			((float)fgc.b)/255.f,
			1.f,
			((float)fgc.r)/255.f,
			((float)fgc.g)/255.f,
			((float)fgc.b)/255.f,
			1.f,
			((float)fgc.r)/255.f,
			((float)fgc.g)/255.f,
			((float)fgc.b)/255.f,
			1.f,
			((float)fgc.r)/255.f,
			((float)fgc.g)/255.f,
			((float)fgc.b)/255.f,
			1.f,
			((float)fgc.r)/255.f,
			((float)fgc.g)/255.f,
			((float)fgc.b)/255.f,
			1.f
	};

	for (unsigned i = 0; i < len; ++i)
	{
		// Populate texture coordinates
		memcpy(&tex[i*stride],texCopy,sizeof(texCopy));

		char c = text[i];

		int x,y;
		getTextureCoordinates(graphicsInfo, c, x, y);

		float x_offset = ((float)x) / texW;
		float y_offset = ((float)y) / texH;

		for(unsigned j = 0; j < stride; j += 2) {
			tex[i*stride+j] += x_offset;
			tex[i*stride+j+1] += y_offset;
		}

		// Populate vertex coordinates
		memcpy(&vtx[i*stride],vtxCopy,sizeof(vtxCopy));
		for(unsigned j = 0; j < stride; j += 2) {
			vtxCopy[j] += nWidth;
		}

		// Populate color values
		memcpy(&clrs[i*2*stride], colorCopy, sizeof(colorCopy));
	}
}
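Note that drawTextGL only appends glyph quads to the client-side arrays (texValues, vtxValues, colorValues) and advances numChars; the actual draw call is issued elsewhere. The following is a minimal sketch of what such a flush step might look like, assuming fixed-function GL client arrays and that the six-vertex layout above is drawn as plain triangles; the function name, parameters, and draw mode are assumptions for illustration, not the library's actual code.

#include <GL/gl.h>

// Hypothetical flush step: submits the batched glyph quads in a single draw call.
// glyphTexture is assumed to be the GL texture id of the glyph atlas (GlyphCache).
static void flushGlyphBatch(GLuint glyphTexture,
                            const GLfloat *vtx, const GLfloat *tex,
                            const GLfloat *clrs, int numChars) {
	if (numChars == 0) return;

	glBindTexture(GL_TEXTURE_2D, glyphTexture);

	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_TEXTURE_COORD_ARRAY);
	glEnableClientState(GL_COLOR_ARRAY);

	glVertexPointer(2, GL_FLOAT, 0, vtx);
	glTexCoordPointer(2, GL_FLOAT, 0, tex);
	glColorPointer(4, GL_FLOAT, 0, clrs);

	// Each character contributed 6 vertices (two triangles per quad).
	glDrawArrays(GL_TRIANGLES, 0, 6 * numChars);

	glDisableClientState(GL_COLOR_ARRAY);
	glDisableClientState(GL_TEXTURE_COORD_ARRAY);
	glDisableClientState(GL_VERTEX_ARRAY);
}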
Example #15
Texture::Texture(const char *filename,
		FilterType minFilter, FilterType magFilter,
		WrapMode wrap)
{
#if defined (PNG_LOADER_LIBPNG)
	m_filename = strdup(filename);
	m_minFilter = minFilter;
	m_magFilter = magFilter;

	m_wrapMode = wrap;

	m_image = 0;
	m_handle = 0;

	m_isReady = false;

	// Try to read in the image file
	FILE *infile = fopen(filename, "r");
	if (!infile)
	{
		return;
	}

	Image::PNGDecoder decoder;
	if ( !decoder.checkSig(infile) )
	{
		fprintf(stderr, "ERROR! Texture file not a png\n");
		fclose(infile);
		return;
	}

	m_image = decoder.decode(infile, m_width, m_height, m_nChannels, m_bitDepth, m_rowBytes);

	if ( !m_image )
	{
		return;
	}

	// Upload the texture to the GL context
	glGenTextures(1, &m_handle);
	if ( isGLError() || (0==m_handle) )
	{
		return;
	}
	glBindTexture(GL_TEXTURE_2D, m_handle);

	if ( isGLError() )
	{
		fprintf(stderr, "ERROR! glBindTexture failed\n");
		return;
	}

	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, m_wrapMode);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, m_wrapMode);

	if ( isGLError() )
	{
		fprintf(stderr, "ERROR! Setting texture wrap mode failed\n");
		return;
	}

	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, m_minFilter);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, m_magFilter);

	if ( isGLError() )
	{
		fprintf(stderr, "ERROR! Setting texture filters failed\n");
		return;
	}

	// Each row of image data is 4-byte aligned.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);

	if ( isGLError() )
	{
		fprintf(stderr, "ERROR! glPixelStorei failed\n");
		return;
	}

	//fprintf(stderr, "Channels = %u\nwidth = %u\nheight = %u\nbitdepth = %u\n", m_nChannels, m_width, m_height, m_bitDepth);

	glTexImage2D(GL_TEXTURE_2D, 0,
			(m_nChannels==1)?GL_RED:GL_RGB8,
			m_width, m_height, 0,
			(m_nChannels==1)?GL_RED:GL_RGB,
			(m_bitDepth==8)?GL_UNSIGNED_BYTE:GL_UNSIGNED_SHORT,
			m_image);

	if ( isGLError() )
	{
		fprintf(stderr, "ERROR! glTexImage2D failed\n");
		return;
	}

	m_isReady = glIsTexture(m_handle) == GL_TRUE;
#elif defined (PNG_LOADER_LODEPNG)
	m_handle = createTexture(filename);
	m_isReady = glIsTexture(m_handle) == GL_TRUE;
#endif
}