Example #1
void GrManager::getTextureVolumeUploadInfo(TexturePtr tex, const TextureVolumeInfo& vol, PtrSize& allocationSize)
{
	const TextureImpl& impl = *tex->m_impl;
	impl.checkVolume(vol);

	// Compute the dimensions of the requested mip level
	U width = impl.m_width >> vol.m_level;
	U height = impl.m_height >> vol.m_level;
	U depth = impl.m_depth >> vol.m_level;

	if(!impl.m_workarounds)
	{
		allocationSize = computeVolumeSize(width, height, depth, impl.m_format);
	}
	else if(!!(impl.m_workarounds & TextureImplWorkaround::R8G8B8_TO_R8G8B8A8))
	{
		// The R8G8B8 data is staged first and then expanded to R8G8B8A8, so
		// allocate room for both, with the converted copy aligned to 16 bytes
		allocationSize =
			computeVolumeSize(width, height, depth, PixelFormat(ComponentFormat::R8G8B8, TransformFormat::UNORM));
		alignRoundUp(16, allocationSize);
		allocationSize +=
			computeVolumeSize(width, height, depth, PixelFormat(ComponentFormat::R8G8B8A8, TransformFormat::UNORM));
	}
	else
	{
		ANKI_ASSERT(0);
	}
}
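Every example on this page revolves around the same helper: `alignRoundUp` takes the alignment first and rounds its second argument up in place (notice how `allocationSize` is aligned to 16 and then added to). A minimal sketch of the integer form, with an assumed signature rather than the engine's actual declaration, could look like this:

// Sketch of an integer align-round-up helper (assumed signature, not the
// engine's actual declaration). Rounds 'value' up to the next multiple of
// 'alignment', mutating it in place, which matches the call sites above.
template<typename TAlignment, typename TValue>
void alignRoundUp(TAlignment alignment, TValue& value)
{
	value = TValue((value + alignment - 1) / alignment * alignment);
}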
Example #2
//==============================================================================
void RenderableDrawer::setupUniforms(
	VisibleNode& visibleNode, 
	RenderComponent& renderable,
	FrustumComponent& fr,
	F32 flod)
{
	const Material& mtl = renderable.getMaterial();
	U blockSize = mtl.getDefaultBlockSize();
	U8* persistent = (U8*)m_uniformBuff.getPersistentMappingAddress();

	// Find a place to write the uniforms
	//
	U8* prevUniformPtr = m_uniformPtr;
	alignRoundUp(GlDeviceSingleton::get().getBufferOffsetAlignment(
		m_uniformBuff.getTarget()), m_uniformPtr);
	U diff = m_uniformPtr - prevUniformPtr;

	if(m_uniformPtr + blockSize >= persistent + m_uniformBuff.getSize())
	{
		// Not enough room left, rewind to the start of the buffer. The start
		// is assumed aligned, so no alignment padding is consumed
		m_uniformPtr = persistent;
		diff = 0;
	}

	// Call the visitor
	//
	SetupRenderableVariableVisitor vis;
	
	vis.m_visibleNode = &visibleNode;
	vis.m_renderable = &renderable;
	vis.m_fr = &fr;
	vis.m_drawer = this;
	vis.m_instanceCount = visibleNode.m_spatialsCount;
	vis.m_jobs = m_jobs;
	vis.m_flod = flod;

	for(auto it = renderable.getVariablesBegin();
		it != renderable.getVariablesEnd(); ++it)
	{
		RenderComponentVariable* rvar = *it;

		vis.m_rvar = rvar;
		rvar->acceptVisitor(vis);
	}

	// Update the uniform descriptor
	//
	m_uniformBuff.bindShaderBuffer(
		m_jobs, 
		m_uniformPtr - persistent,
		mtl.getDefaultBlockSize(),
		0);

	// Advance the uniform ptr. The alignment padding (diff) counts towards
	// the used size as well
	m_uniformPtr += blockSize;
	m_uniformsUsedSize += blockSize + diff;
}
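Here `alignRoundUp` is applied to a raw pointer (`m_uniformPtr`) rather than a size, so the helper presumably also has a pointer form. A hedged sketch of such an overload, converting through `std::uintptr_t`, might be:

#include <cstdint>

// Hypothetical pointer overload: align the address itself, then convert
// back. This mirrors aligning m_uniformPtr to the GL buffer offset
// alignment before a uniform block is written.
template<typename T>
void alignRoundUp(std::uintptr_t alignment, T*& ptr)
{
	std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(ptr);
	addr = (addr + alignment - 1) / alignment * alignment;
	ptr = reinterpret_cast<T*>(addr);
}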
Example #3
//==============================================================================
void Ssao::initInternal(const ConfigSet& config)
{
	m_enabled = config.get("pps.ssao.enabled");

	if(!m_enabled)
	{
		return;
	}

	m_blurringIterationsCount = 
		config.get("pps.ssao.blurringIterationsCount");

	//
	// Init the widths/heights, rounding each up to a multiple of 16
	//
	const F32 quality = config.get("pps.ssao.renderingQuality");

	m_width = quality * (F32)m_r->getWidth();
	alignRoundUp(16, m_width);
	m_height = quality * (F32)m_r->getHeight();
	alignRoundUp(16, m_height);

	//
	// create FBOs
	//
	createFb(m_hblurFb, m_hblurRt);
	createFb(m_vblurFb, m_vblurRt);

	//
	// noise texture
	//
	GlCommandBufferHandle cmdb;
	cmdb.create(&getGlDevice());

	GlClientBufferHandle noise;
	noise.create(
		cmdb, sizeof(Vec3) * NOISE_TEX_SIZE * NOISE_TEX_SIZE, nullptr);

	genNoise((Vec3*)noise.getBaseAddress(), 
		(Vec3*)((U8*)noise.getBaseAddress() + noise.getSize()));

	GlTextureHandle::Initializer tinit;

	tinit.m_width = tinit.m_height = NOISE_TEX_SIZE;
	tinit.m_target = GL_TEXTURE_2D;
	tinit.m_internalFormat = GL_RGB32F;
	tinit.m_format = GL_RGB;
	tinit.m_type = GL_FLOAT;
	tinit.m_filterType = GlTextureHandle::Filter::NEAREST;
	tinit.m_repeat = true;
	tinit.m_mipmapsCount = 1;
	tinit.m_data[0][0] = noise;

	m_noiseTex.create(cmdb, tinit);

	//
	// Kernel
	//
	String kernelStr(getAllocator());
	Array<Vec3, KERNEL_SIZE> kernel;

	genKernel(kernel.begin(), kernel.end());
	kernelStr = "vec3[](";
	for(U i = 0; i < kernel.size(); i++)
	{
		String tmp(getAllocator());

		tmp.sprintf("vec3(%f, %f, %f) %s",
			kernel[i].x(), kernel[i].y(), kernel[i].z(),
			(i != kernel.size() - 1) ? ", " : ")");

		kernelStr += tmp;
	}

	//
	// Shaders
	//
	m_uniformsBuff.create(cmdb, GL_SHADER_STORAGE_BUFFER, 
		sizeof(ShaderCommonUniforms), GL_DYNAMIC_STORAGE_BIT);

	String pps(getAllocator());

	// main pass prog
	pps.sprintf(
		"#define NOISE_MAP_SIZE %u\n"
		"#define WIDTH %u\n"
		"#define HEIGHT %u\n"
		"#define KERNEL_SIZE %u\n"
		"#define KERNEL_ARRAY %s\n",
		NOISE_TEX_SIZE, m_width, m_height, KERNEL_SIZE, &kernelStr[0]);

	m_ssaoFrag.loadToCache(&getResourceManager(),
		"shaders/PpsSsao.frag.glsl", pps.toCString(), "r_");


	m_ssaoPpline = m_r->createDrawQuadProgramPipeline(
		m_ssaoFrag->getGlProgram());

	// blurring progs
	const char* SHADER_FILENAME = 
		"shaders/VariableSamplingBlurGeneric.frag.glsl";

	pps.sprintf(
		"#define HPASS\n"
		"#define COL_R\n"
		"#define IMG_DIMENSION %u\n"
		"#define SAMPLES 7\n", 
		m_height);

	m_hblurFrag.loadToCache(&getResourceManager(),
		SHADER_FILENAME, pps.toCString(), "r_");

	m_hblurPpline = m_r->createDrawQuadProgramPipeline(
		m_hblurFrag->getGlProgram());

	pps.sprintf(
		"#define VPASS\n"
		"#define COL_R\n"
		"#define IMG_DIMENSION %u\n"
		"#define SAMPLES 7\n", 
		m_width);

	m_vblurFrag.loadToCache(&getResourceManager(),
		SHADER_FILENAME, pps.toCString(), "r_");

	m_vblurPpline = m_r->createDrawQuadProgramPipeline(
		m_vblurFrag->getGlProgram());

	cmdb.flush();
}
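In this example `alignRoundUp` keeps the scaled-down SSAO render targets at multiples of 16 in each dimension. As a worked example with a hypothetical resolution: at a `pps.ssao.renderingQuality` of 0.5 on a 1920x1080 renderer, the width comes out to 960, already a multiple of 16, while the height of 540 is rounded up to 544 (34 * 16).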
Example #4
Error BufferImpl::init(const BufferInitInfo& inf)
{
	ANKI_ASSERT(!isCreated());

	PtrSize size = inf.m_size;
	BufferMapAccessBit access = inf.m_access;
	BufferUsageBit usage = inf.m_usage;

	ANKI_ASSERT(size > 0);
	ANKI_ASSERT(usage != BufferUsageBit::NONE);

	// Round the size up to a multiple of 4 so the whole buffer can be
	// cleared with a fill command
	alignRoundUp(4, size);

	// Create the buffer
	VkBufferCreateInfo ci = {};
	ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	ci.size = size;
	ci.usage = convertBufferUsageBit(usage);
	ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	ci.queueFamilyIndexCount = 1;
	U32 queueIdx = getGrManagerImpl().getGraphicsQueueFamily();
	ci.pQueueFamilyIndices = &queueIdx;
	ANKI_VK_CHECK(vkCreateBuffer(getDevice(), &ci, nullptr, &m_handle));
	getGrManagerImpl().trySetVulkanHandleName(inf.getName(), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, m_handle);

	// Get mem requirements
	VkMemoryRequirements req;
	vkGetBufferMemoryRequirements(getDevice(), m_handle, &req);
	U memIdx = MAX_U32;

	if(access == BufferMapAccessBit::WRITE)
	{
		// Only write, probably for uploads

		VkMemoryPropertyFlags preferDeviceLocal;
		VkMemoryPropertyFlags avoidDeviceLocal;
		if((usage & (~BufferUsageBit::TRANSFER_ALL)) != BufferUsageBit::NONE)
		{
			// Will be used for something other than transfer, try to put it in the device
			preferDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
			avoidDeviceLocal = 0;
		}
		else
		{
			// Will be used only for transfers, don't want it in the device
			preferDeviceLocal = 0;
			avoidDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
		}

		// Device & host & coherent but not cached
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | preferDeviceLocal,
			VK_MEMORY_PROPERTY_HOST_CACHED_BIT | avoidDeviceLocal);

		// Fallback: host & coherent and not cached
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
				VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
				VK_MEMORY_PROPERTY_HOST_CACHED_BIT | avoidDeviceLocal);
		}

		// Fallback: just host
		if(memIdx == MAX_U32)
		{
			ANKI_VK_LOGW("Using a fallback mode for write-only buffer");
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		}
	}
	else if((access & BufferMapAccessBit::READ) != BufferMapAccessBit::NONE)
	{
		// Read or read/write

		// Cached & coherent
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT
				| VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
			0);

		// Fallback: Just cached
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, 0);
		}

		// Fallback: Just host
		if(memIdx == MAX_U32)
		{
			ANKI_VK_LOGW("Using a fallback mode for read/write buffer");
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		}
	}
	else
	{
		// Not mapped

		ANKI_ASSERT(access == BufferMapAccessBit::NONE);

		// Device only
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
			req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

		// Fallback: Device with anything else
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
		}
	}

	ANKI_ASSERT(memIdx != MAX_U32);

	const VkPhysicalDeviceMemoryProperties& props = getGrManagerImpl().getMemoryProperties();
	m_memoryFlags = props.memoryTypes[memIdx].propertyFlags;

	// Allocate
	getGrManagerImpl().getGpuMemoryManager().allocateMemory(memIdx, req.size, req.alignment, true, m_memHandle);

	// Bind mem to buffer
	{
		ANKI_TRACE_SCOPED_EVENT(VK_BIND_OBJECT);
		ANKI_VK_CHECK(vkBindBufferMemory(getDevice(), m_handle, m_memHandle.m_memory, m_memHandle.m_offset));
	}

	m_access = access;
	m_size = inf.m_size;
	m_actualSize = size;
	m_usage = usage;
	return Error::NONE;
}
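The `alignRoundUp(4, size)` near the top exists because Vulkan's vkCmdFillBuffer only accepts a fill size that is a multiple of 4 (or VK_WHOLE_SIZE). Since the buffer is created at the rounded size (`m_actualSize`) while `m_size` keeps the caller's requested size, clearing the whole allocation stays valid; a sketch, with a hypothetical command buffer `cmdb`:

// Fill the entire buffer with zeros. m_actualSize is the 4-byte-rounded
// size computed in init(), so it satisfies vkCmdFillBuffer's alignment rule.
vkCmdFillBuffer(cmdb, m_handle, 0 /* dstOffset */, m_actualSize, 0 /* data */);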