//==============================================================================
CommandBufferImpl::~CommandBufferImpl()
{
	// Warn about suspicious lifecycle states: an empty buffer was probably
	// created by mistake and an unflushed one never reached the GPU.
	if(m_empty)
	{
		ANKI_LOGW("Command buffer was empty");
	}

	if(!m_finalized)
	{
		ANKI_LOGW("Command buffer was not flushed");
	}

	// Hand the native handle back to the manager for destruction/recycling.
	if(m_handle)
	{
		const Bool isSecondLevel =
			(m_flags & CommandBufferFlag::SECOND_LEVEL)
			== CommandBufferFlag::SECOND_LEVEL;
		getGrManagerImpl().deleteCommandBuffer(m_handle, isSecondLevel, m_tid);
	}

	// Release the reference lists that kept referenced objects alive for the
	// lifetime of this command buffer.
	m_pplineList.destroy(m_alloc);
	m_fbList.destroy(m_alloc);
	m_rcList.destroy(m_alloc);
	m_texList.destroy(m_alloc);
	m_queryList.destroy(m_alloc);
}
// Example 2 (listing separator)
QueryAllocator::~QueryAllocator()
{
	// All query chunks should have been returned before the allocator dies;
	// leftovers indicate leaked queries.
	if(!m_chunks.isEmpty())
	{
		// Fixed typo in the warning ("Forgot the delete some queries").
		ANKI_LOGW("Forgot to delete some queries");
	}
}
//==============================================================================
void CommandBufferImpl::destroy()
{
	ANKI_TRACE_START_EVENT(GL_CMD_BUFFER_DESTROY);

#if ANKI_DEBUG
	if(!m_executed && m_firstCommand)
	{
		ANKI_LOGW("Chain contains commands but never executed. "
				  "This should only happen on exceptions");
	}
#endif

	GlCommand* command = m_firstCommand;
	while(command != nullptr)
	{
		GlCommand* next = command->m_nextCommand; // Get next before deleting
		m_alloc.deleteInstance(command);
		command = next;
	}

	ANKI_ASSERT(m_alloc.getMemoryPool().getUsersCount() == 1
		&& "Someone is holding a reference to the command buffer's allocator");

	m_alloc = CommandBufferAllocator<U8>();

	ANKI_TRACE_STOP_EVENT(GL_CMD_BUFFER_DESTROY);
}
// Example 4 (listing separator)
//==============================================================================
void RenderingThread::flushCommandBuffer(CommandBufferPtr cmdb)
{
	// From this point on nobody may append more commands.
	cmdb->getImplementation().makeImmutable();

	{
		LockGuard<Mutex> lock(m_mtx);

		// Enqueue into the ring buffer if there is room; otherwise drop the
		// command buffer and complain.
		const U64 pending = m_tail - m_head;
		if(pending < m_queue.getSize())
		{
			m_queue[m_tail % m_queue.getSize()] = cmdb;
			++m_tail;
		}
		else
		{
			ANKI_LOGW("Rendering queue too small");
		}

		m_condVar.notifyOne(); // Wake the thread
	}
}
// Example 5 (listing separator)
//==============================================================================
// Construct the pool with threadsCount workers. The barrier is sized
// threadsCount + 1 so the calling (main) thread can rendezvous with all
// workers.
Threadpool::Threadpool(U32 threadsCount)
#if !ANKI_DISABLE_THREADPOOL_THREADING
:	m_barrier(threadsCount + 1)
#endif
{
	m_threadsCount = threadsCount;
	ANKI_ASSERT(m_threadsCount <= MAX_THREADS && m_threadsCount > 0);

#if ANKI_DISABLE_THREADPOOL_THREADING
	// Threading compiled out: tasks run on the caller's thread.
	ANKI_LOGW("Threadpool works in synchronous mode");
#else
	// Raw storage first, then in-place construction of each element via
	// construct() below (storage and construction are split here).
	m_threads = reinterpret_cast<detail::ThreadpoolThread*>(
		malloc(sizeof(detail::ThreadpoolThread) * m_threadsCount));

	if(m_threads == nullptr)
	{
		// ANKI_LOGF is fatal, so execution does not continue past this point.
		ANKI_LOGF("Out of memory");
	}

	// Construct workers in reverse index order; each gets its index and a
	// back-pointer to this pool.
	while(threadsCount-- != 0)
	{
		construct(&m_threads[threadsCount], threadsCount, this);
	}
#endif
}
// Example 6 (listing separator)
GpuBlockAllocator::~GpuBlockAllocator()
{
	// Every block should have been returned to the free list by now.
	const Bool allBlocksFree = (m_freeBlockCount == m_blocks.getSize());
	if(!allBlocksFree)
	{
		ANKI_LOGW("Forgot to free memory");
	}

	m_blocks.destroy(m_alloc);
	m_freeBlocksStack.destroy(m_alloc);
}
// Example 7 (listing separator)
//==============================================================================
void RenderableDrawer::finishDraw()
{
	// Release the job chain
	m_jobs = GlCommandBufferHandle();

	if(m_uniformsUsedSize > MAX_UNIFORM_BUFFER_SIZE / 3)
	{
		ANKI_LOGW("Increase the uniform buffer to avoid corruption");
	}
}
// Example 8 (listing separator)
//==============================================================================
void DebugDrawer::end()
{
	if(m_primitive == GL_LINES)
	{
		if(m_lineVertCount % 2 != 0)
		{
			pushBackVertex(Vec3(0.0));
			ANKI_LOGW("Forgot to close the line loop");
		}
	}
	else
	{
		if(m_triVertCount % 3 != 0)
		{
			pushBackVertex(Vec3(0.0));
			pushBackVertex(Vec3(0.0));
			ANKI_LOGW("Forgot to close the line loop");
		}
	}
}
// Example 9 (listing separator)
// End the current frame: throttle on an older frame's present fence, recycle
// its resources, then present the current backbuffer.
void GrManagerImpl::endFrame()
{
	// Serialize against other frame-level operations.
	LockGuard<Mutex> lock(m_globalMtx);

	PerFrame& frame = m_perFrame[m_frame % MAX_FRAMES_IN_FLIGHT];

	// Wait for the fence of N-2 frame
	U waitFrameIdx = (m_frame + 1) % MAX_FRAMES_IN_FLIGHT;
	PerFrame& waitFrame = m_perFrame[waitFrameIdx];
	if(waitFrame.m_presentFence)
	{
		waitFrame.m_presentFence->wait();
	}

	// Safe to recycle the waited-on frame's resources now.
	resetFrame(waitFrame);

	if(!frame.m_renderSemaphore)
	{
		// Fixed grammar in the warning ("Nobody draw").
		ANKI_LOGW("Nobody drew to the default framebuffer");
	}

	// Present. If rendering happened, presentation waits on the render
	// semaphore; otherwise no wait semaphore is set.
	VkResult res;
	VkPresentInfoKHR present = {};
	present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
	present.waitSemaphoreCount = (frame.m_renderSemaphore) ? 1 : 0;
	present.pWaitSemaphores = (frame.m_renderSemaphore) ? &frame.m_renderSemaphore->getHandle() : nullptr;
	present.swapchainCount = 1;
	present.pSwapchains = &m_swapchain;
	present.pImageIndices = &m_crntBackbufferIdx;
	present.pResults = &res;

	// Check both the call and the per-swapchain result.
	ANKI_VK_CHECKF(vkQueuePresentKHR(m_queue, &present));
	ANKI_VK_CHECKF(res);

	m_transientMem.endFrame();

	// Finalize
	++m_frame;
}
// Example 10 (listing separator)
//==============================================================================
// Create the GL buffer storage.
// size: byte size of the buffer, must be > 0.
// usage: selects the binding target and triggers the per-target limit checks.
// access: client access bits; they determine the storage flags and whether
//     the buffer gets a persistent coherent mapping.
void BufferImpl::init(
	PtrSize size, BufferUsageBit usage, BufferAccessBit access)
{
	ANKI_ASSERT(!isCreated());
	m_usage = usage;
	m_access = access;

	//
	// Check size
	//

	ANKI_ASSERT(size > 0 && "Unacceptable size");

	// This is a guess, not very important since DSA doesn't care about it on
	// creation
	m_target = GL_ARRAY_BUFFER;

	if((usage & BufferUsageBit::UNIFORM) != BufferUsageBit::NONE)
	{
		GLint64 maxBufferSize;
		glGetInteger64v(GL_MAX_UNIFORM_BLOCK_SIZE, &maxBufferSize);

		// Check the implementation's limit first. The previous ordering
		// (spec minimum first) made this branch unreachable because the
		// implementation limit is always >= the spec minimum of 16KB.
		// Also: size/maxBufferSize are 64-bit, so %u was undefined behavior.
		if(size > PtrSize(maxBufferSize))
		{
			ANKI_LOGW("The size (%llu) of the uniform buffer is greater "
					  "than the implementation's max (%lld)",
				static_cast<unsigned long long>(size),
				static_cast<long long>(maxBufferSize));
		}
		else if(size > 16384)
		{
			ANKI_LOGW("The size (%llu) of the uniform buffer is greater "
					  "than the spec's min",
				static_cast<unsigned long long>(size));
		}

		m_target = GL_UNIFORM_BUFFER;
	}

	if((usage & BufferUsageBit::STORAGE) != BufferUsageBit::NONE)
	{
		GLint64 maxBufferSize;
		glGetInteger64v(GL_MAX_SHADER_STORAGE_BLOCK_SIZE, &maxBufferSize);

		if(size > PtrSize(maxBufferSize))
		{
			ANKI_LOGW("The size (%llu) of the shader storage buffer is "
					  "greater than the implementation's max (%lld)",
				static_cast<unsigned long long>(size),
				static_cast<long long>(maxBufferSize));
		}
		else if(size > (PtrSize(1) << 24)) // Spec guarantees at least 2^24
		{
			// Was "uniform buffer" (copy-paste) and a floating pow(2, 24).
			ANKI_LOGW("The size (%llu) of the shader storage buffer is "
					  "greater than the spec's min",
				static_cast<unsigned long long>(size));
		}

		m_target = GL_SHADER_STORAGE_BUFFER;
	}

	m_size = size;

	//
	// Determine the creation flags
	//
	GLbitfield flags = 0;
	Bool shouldMap = false;
	if((access & BufferAccessBit::CLIENT_WRITE) != BufferAccessBit::NONE)
	{
		flags |= GL_DYNAMIC_STORAGE_BIT;
	}

	if((access & BufferAccessBit::CLIENT_MAP_WRITE) != BufferAccessBit::NONE)
	{
		flags |= GL_MAP_WRITE_BIT;
		flags |= GL_MAP_PERSISTENT_BIT;
		flags |= GL_MAP_COHERENT_BIT;

		shouldMap = true;
	}

	if((access & BufferAccessBit::CLIENT_MAP_READ) != BufferAccessBit::NONE)
	{
		flags |= GL_MAP_READ_BIT;
		flags |= GL_MAP_PERSISTENT_BIT;
		flags |= GL_MAP_COHERENT_BIT;

		shouldMap = true;
	}

	//
	// Create
	//
	glGenBuffers(1, &m_glName);
	glBindBuffer(m_target, m_glName);
	glBufferStorage(m_target, size, nullptr, flags);

	//
	// Map
	//
	if(shouldMap)
	{
		// Map once for the buffer's lifetime (persistent + coherent), with
		// only the access bits that were actually requested.
		const GLbitfield MAP_BITS = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT
			| GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;

		m_persistentMapping =
			glMapBufferRange(m_target, 0, size, flags & MAP_BITS);
		ANKI_ASSERT(m_persistentMapping != nullptr);
	}
}
// Example 11 (listing separator)
//==============================================================================
// Load a texture stored in the "ANKITEX1" container format.
//
// file: open resource file positioned at the header.
// maxTextureSize: surfaces whose larger dimension exceeds this are skipped.
// preferredCompression: in/out; falls back to RAW if the requested
//     compression is absent from the file.
// surfaces: out; one entry per loaded (mip, depth/layer) combination.
// alloc: allocator for the surface pixel data.
// depthOrLayerCount, mipLevels, textureType, colorFormat: out; values
//     decoded from the header (mipLevels clamped to what fits
//     maxTextureSize).
//
// Returns ErrorCode::NONE on success, USER_DATA on a malformed file.
static ANKI_USE_RESULT Error loadAnkiTexture(ResourceFilePtr file,
	U32 maxTextureSize,
	ImageLoader::DataCompression& preferredCompression,
	DynamicArray<ImageLoader::Surface>& surfaces,
	GenericMemoryPoolAllocator<U8>& alloc,
	U8& depthOrLayerCount,
	U8& mipLevels,
	ImageLoader::TextureType& textureType,
	ImageLoader::ColorFormat& colorFormat)
{
	//
	// Read and check the header
	//
	AnkiTextureHeader header;
	ANKI_CHECK(file->read(&header, sizeof(AnkiTextureHeader)));

	if(std::memcmp(&header.m_magic[0], "ANKITEX1", 8) != 0)
	{
		ANKI_LOGE("Wrong magic word");
		return ErrorCode::USER_DATA;
	}

	// Dimensions must be non-zero powers of two, at most 4096.
	if(header.m_width == 0 || !isPowerOfTwo(header.m_width)
		|| header.m_width > 4096
		|| header.m_height == 0
		|| !isPowerOfTwo(header.m_height)
		|| header.m_height > 4096)
	{
		ANKI_LOGE("Incorrect width/height value");
		return ErrorCode::USER_DATA;
	}

	if(header.m_depthOrLayerCount < 1 || header.m_depthOrLayerCount > 128)
	{
		ANKI_LOGE("Zero or too big depth or layerCount");
		return ErrorCode::USER_DATA;
	}

	// Texture type must fall inside the enum's valid range.
	if(header.m_type < ImageLoader::TextureType::_2D
		|| header.m_type > ImageLoader::TextureType::_2D_ARRAY)
	{
		ANKI_LOGE("Incorrect header: texture type");
		return ErrorCode::USER_DATA;
	}

	if(header.m_colorFormat < ImageLoader::ColorFormat::RGB8
		|| header.m_colorFormat > ImageLoader::ColorFormat::RGBA8)
	{
		ANKI_LOGE("Incorrect header: color format");
		return ErrorCode::USER_DATA;
	}

	// If the requested compression is missing, fall back to RAW; if RAW is
	// missing too, the file is unusable.
	if((header.m_compressionFormats & preferredCompression)
		== ImageLoader::DataCompression::NONE)
	{
		ANKI_LOGW("File does not contain the requested compression");

		// Fallback
		preferredCompression = ImageLoader::DataCompression::RAW;

		if((header.m_compressionFormats & preferredCompression)
			== ImageLoader::DataCompression::NONE)
		{
			ANKI_LOGE("File does not contain raw compression");
			return ErrorCode::USER_DATA;
		}
	}

	if(header.m_normal != 0 && header.m_normal != 1)
	{
		ANKI_LOGE("Incorrect header: normal");
		return ErrorCode::USER_DATA;
	}

	// Check mip levels.
	// tmpMipLevels counts every possible mip down to 4x4; mipLevels counts
	// only the mips whose larger dimension fits within maxTextureSize.
	U size = min(header.m_width, header.m_height);
	U maxsize = max(header.m_width, header.m_height);
	mipLevels = 0;
	U tmpMipLevels = 0;
	while(size >= 4) // The minimum size is 4x4
	{
		++tmpMipLevels;

		if(maxsize <= maxTextureSize)
		{
			++mipLevels;
		}

		size /= 2;
		maxsize /= 2;
	}

	// The header cannot promise more mips than geometrically possible.
	if(header.m_mipLevels > tmpMipLevels)
	{
		ANKI_LOGE("Incorrect number of mip levels");
		return ErrorCode::USER_DATA;
	}

	// Load no more mips than the header actually stores.
	mipLevels = min<U>(mipLevels, header.m_mipLevels);

	colorFormat = header.m_colorFormat;

	// Derive the depth/layer count from the texture type: 2D has one layer,
	// cube has 6 faces, 3D/array take the count from the header.
	switch(header.m_type)
	{
	case ImageLoader::TextureType::_2D:
		depthOrLayerCount = 1;
		break;
	case ImageLoader::TextureType::CUBE:
		depthOrLayerCount = 6;
		break;
	case ImageLoader::TextureType::_3D:
	case ImageLoader::TextureType::_2D_ARRAY:
		depthOrLayerCount = header.m_depthOrLayerCount;
		break;
	default:
		ANKI_ASSERT(0);
	}

	textureType = header.m_type;

	//
	// Move file pointer
	//
	// Data segments are stored in order RAW, S3TC, ETC; skip over every
	// segment that precedes the compression we want.

	if(preferredCompression == ImageLoader::DataCompression::RAW)
	{
		// Do nothing
	}
	else if(preferredCompression == ImageLoader::DataCompression::S3TC)
	{
		if((header.m_compressionFormats & ImageLoader::DataCompression::RAW)
			!= ImageLoader::DataCompression::NONE)
		{
			// If raw compression is present then skip it
			ANKI_CHECK(file->seek(
				calcSizeOfSegment(header, ImageLoader::DataCompression::RAW),
				ResourceFile::SeekOrigin::CURRENT));
		}
	}
	else if(preferredCompression == ImageLoader::DataCompression::ETC)
	{
		if((header.m_compressionFormats & ImageLoader::DataCompression::RAW)
			!= ImageLoader::DataCompression::NONE)
		{
			// If raw compression is present then skip it
			ANKI_CHECK(file->seek(
				calcSizeOfSegment(header, ImageLoader::DataCompression::RAW),
				ResourceFile::SeekOrigin::CURRENT));
		}

		if((header.m_compressionFormats & ImageLoader::DataCompression::S3TC)
			!= ImageLoader::DataCompression::NONE)
		{
			// If s3tc compression is present then skip it
			ANKI_CHECK(file->seek(
				calcSizeOfSegment(header, ImageLoader::DataCompression::S3TC),
				ResourceFile::SeekOrigin::CURRENT));
		}
	}

	//
	// It's time to read
	//

	// Allocate the surfaces
	surfaces.create(alloc, mipLevels * depthOrLayerCount);

	// Read all surfaces. The file stores every mip level; mips too large for
	// maxTextureSize are seeked over instead of read, so `index` advances
	// only for surfaces that are actually kept.
	U mipWidth = header.m_width;
	U mipHeight = header.m_height;
	U index = 0;
	for(U mip = 0; mip < header.m_mipLevels; mip++)
	{
		for(U d = 0; d < depthOrLayerCount; d++)
		{
			U dataSize = calcSurfaceSize(mipWidth,
				mipHeight,
				preferredCompression,
				header.m_colorFormat);

			// Check if this mipmap can be skipped because of size
			if(max(mipWidth, mipHeight) <= maxTextureSize)
			{
				ImageLoader::Surface& surf = surfaces[index++];
				surf.m_width = mipWidth;
				surf.m_height = mipHeight;

				surf.m_data.create(alloc, dataSize);

				ANKI_CHECK(file->read(&surf.m_data[0], dataSize));
			}
			else
			{
				ANKI_CHECK(
					file->seek(dataSize, ResourceFile::SeekOrigin::CURRENT));
			}
		}

		mipWidth /= 2;
		mipHeight /= 2;
	}

	return ErrorCode::NONE;
}
// Example 12 (listing separator)
//==============================================================================
// Create a GL buffer object and, when the flags request a persistent
// coherent mapping, map it for its whole lifetime.
// target: GL binding target; UBO/SSBO targets get size-limit checks.
// sizeInBytes: buffer size, must be > 0.
// dataPtr: optional initial data (may be nullptr).
// flags: glBufferStorage flags.
void GlBuffer::create(GLenum target, U32 sizeInBytes,
	const void* dataPtr, GLbitfield flags)
{
	ANKI_ASSERT(!isCreated());
	ANKI_ASSERT(sizeInBytes > 0 && "Unacceptable size");

	if(target == GL_UNIFORM_BUFFER)
	{
		GLint64 maxBufferSize;
		glGetInteger64v(GL_MAX_UNIFORM_BLOCK_SIZE, &maxBufferSize);

		// Check the implementation's limit first. The previous ordering
		// (spec minimum first) made this branch unreachable because the
		// implementation limit is always >= the spec minimum of 16KB. Also,
		// GLint64 was being passed for a %u specifier (undefined behavior).
		if(sizeInBytes > (PtrSize)maxBufferSize)
		{
			ANKI_LOGW("The size (%u) of the uniform buffer is greater "
				"than the implementation's max (%lld)", sizeInBytes,
				(long long)maxBufferSize);
		}
		else if(sizeInBytes > 16384)
		{
			ANKI_LOGW("The size (%u) of the uniform buffer is greater "
				"than the spec's min", sizeInBytes);
		}
	}
	else if(target == GL_SHADER_STORAGE_BUFFER)
	{
		GLint64 maxBufferSize;
		glGetInteger64v(GL_MAX_SHADER_STORAGE_BLOCK_SIZE, &maxBufferSize);

		if(sizeInBytes > (PtrSize)maxBufferSize)
		{
			ANKI_LOGW("The size (%u) of the shader storage buffer is greater "
				"than the implementation's max (%lld)", sizeInBytes,
				(long long)maxBufferSize);
		}
		else if(sizeInBytes > (1u << 24)) // Spec guarantees at least 2^24
		{
			// Was "uniform buffer" (copy-paste) and a floating pow(2, 24).
			ANKI_LOGW("The size (%u) of the shader storage buffer is greater "
				"than the spec's min", sizeInBytes);
		}
	}

	m_target = target;
	m_size = sizeInBytes;

	// Create
	glGenBuffers(1, &m_glName);

	glBindBuffer(m_target, m_glName);
	glBufferStorage(m_target, m_size, dataPtr, flags);

	// Map if needed: a persistent + coherent request means the mapping stays
	// valid for the buffer's lifetime.
	if((flags & GL_MAP_PERSISTENT_BIT) && (flags & GL_MAP_COHERENT_BIT))
	{
		const GLbitfield mapbits = GL_MAP_READ_BIT | GL_MAP_WRITE_BIT
			| GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;

		m_persistentMapping =
			glMapBufferRange(m_target, 0, sizeInBytes, flags & mapbits);
		ANKI_ASSERT(m_persistentMapping != nullptr);
	}
}