//============================================================================== Error Thread::join() { ANKI_ASSERT(m_started); // Wait thread WaitForSingleObject(m_impl, INFINITE); // Get return code DWORD exitCode = 0; BOOL ok = GetExitCodeThread(m_impl, &exitCode); if(!ok) { ANKI_LOGF("GetExitCodeThread() failed"); } // Delete handle ok = CloseHandle(m_impl); if(!ok) { ANKI_LOGF("CloseHandle() failed"); } m_impl = nullptr; #if ANKI_ASSERTIONS m_started = false; #endif return static_cast<ErrorCode>(exitCode); }
//============================================================================== Threadpool::Threadpool(U32 threadsCount) #if !ANKI_DISABLE_THREADPOOL_THREADING : m_barrier(threadsCount + 1) #endif { m_threadsCount = threadsCount; ANKI_ASSERT(m_threadsCount <= MAX_THREADS && m_threadsCount > 0); #if ANKI_DISABLE_THREADPOOL_THREADING ANKI_LOGW("Threadpool works in synchronous mode"); #else m_threads = reinterpret_cast<detail::ThreadpoolThread*>( malloc(sizeof(detail::ThreadpoolThread) * m_threadsCount)); if(m_threads == nullptr) { ANKI_LOGF("Out of memory"); } while(threadsCount-- != 0) { construct(&m_threads[threadsCount], threadsCount, this); } #endif }
//============================================================================== void TransientMemoryManager::allocate(PtrSize size, BufferUsageBit usage, TransientMemoryToken& token, void*& ptr, Error* outErr) { Error err = ErrorCode::NONE; ptr = nullptr; PerFrameBuffer& buff = m_perFrameBuffers[bufferUsageToTransient(usage)]; err = buff.m_alloc.allocate(size, token.m_offset); if(!err) { token.m_usage = usage; token.m_range = size; token.m_lifetime = TransientMemoryTokenLifetime::PER_FRAME; ptr = buff.m_mappedMem + token.m_offset; } else if(outErr) { *outErr = err; } else { ANKI_LOGF("Out of transient GPU memory"); } }
void Pipeline::init(const PipelineInitInfo& init) { m_impl.reset(getAllocator().newInstance<PipelineImpl>(&getManager())); if(m_impl->init(init)) { ANKI_LOGF("Cannot recover"); } }
void Buffer::init(PtrSize size, BufferUsageBit usage, BufferMapAccessBit access)
{
	// Create the backend implementation object
	BufferImpl* impl = getAllocator().newInstance<BufferImpl>(&getManager());
	m_impl.reset(impl);

	// Initialization failure cannot be handled at this level
	const Error err = m_impl->init(size, usage, access);
	if(err)
	{
		ANKI_LOGF("Cannot recover");
	}
}
ConditionVariable::ConditionVariable()
{
	// Heap-allocate the native object; malloc returns void*, so static_cast
	// is the appropriate conversion.
	auto* cond = static_cast<CONDITION_VARIABLE*>(malloc(sizeof(CONDITION_VARIABLE)));
	if(cond == nullptr)
	{
		ANKI_LOGF("Out of memory");
	}

	InitializeConditionVariable(cond);
	m_impl = cond;
}
Mutex::Mutex()
{
	// Heap-allocate the native object; malloc returns void*, so static_cast
	// is the appropriate conversion.
	auto* mtx = static_cast<CRITICAL_SECTION*>(malloc(sizeof(CRITICAL_SECTION)));
	if(mtx == nullptr)
	{
		ANKI_LOGF("Out of memory");
	}

	InitializeCriticalSection(mtx);
	m_impl = mtx;
}
VkCommandBuffer GrManagerImpl::newCommandBuffer(ThreadId tid, CommandBufferFlag cmdbFlags)
{
	// Fetch this thread's command buffer cache
	PerThread& cache = getPerThreadCache(tid);

	// Lazily initialize the recycler on the first request from this thread
	if(ANKI_UNLIKELY(!cache.m_cmdbs.isCreated()))
	{
		if(cache.m_cmdbs.init(getAllocator(), m_device, m_queueIdx))
		{
			ANKI_LOGF("Cannot recover");
		}
	}

	return cache.m_cmdbs.newCommandBuffer(cmdbFlags);
}
Barrier::Barrier(U32 count)
{
	ANKI_ASSERT(count > 1);

	// Heap-allocate the implementation; malloc returns void*, so static_cast
	// is the appropriate conversion.
	auto* impl = static_cast<BarrierImpl*>(malloc(sizeof(BarrierImpl)));
	if(impl == nullptr)
	{
		ANKI_LOGF("Out of memory");
	}

	// Set up the native synchronization primitives
	InitializeCriticalSection(&impl->m_mtx);
	InitializeConditionVariable(&impl->m_cvar);

	// Both the threshold and the live count start at the arrival target;
	// the generation counter starts at zero.
	impl->m_threshold = count;
	impl->m_count = count;
	impl->m_generation = 0;

	m_impl = impl;
}
//============================================================================== void Thread::start(void* userData, Callback callback) { ANKI_ASSERT(!m_started); ANKI_ASSERT(callback != nullptr); m_callback = callback; m_userData = userData; m_impl = CreateThread(nullptr, 0, threadCallback, this, 0, nullptr); if(m_impl == nullptr) { ANKI_LOGF("CreateThread() failed"); } else { #if ANKI_ASSERTIONS m_started = true; #endif } }