void ExecutionGroup::finalizeChunkExecution(int chunkNumber, MemoryBuffer **memoryBuffers) { if (this->m_chunkExecutionStates[chunkNumber] == COM_ES_SCHEDULED) this->m_chunkExecutionStates[chunkNumber] = COM_ES_EXECUTED; atomic_add_and_fetch_u(&this->m_chunksFinished, 1); if (memoryBuffers) { for (unsigned int index = 0; index < this->m_cachedMaxReadBufferOffset; index++) { MemoryBuffer *buffer = memoryBuffers[index]; if (buffer) { if (buffer->isTemporarily()) { memoryBuffers[index] = NULL; delete buffer; } } } MEM_freeN(memoryBuffers); } if (this->m_bTree) { // status report is only performed for top level Execution Groups. float progress = this->m_chunksFinished; progress /= this->m_numberOfChunks; this->m_bTree->progress(this->m_bTree->prh, progress); char buf[128]; BLI_snprintf(buf, sizeof(buf), IFACE_("Compositing | Tile %u-%u"), this->m_chunksFinished, this->m_numberOfChunks); this->m_bTree->stats_draw(this->m_bTree->sdh, buf); } }
/**
 * Allocate an aligned, tracked memory block.
 *
 * The returned pointer is aligned to \a alignment bytes; the block is
 * prefixed by a MemHeadAligned header so the header can always be recovered
 * from the data pointer. On failure an error is printed and NULL is returned.
 *
 * \param len: requested user size in bytes (rounded up to a multiple of 4)
 * \param alignment: required alignment; must be a power of two and < 1024
 * \param str: human-readable tag used in the out-of-memory error message
 */
void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str)
{
	MemHeadAligned *memh;

	/* It's possible that MemHead's size is not properly aligned,
	 * do extra padding to deal with this.
	 *
	 * We only support small alignments which fits into short in
	 * order to save some bits in MemHead structure.
	 */
	size_t extra_padding = MEMHEAD_ALIGN_PADDING(alignment);

	/* Huge alignment values doesn't make sense and they
	 * wouldn't fit into 'short' used in the MemHead.
	 */
	assert(alignment < 1024);

	/* We only support alignment to a power of two. */
	assert(IS_POW2(alignment));

	len = SIZET_ALIGN_4(len);

	/* Allocate room for the padding + header + user data in one block. */
	memh = (MemHeadAligned *)aligned_malloc(
	        len + extra_padding + sizeof(MemHeadAligned), alignment);

	if (LIKELY(memh)) {
		/* We keep padding in the beginning of MemHead,
		 * this way it's always possible to get MemHead
		 * from the data pointer.
		 */
		memh = (MemHeadAligned *)((char *)memh + extra_padding);

		/* Poison fresh memory in debug-memset mode to surface
		 * reads of uninitialized data. */
		if (UNLIKELY(malloc_debug_memset && len)) {
			memset(memh + 1, 255, len);
		}

		/* Tag the length with the align flag so the free path knows
		 * how to undo the padding, and record the alignment itself. */
		memh->len = len | (size_t) MEMHEAD_ALIGN_FLAG;
		memh->alignment = (short) alignment;

		/* Update the global allocation statistics atomically. */
		atomic_add_and_fetch_u(&totblock, 1);
		atomic_add_and_fetch_z(&mem_in_use, len);
		update_maximum(&peak_mem, mem_in_use);

		return PTR_FROM_MEMHEAD(memh);
	}
	print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
	            SIZET_ARG(len), str, (unsigned int) mem_in_use);
	return NULL;
}
/**
 * Zero-initialized, tracked allocation.
 *
 * The block is prefixed by a MemHead header holding the user-visible size;
 * global usage statistics are updated atomically. On failure an error is
 * printed and NULL is returned.
 *
 * \param len: requested user size in bytes (rounded up to a multiple of 4)
 * \param str: human-readable tag used in the out-of-memory error message
 */
void *MEM_lockfree_callocN(size_t len, const char *str)
{
	MemHead *memh;

	/* Keep the user size 4-byte aligned. */
	len = SIZET_ALIGN_4(len);

	memh = (MemHead *)calloc(1, len + sizeof(MemHead));
	if (UNLIKELY(memh == NULL)) {
		print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
		            SIZET_ARG(len), str, (unsigned int) mem_in_use);
		return NULL;
	}

	/* Record the user-visible size and update the global statistics. */
	memh->len = len;
	atomic_add_and_fetch_u(&totblock, 1);
	atomic_add_and_fetch_z(&mem_in_use, len);
	update_maximum(&peak_mem, mem_in_use);

	return PTR_FROM_MEMHEAD(memh);
}
/**
 * Allocate a tracked block backed by an anonymous mmap.
 *
 * Only meaningful on 32 bit platforms, where mmap is used to work around
 * address space limits; on 64 bit builds this simply forwards to
 * MEM_lockfree_callocN. On mmap failure it logs and falls back to a
 * regular calloc-based allocation.
 *
 * \param len: requested user size in bytes (rounded up to a multiple of 4)
 * \param str: human-readable tag used in the error message
 */
void *MEM_lockfree_mapallocN(size_t len, const char *str)
{
	MemHead *memh;

	/* on 64 bit, simply use calloc instead, as mmap does not support
	 * allocating > 4 GB on Windows. the only reason mapalloc exists
	 * is to get around address space limitations in 32 bit OSes.
	 */
	if (sizeof(void *) >= 8)
		return MEM_lockfree_callocN(len, str);

	len = SIZET_ALIGN_4(len);

#if defined(WIN32)
	/* our windows mmap implementation is not thread safe */
	mem_lock_thread();
#endif
	memh = mmap(NULL, len + sizeof(MemHead),
	            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
#if defined(WIN32)
	mem_unlock_thread();
#endif

	/* mmap reports failure with MAP_FAILED, i.e. (void *)-1. */
	if (memh != (MemHead *)-1) {
		/* Tag the length with the mmap flag so the free path
		 * knows to munmap instead of free. */
		memh->len = len | (size_t) MEMHEAD_MMAP_FLAG;

		/* Update the global allocation statistics atomically. */
		atomic_add_and_fetch_u(&totblock, 1);
		atomic_add_and_fetch_z(&mem_in_use, len);
		atomic_add_and_fetch_z(&mmap_in_use, len);

		update_maximum(&peak_mem, mem_in_use);
		update_maximum(&peak_mem, mmap_in_use);

		return PTR_FROM_MEMHEAD(memh);
	}
	print_error("Mapalloc returns null, fallback to regular malloc: "
	            "len=" SIZET_FORMAT " in %s, total %u\n",
	            SIZET_ARG(len), str, (unsigned int) mmap_in_use);
	return MEM_lockfree_callocN(len, str);
}