AudicleFont * AudicleFont::loadFont( const char * name )
{
    AudicleFont * font = check_pool( name );

    if ( !font )
    {
        if ( strncmp( name, "OpenGL:", 7 ) == 0 )
        {
            bool mono = ( strcmp( name + 7, "mono" ) == 0 );
            font = new AudicleOpenGLFont( mono );
        }
#ifdef _USE_FTGL_FONTS_
        else if ( strncmp( name, "FTGL:", 5 ) == 0 )
        {
            font = new AudicleFTGLFont( name + 5 );
        }
#endif
        else
        {
            fprintf( stderr, "Audicle::loadFont - font not found, using OpenGL mono\n" );
            font = check_pool( "OpenGL:mono" );
            if ( !font )
                font = new AudicleOpenGLFont( true );
        }

        if ( font )
            m_font_pool.push_back( font );
    }

    if ( font )
    {
        font->m_ref++;
        return font;
    }

    return NULL;
}
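// Usage sketch (hypothetical caller, not part of the original source): loadFont()
// looks the name up in the pool and creates the font on demand, so repeated calls
// with the same name share one reference-counted instance.
static void example_load_fonts()
{
    AudicleFont * mono = AudicleFont::loadFont( "OpenGL:mono" );   // built-in mono stroke font
    AudicleFont * same = AudicleFont::loadFont( "OpenGL:mono" );   // same pooled instance, m_ref bumped again
    AudicleFont * dflt = AudicleFont::loadFont( "no-such-font" );  // warns and falls back to OpenGL mono
    (void)mono; (void)same; (void)dflt;  // releasing / decrementing m_ref is outside this excerpt
}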
// Release every area back to the client-supplied free function and reset the bins.
void AllocPool::FreeAll()
{
    check_pool();
    AllocAreaPtr area = mAreas;
    if (area)
    {
        AllocAreaPtr firstArea = area;
        do
        {
            AllocAreaPtr nextarea = area->mNext;
            (mFreeArea)(area->mUnalignedPointerToThis);
            area = nextarea;
        } while (area != firstArea);
        mAreas = NULL;
    }
    InitBins();
    check_pool();
}
int send_uuid(int sock_con)
{
    char uuid[37];
    int rtn;

    next_uuid(uuid);
    rtn = write(sock_con, uuid, sizeof(uuid));
    check_pool();   // Refresh the pool if necessary
    return rtn;
}
// Unlike FreeAll(), keep the areas: each one is reset to a single free chunk,
// so the pool can be reused without going back to the area allocator.
void AllocPool::FreeAllInternal()
{
    check_pool();
    InitBins();

    AllocAreaPtr area = mAreas;
    if (area)
    {
        AllocAreaPtr firstArea = area;
        do
        {
            AllocAreaPtr nextarea = area->mNext;
            size_t size = area->mSize;
            AllocChunkPtr chunk = &area->mChunk;
            chunk->SetSizeFree(size);
            chunk->SetNeighborsInUse(size);
            LinkFree(chunk);
            area = nextarea;
        } while (area != firstArea);
    }
    check_pool();
}
void AllocPool::Free(void *inPtr)
{
#ifdef DISABLE_MEMORY_POOLS
    free(inPtr);
    return;
#endif

    check_pool();
    if (inPtr == 0) return; /* free(0) has no effect */

    AllocChunkPtr chunk = MemToChunk(inPtr);
    check_inuse_chunk(chunk);
    garbage_fill(chunk);

    size_t size = chunk->Size();

    if (!chunk->PrevInUse()) /* consolidate backward */
    {
        size_t prevSize = chunk->PrevSize();
        chunk = chunk->ChunkAtOffset(0L - prevSize);
        size += prevSize;
        UnlinkFree(chunk);
    }

    AllocChunkPtr next = chunk->ChunkAtOffset(size);
    if (!next->InUse()) /* consolidate forward */
    {
        size += next->Size();
        UnlinkFree(next);
    }

    chunk->SetSizeFree(size);
    if (mAreaMoreSize && chunk->IsArea())
    {
        // whole area is free
        FreeArea(chunk);
    }
    else
    {
        LinkFree(chunk);
    }
    check_pool();
}
// amqp_destroy_context
int amqp_destroy_context(amqp_context_t *context)
{
    int rc = true;

    if (context != 0)
    {
        if (((amqp__context_with_guard_t *) context)->multiple_delete_protection != random_sequence)
        {
            amqp_fatal_program_error("Attempting to destroy a Context twice.");
        }
        ((amqp__context_with_guard_t *) context)->multiple_delete_protection = 0;

        amqp_deallocate_buffer(context, context->encode.buffer);
        amqp_deallocate_buffer(context, context->decode.buffer);

        rc = check_pool(context, &context->pools.amqp_buffer_t_pool)
          && check_pool(context, &context->pools.amqp_type_t_pool);

        AMQP_FREE(context);
    }
    return rc;
}
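/*
 * Minimal, self-contained sketch of the double-destroy guard used above.
 * All names here are hypothetical and not part of the libamqp API: the live
 * object carries a sentinel value that is checked and cleared on destroy, so
 * destroying the same pointer twice trips the check. Like the original, this
 * is a best-effort debug aid, since the memory may be reused after free().
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct guarded_object
{
    unsigned long guard;    /* sentinel written at creation time */
    /* ... payload ... */
} guarded_object;

static unsigned long session_sentinel;  /* assumed chosen once, e.g. at startup */

static guarded_object *guarded_create(void)
{
    guarded_object *o = malloc(sizeof(*o));
    if (o) o->guard = session_sentinel;
    return o;
}

static void guarded_destroy(guarded_object *o)
{
    if (o->guard != session_sentinel)
    {
        fprintf(stderr, "fatal: object destroyed twice\n");
        abort();
    }
    o->guard = 0;   /* clearing the sentinel makes a second destroy detectable */
    free(o);
}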
AllocPool::AllocPool(NewAreaFunc inAllocArea, FreeAreaFunc inFreeArea,
                     size_t inAreaInitSize, size_t inAreaMoreSize)
{
    InitBins();
    mAreaInitSize = inAreaInitSize;
    mAreaMoreSize = inAreaMoreSize;
    mAllocArea = inAllocArea;
    mFreeArea = inFreeArea;
    mAreas = 0;
    check_pool();

    InitAlloc();
}
/*
void* allocmem(AllocPool *pool, int32 size);
void* allocmem(AllocPool *pool, int32 size)
{
    return pool->Alloc(size);
}

void* reallocmem(AllocPool *pool, void* ptr, int32 size);
void* reallocmem(AllocPool *pool, void* ptr, int32 size)
{
    return pool->Realloc(ptr, size);
}

void freemem(AllocPool *pool, void* ptr);
void freemem(AllocPool *pool, void* ptr)
{
    pool->Free(ptr);
}
*/

void AllocPool::InitAlloc()
{
    if (mAreaInitSize == 0) return;

    /* alloc initial area */
    NewArea(mAreaInitSize);

    /* get chunk */
    AllocAreaPtr area = mAreas;
    AllocChunkPtr chunk = &area->mChunk;
    LinkFree(chunk);

    check_pool();
}
Domain::DataKey DEKPool::get_active_data_key()
{
    DEKPoolStatus pool_status = check_pool();
    if (!pool_status.active_count)
        throw std::runtime_error("DEK pool is exhausted");

    auto active_deks = Yb::query<Domain::DataKey>(session_)
            .filter_by(Domain::DataKey::c.counter < dek_use_count_);

    int choice = rand() % pool_status.active_count;
    for (auto &dek : active_deks.all())
    {
        if (--choice <= 0)
            return dek;
    }
    throw std::runtime_error("Cannot obtain active DEK");
}
char *EMalloc(unsigned long nbytes)
/* storage allocator */
/* Always returns a pointer that has 8-byte alignment (essential for our
   internal representation of an object). */
{
    unsigned char *p;
    unsigned char *temp;
    register struct block_list *list;
    int alignment;
    int min_align;

#ifdef ELINUX
#ifndef EBSD62
    return malloc(nbytes);
#else
    p = malloc( nbytes + 8 );
    if( (unsigned long)p & 7 ){
        *(int *)p = MAGIC_FILLER;
        p += 4;
    }
    else{
        *(int *)(p+4) = 0;
        p += 8;
    }
    return p;
#endif
#else

#ifdef HEAP_CHECK
    long size;
    check_pool();
#endif

    nbytes += align4; // allow for possible 4-aligned malloc pointers

    if (nbytes <= MAX_CACHED_SIZE) {
        /* See if we have a block of this size in our cache.
           Every block in the cache is 8-aligned. */

        list = pool_map[(nbytes + (RESOLUTION - 1)) >> LOG_RESOLUTION];
#ifdef HEAP_CHECK
        if (list->size < nbytes || list->size > nbytes * 2) {
            sprintf(msg, "Alloc - size is %d, nbytes is %d", list->size, nbytes);
            RTInternal(msg);
        }
#endif
        temp = (char *)list->first;

        if (temp != NULL) {
            /* a cache hit */
#ifdef EXTRA_STATS
            a_hit++;
#endif
            list->first = ((free_block_ptr)temp)->next;
            cache_size -= 2;
#ifdef HEAP_CHECK
            if (cache_size > 100000000)
                RTInternal("cache size is bad");
            p = temp;
            if (align4 && *(int *)(p-4) == MAGIC_FILLER)
                p = p - 4;
            if (((unsigned long)temp) & 3)
                RTInternal("unaligned address in storage cache");
            Allocated(block_size(p));
#endif
            return temp; /* will be 8-aligned */
        }
        else {
            nbytes = list->size; /* better to grab bigger size
                                    so it can be reused for same purpose */
#ifdef EXTRA_STATS
            a_miss++;
#endif
        }
    }
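/*
 * Simplified, self-contained sketch of the size-class cache idea EMalloc uses
 * above. All names here are hypothetical and not the Euphoria runtime's: freed
 * blocks are kept on per-size free lists, and an allocation first tries the
 * list whose size class covers the request before falling back to malloc().
 */
#include <stdlib.h>

#define CACHE_RESOLUTION   8                        /* size classes are 8 bytes apart */
#define CACHE_MAX_SIZE     1024                     /* larger requests go to malloc() */
#define CACHE_NUM_CLASSES  (CACHE_MAX_SIZE / CACHE_RESOLUTION)

typedef struct cached_block { struct cached_block *next; } cached_block;

static cached_block *cache_bins[CACHE_NUM_CLASSES + 1];

/* map a request size to its bin index, rounding up to the next size class */
static size_t bin_index(size_t nbytes)
{
    return (nbytes + CACHE_RESOLUTION - 1) / CACHE_RESOLUTION;
}

static void *cached_alloc(size_t nbytes)
{
    if (nbytes != 0 && nbytes <= CACHE_MAX_SIZE) {
        size_t i = bin_index(nbytes);
        cached_block *b = cache_bins[i];
        if (b != NULL) {                    /* cache hit: pop the free list */
            cache_bins[i] = b->next;
            return b;
        }
        nbytes = i * CACHE_RESOLUTION;      /* cache miss: allocate the full size */
    }                                       /* class so the block can be recycled */
    return malloc(nbytes);
}

static void cached_free(void *ptr, size_t nbytes)
{
    if (nbytes != 0 && nbytes <= CACHE_MAX_SIZE) {
        cached_block *b = ptr;              /* push onto the matching free list */
        size_t i = bin_index(nbytes);
        b->next = cache_bins[i];
        cache_bins[i] = b;
        return;
    }
    free(ptr);                              /* oversized blocks go straight back */
}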
void* AllocPool::Realloc(void* inPtr, size_t inReqSize)
{
#ifdef DISABLE_MEMORY_POOLS
    return realloc(inPtr, inReqSize);
#endif

    void *outPtr;
    AllocChunkPtr prev;
    check_pool();

    bool docopy = false;

    /* realloc of null is supposed to be same as malloc */
    if (inPtr == 0) return Alloc(inReqSize);

    AllocChunkPtr oldChunk = MemToChunk(inPtr);
    AllocChunkPtr newChunk = oldChunk;
    size_t oldsize = oldChunk->Size();
    size_t newsize = oldsize;
    size_t size = RequestToSize(inReqSize);
    size_t nextsize, prevsize;
    check_inuse_chunk(oldChunk);

    if (oldsize < size)
    {
        /* Try expanding forward */
        AllocChunkPtr next = oldChunk->NextChunk();
        if (!next->InUse())
        {
            nextsize = next->Size();
            /* Forward into next chunk */
            if (nextsize + newsize >= size)
            {
                UnlinkFree(next);
                newsize += nextsize;
                goto split;
            }
        }
        else
        {
            next = 0;
            nextsize = 0;
        }

        /* Try shifting backwards. */
        prev = oldChunk->PrevChunk();
        if (!prev->InUse())
        {
            prevsize = prev->Size();

            /* try forward + backward first to save a later consolidation */
            if (next != 0)
            {
                /* into next chunk */
                if (nextsize + prevsize + newsize >= size)
                {
                    newsize += nextsize + prevsize;
                    UnlinkFree(next);
                    goto alloc_prev;
                }
            }

            /* backward only */
            if (prev != 0 && prevsize + newsize >= size)
            {
                newsize += prevsize;
                goto alloc_prev;
            }
        }

        /* Must allocate */
        outPtr = Alloc(inReqSize);
        check_pool();
        if (outPtr == 0)
        {
            //ipostbuf("realloc failed. size: %d\n", inReqSize);
            throw std::runtime_error("realloc failed, increase server's memory allocation (e.g. via ServerOptions)");
        }

        /* Otherwise copy, free, and exit */
        memcpy(outPtr, inPtr, oldsize - sizeof(AllocChunk));
        Free(inPtr);
        return outPtr;
    }
    else goto split;

alloc_prev:
    UnlinkFree(prev);
    newChunk = prev;
    docopy = true;
    // FALL THROUGH
split:
    /* split off extra room in old or expanded chunk */
    //check_pool();
    if (newsize - size >= kMinAllocSize)
    {
        /* split off remainder */
        size_t remainder_size = newsize - size;
        AllocChunkPtr remainder = newChunk->ChunkAtOffset(size);
        remainder->SetSizeInUse(remainder_size);
        newChunk->SetSizeInUse(size);
        Free(remainder->ToPtr()); /* let free() deal with it */
    }
    else
    {
        newChunk->SetSizeInUse(newsize);
    }

    outPtr = newChunk->ToPtr();
    if (docopy)
    {
        memmove(outPtr, inPtr, oldsize - sizeof(AllocChunk));
    }

    check_inuse_chunk(newChunk);
    check_pool();
    garbage_fill(newChunk);
    return outPtr;
}
void* AllocPool::Alloc(size_t inReqSize)
{
#ifdef DISABLE_MEMORY_POOLS
    return malloc(inReqSize);
#endif
    // OK it has a lot of gotos, but these remove a whole lot of common code
    // that was obfuscating the original version of this function.
    // So here I am choosing the OnceAndOnlyOnce principle over the caveats on gotos.
    // The gotos only jump forward and only to the exit paths of the function.
    // The old bin block scheme has been replaced by 4 x 32 bit words so that each bin has a bit
    // and the next bin is found using a count leading zeroes instruction. Much faster.
    // Also now each bin's flag can be kept accurate. This simplifies the searching code quite a bit.
    // Also fwiw, changed 'victim' in the original code to 'candidate'. 'victim' just bothered me.

    AllocChunkPtr candidate;    /* inspected/selected chunk */
    size_t candidate_size;      /* its size */
    AllocChunkPtr remainder;    /* remainder from a split */
    int32 remainder_size;       /* its size */
    AllocAreaPtr area;
    size_t areaSize;

    size_t size = RequestToSize(inReqSize);
    int index = BinIndex(size);
    assert(index < 128);
    AllocChunkPtr bin = mBins + index;

    check_pool();

    /* Check for exact match in a bin */
    if (index < kMaxSmallBin)
    {
        /* Faster version for small requests */
        /* No traversal or size check necessary for small bins. */
        candidate = bin->Prev();

        /* Also scan the next one, since it would have a remainder < kMinAllocSize */
        if (candidate == bin) candidate = (++bin)->Prev();
        if (candidate != bin)
        {
            candidate_size = candidate->Size();
            goto found_exact_fit;
        }

        index += 2; /* Set for bin scan below. We've already scanned 2 bins. */
    }
    else
    {
        for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev())
        {
            candidate_size = candidate->Size();
            remainder_size = (int)(candidate_size - size);
            if (remainder_size >= (int32)kMinAllocSize)
            {
                /* too big */
                --index; /* adjust to rescan below after checking last remainder */
                break;
            }
            else if (remainder_size >= 0)
            {
                /* exact fit */
                goto found_exact_fit;
            }
        }
        ++index;
    }

    for (; (index = NextFullBin(index)) >= 0; ++index)
    {
        bin = mBins + index;

        /* Find and use first big enough chunk ... */
        for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev())
        {
            candidate_size = candidate->Size();
            remainder_size = (int)(candidate_size - size);
            if (remainder_size >= (int32)kMinAllocSize)
            {
                /* split */
                UnlinkFree(candidate);
                goto found_bigger_fit;
            }
            else if (remainder_size >= 0) goto found_exact_fit;
        }
    }
    check_pool();

    if (mAreaMoreSize == 0)
    {
        /* pool has a non-growable area */
        if (mAreas != NULL            /* fixed size area exhausted */
            || size > mAreaInitSize)  /* too big anyway */
            goto found_nothing;
        areaSize = mAreaInitSize;
        goto split_new_area;
    }

    if (size > mAreaMoreSize)
    {
        areaSize = size;
        goto whole_new_area;
    }
    else
    {
        areaSize = mAreaMoreSize;
        goto split_new_area;
    }

    // exit paths:
found_nothing:
    //ipostbuf("alloc failed. size: %d\n", inReqSize);
    throw std::runtime_error("alloc failed, increase server's memory allocation (e.g. via ServerOptions)");

whole_new_area:
    //ipostbuf("whole_new_area\n");
    area = NewArea(areaSize);
    if (!area) return 0;
    candidate = &area->mChunk;
    candidate_size = candidate->Size();
    goto return_chunk;

split_new_area:
    //ipostbuf("split_new_area\n");
    area = NewArea(areaSize);
    if (!area) return 0;
    candidate = &area->mChunk;
    candidate_size = candidate->Size();
    remainder_size = (int)(areaSize - size);
    // FALL THROUGH
found_bigger_fit:
    //ipostbuf("found_bigger_fit\n");
    remainder = candidate->ChunkAtOffset(size);
    remainder->SetSizeFree(remainder_size);
    candidate_size -= remainder_size;
    LinkFree(remainder);
    goto return_chunk;

found_exact_fit:
    check_pool();
    UnlinkFree(candidate);
    // FALL THROUGH
return_chunk:
    candidate->SetSizeInUse(candidate_size);
    check_malloced_chunk(candidate, candidate_size);
    check_pool();
    garbage_fill(candidate);
    return candidate->ToPtr();
}