static void * wmem_block_fast_alloc(void *private_data, const size_t size) { wmem_block_fast_allocator_t *allocator = (wmem_block_fast_allocator_t*) private_data; wmem_block_fast_chunk_t *chunk; gint32 real_size; g_assert(size <= WMEM_BLOCK_MAX_ALLOC_SIZE); real_size = (gint32)(WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE); /* Allocate a new block if necessary. */ if (!allocator->block_list || (WMEM_BLOCK_SIZE - allocator->block_list->pos) < real_size) { wmem_block_fast_new_block(allocator); } chunk = (wmem_block_fast_chunk_t *) ((guint8 *) allocator->block_list + allocator->block_list->pos); /* safe to cast, size smaller than WMEM_BLOCK_MAX_ALLOC_SIZE */ chunk->len = (guint32) size; allocator->block_list->pos += real_size; /* and return the user's pointer */ return WMEM_CHUNK_TO_DATA(chunk); }
/* Reallocs special 'jumbo' blocks of sizes that won't fit normally. */
static void *
wmem_block_realloc_jumbo(wmem_block_allocator_t *allocator,
                         wmem_block_chunk_t *chunk,
                         const size_t size)
{
    wmem_block_hdr_t *block = WMEM_CHUNK_TO_BLOCK(chunk);

    /* realloc may move the block, so each neighbour's link (and possibly
     * the list head) must be re-pointed at the new address */
    block = (wmem_block_hdr_t *) wmem_realloc(NULL, block,
            size + WMEM_BLOCK_HEADER_SIZE + WMEM_CHUNK_HEADER_SIZE);

    if (block->next != NULL) {
        block->next->prev = block;
    }

    if (block->prev != NULL) {
        block->prev->next = block;
    } else {
        /* block was the list head */
        allocator->block_list = block;
    }

    return WMEM_CHUNK_TO_DATA(WMEM_BLOCK_TO_CHUNK(block));
}
/* Allocates special 'jumbo' blocks for sizes that won't fit normally. */
static void *
wmem_block_alloc_jumbo(wmem_block_allocator_t *allocator, const size_t size)
{
    wmem_block_chunk_t *chunk;
    wmem_block_hdr_t   *block;

    /* a jumbo block is sized for exactly this one request */
    block = (wmem_block_hdr_t *) wmem_alloc(NULL,
            size + WMEM_BLOCK_HEADER_SIZE + WMEM_CHUNK_HEADER_SIZE);

    /* add it to the block list */
    wmem_block_add_to_block_list(allocator, block);

    /* the whole block is one used jumbo chunk with no neighbours */
    chunk        = WMEM_BLOCK_TO_CHUNK(block);
    chunk->last  = TRUE;
    chunk->used  = TRUE;
    chunk->jumbo = TRUE;
    chunk->len   = 0;
    chunk->prev  = 0;

    /* and return the data pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}
static void * wmem_block_alloc(void *private_data, const size_t size) { wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data; wmem_block_chunk_t *chunk; if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) { return wmem_block_alloc_jumbo(allocator, size); } if (allocator->recycler_head && WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) { /* If we can serve it from the recycler, do so. */ chunk = allocator->recycler_head; } else { if (allocator->master_head && WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) { /* Recycle the head of the master list if necessary. */ chunk = allocator->master_head; wmem_block_pop_master(allocator); wmem_block_add_to_recycler(allocator, chunk); } if (!allocator->master_head) { /* Allocate a new block if necessary. */ wmem_block_new_block(allocator); } chunk = allocator->master_head; } /* if our chunk is used, something is wrong */ g_assert(! chunk->used); /* if we still don't have the space at this point, something is wrong */ g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk)); /* Split our chunk into two to preserve any trailing free space */ wmem_block_split_free_chunk(allocator, chunk, size); /* if our split reduced our size too much, something went wrong */ g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk)); /* the resulting chunk should not be in either free list */ g_assert(chunk != allocator->master_head); g_assert(chunk != allocator->recycler_head); /* Now cycle the recycler */ wmem_block_cycle_recycler(allocator); /* mark it as used */ chunk->used = TRUE; /* and return the user's pointer */ return WMEM_CHUNK_TO_DATA(chunk); }
static void * wmem_block_fast_alloc(void *private_data, const size_t size) { wmem_block_fast_allocator_t *allocator = (wmem_block_fast_allocator_t*) private_data; wmem_block_fast_chunk_t *chunk; gint32 real_size; if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) { wmem_block_fast_jumbo_t *block; /* allocate/initialize a new block of the necessary size */ block = (wmem_block_fast_jumbo_t *)wmem_alloc(NULL, size + WMEM_JUMBO_HEADER_SIZE + WMEM_CHUNK_HEADER_SIZE); block->next = allocator->jumbo_list; block->prev = NULL; allocator->jumbo_list = block; chunk = ((wmem_block_fast_chunk_t*)((guint8*)(block) + WMEM_JUMBO_HEADER_SIZE)); chunk->len = JUMBO_MAGIC; return WMEM_CHUNK_TO_DATA(chunk); } real_size = (gint32)(WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE); /* Allocate a new block if necessary. */ if (!allocator->block_list || (WMEM_BLOCK_SIZE - allocator->block_list->pos) < real_size) { wmem_block_fast_new_block(allocator); } chunk = (wmem_block_fast_chunk_t *) ((guint8 *) allocator->block_list + allocator->block_list->pos); /* safe to cast, size smaller than WMEM_BLOCK_MAX_ALLOC_SIZE */ chunk->len = (guint32) size; allocator->block_list->pos += real_size; /* and return the user's pointer */ return WMEM_CHUNK_TO_DATA(chunk); }
static void * wmem_block_alloc(void *private_data, const size_t size) { wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data; wmem_block_chunk_t *chunk; if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) { return wmem_block_alloc_jumbo(allocator, size); } if (allocator->recycler_head && WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) { /* If we can serve it from the recycler, do so. */ chunk = allocator->recycler_head; } else { if (allocator->master_head && WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) { /* Recycle the head of the master list if necessary. */ chunk = allocator->master_head; wmem_block_pop_master(allocator); wmem_block_add_to_recycler(allocator, chunk); } if (!allocator->master_head) { /* Allocate a new block if necessary. */ wmem_block_new_block(allocator); } chunk = allocator->master_head; } /* Split our chunk into two to preserve any trailing free space */ wmem_block_split_free_chunk(allocator, chunk, size); /* Now cycle the recycler */ wmem_block_cycle_recycler(allocator); /* mark it as used */ chunk->used = TRUE; /* and return the user's pointer */ return WMEM_CHUNK_TO_DATA(chunk); }