static void wmem_block_free(void *private_data, void *ptr) { wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data; wmem_block_chunk_t *chunk; chunk = WMEM_DATA_TO_CHUNK(ptr); if (chunk->jumbo) { wmem_block_free_jumbo(allocator, chunk); return; } g_assert(chunk->used); /* mark it as unused */ chunk->used = FALSE; /* merge it with any other free chunks adjacent to it, so that contiguous * free space doesn't get fragmented */ wmem_block_merge_free(allocator, chunk); /* Now cycle the recycler */ wmem_block_cycle_recycler(allocator); }
/* Allocate `size` bytes from the block allocator. Oversized requests are
 * diverted to the jumbo path; everything else is served from the recycler
 * list when its head is big enough, otherwise from the head of the master
 * (free) list, allocating a fresh block if the master list is exhausted.
 * Returns a pointer to the chunk's data area. */
static void *
wmem_block_alloc(void *private_data, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t *chunk;

    /* Requests too large for a normal block get a dedicated jumbo chunk. */
    if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) {
        return wmem_block_alloc_jumbo(allocator, size);
    }

    if (allocator->recycler_head &&
            WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {
        /* If we can serve it from the recycler, do so. */
        chunk = allocator->recycler_head;
    }
    else {
        if (allocator->master_head &&
                WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {
            /* Recycle the head of the master list if necessary: it is too
             * small for this request, so demote it to the recycler rather
             * than leaving it to block the master list. */
            chunk = allocator->master_head;
            wmem_block_pop_master(allocator);
            wmem_block_add_to_recycler(allocator, chunk);
        }

        if (!allocator->master_head) {
            /* Allocate a new block if necessary. */
            wmem_block_new_block(allocator);
        }

        chunk = allocator->master_head;
    }

    /* if our chunk is used, something is wrong */
    g_assert(! chunk->used);
    /* if we still don't have the space at this point, something is wrong */
    g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));

    /* Split our chunk into two to preserve any trailing free space;
     * the split also removes `chunk` from whichever free list held it. */
    wmem_block_split_free_chunk(allocator, chunk, size);

    /* if our split reduced our size too much, something went wrong */
    g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));
    /* the resulting chunk should not be in either free list */
    g_assert(chunk != allocator->master_head);
    g_assert(chunk != allocator->recycler_head);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);

    /* mark it as used */
    chunk->used = TRUE;

    /* and return the user's pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}
/* NOTE(review): this is a SECOND definition of wmem_block_alloc — apparently
 * a stripped copy of the one above, missing its four g_assert sanity checks.
 * Two definitions of the same static function in one translation unit will
 * not compile; confirm which copy is intended and remove the other (the
 * asserted version above matches upstream Wireshark). */
static void *
wmem_block_alloc(void *private_data, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t *chunk;

    /* Oversized requests go to the jumbo path. */
    if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) {
        return wmem_block_alloc_jumbo(allocator, size);
    }

    if (allocator->recycler_head &&
            WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {
        /* If we can serve it from the recycler, do so. */
        chunk = allocator->recycler_head;
    }
    else {
        if (allocator->master_head &&
                WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {
            /* Recycle the head of the master list if necessary. */
            chunk = allocator->master_head;
            wmem_block_pop_master(allocator);
            wmem_block_add_to_recycler(allocator, chunk);
        }

        if (!allocator->master_head) {
            /* Allocate a new block if necessary. */
            wmem_block_new_block(allocator);
        }

        chunk = allocator->master_head;
    }

    /* Split our chunk into two to preserve any trailing free space */
    wmem_block_split_free_chunk(allocator, chunk, size);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);

    /* mark it as used */
    chunk->used = TRUE;

    /* and return the user's pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}
/* Resize an allocation in place when possible. Jumbo chunks use their own
 * path. Growing tries to absorb the immediately-following chunk if it is
 * free and large enough; otherwise falls back to alloc + memcpy + free.
 * Shrinking splits the used chunk and returns the tail to the free lists.
 * An exact-size request is a no-op. Returns the (possibly moved) data
 * pointer. */
static void *
wmem_block_realloc(void *private_data, void *ptr, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        return wmem_block_realloc_jumbo(allocator, chunk, size);
    }

    if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* grow */
        wmem_block_chunk_t *tmp;

        tmp = WMEM_CHUNK_NEXT(chunk);

        /* Strict '<' (not '<=') on the combined length: we need the extra
         * space net of tmp's header, which gets swallowed by the merge. */
        if (tmp && (!tmp->used) &&
                (size < WMEM_CHUNK_DATA_LEN(chunk) + tmp->len)) {
            /* the next chunk is free and has enough extra, so just grab
             * from that */
            size_t split_size;

            /* we ask for the next chunk to be split, but we don't end up
             * using the split chunk header (it just gets merged into this one),
             * so we want the split to be of (size - curdatalen - header_size).
             * However, this can underflow by header_size, so we do a quick
             * check here and floor the value to 0. */
            split_size = size - WMEM_CHUNK_DATA_LEN(chunk);

            if (split_size < WMEM_CHUNK_HEADER_SIZE) {
                split_size = 0;
            }
            else {
                split_size -= WMEM_CHUNK_HEADER_SIZE;
            }

            wmem_block_split_free_chunk(allocator, tmp, split_size);

            /* Now do a 'quickie' merge between the current block and the left-
             * hand side of the split. Simply calling wmem_block_merge_free
             * might confuse things, since we may temporarily have two blocks
             * to our right that are both free (and it isn't guaranteed to
             * handle that case). Update our 'next' count and last flag, and
             * our (new) successor's 'prev' count */
            chunk->len += tmp->len;
            chunk->last = tmp->last;
            tmp = WMEM_CHUNK_NEXT(chunk);
            if (tmp) {
                tmp->prev = chunk->len;
            }

            /* Now cycle the recycler */
            wmem_block_cycle_recycler(allocator);

            /* And return the same old pointer */
            return ptr;
        }
        else {
            /* no room to grow, need to alloc, copy, free */
            void *newptr;

            newptr = wmem_block_alloc(private_data, size);
            memcpy(newptr, ptr, WMEM_CHUNK_DATA_LEN(chunk));
            wmem_block_free(private_data, ptr);

            /* No need to cycle the recycler, alloc and free both did that
             * already */

            return newptr;
        }
    }
    else if (size < WMEM_CHUNK_DATA_LEN(chunk)) {
        /* shrink: split off the unused tail and return it to the free lists */
        wmem_block_split_used_chunk(allocator, chunk, size);

        /* Now cycle the recycler */
        wmem_block_cycle_recycler(allocator);

        return ptr;
    }

    /* no-op: requested size equals the current data length */
    return ptr;
}