void buflib_print_allocs(struct buflib_context *ctx, void (*print)(int, const char*)) { union buflib_data *this, *end = ctx->handle_table; char buf[128]; for(this = end - 1; this >= ctx->last_handle; this--) { if (!this->alloc) continue; int handle_num; const char *name; union buflib_data *block_start, *alloc_start; intptr_t alloc_len; handle_num = end - this; alloc_start = buflib_get_data(ctx, handle_num); name = buflib_get_name(ctx, handle_num); block_start = (union buflib_data*)name - 3; alloc_len = block_start->val * sizeof(union buflib_data); snprintf(buf, sizeof(buf), "%s(%d):\t%p\n" " \t%p\n" " \t%ld\n", name?:"(null)", handle_num, block_start, alloc_start, alloc_len); /* handle_num is 1-based */ print(handle_num - 1, buf); } }
/* Return the name stored with an allocation, or NULL if the allocation
 * was created without one.
 *
 * ctx    - buflib context the handle belongs to
 * handle - 1-based handle number of the allocation
 */
const char* buflib_get_name(struct buflib_context *ctx, int handle)
{
    /* the word just below the (aligned) user data holds the name length
     * in buflib_data units, including the length word itself */
    union buflib_data *slot =
        ALIGN_DOWN(buflib_get_data(ctx, handle), sizeof (*slot));
    size_t name_len = slot[-1].val;

    /* a length of 1 means only the length word exists — no name stored */
    return (name_len <= 1) ? NULL : slot[-name_len].name;
}
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Grow is not possible, therefore new_start and new_start + new_size
 * must be within the original allocation.
 *
 * Returns true on success, false if the requested window is invalid or would
 * grow the allocation.
 *
 * NOTE(review): this file contains a second, different definition of
 * buflib_shrink further down (inline free-list walk variant); only one of
 * them can be compiled into a translation unit — confirm which is intended. */
bool buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;
    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
        *old_next_block = block + block->val,
        /* newstart isn't necessarily properly aligned but it
         * needn't be since it's only dereferenced by the user code */
        *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
        *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
        *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
        *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;
    /* number of metadata words (length, handle pointer, ops, name) sitting
     * in front of the user data; they must move along with the data */
    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;
    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name.
         * This is actually the point of no return: data in the allocation is
         * being modified, and therefore we must successfully finish the shrink
         * operation */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated (negative length = size of the gap) */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_block_before(ctx, block, true);
        if (free_before)
            free_before->val += block->val; /* extend preceding free block over the gap */

        /* We didn't handle size changes yet; assign block to the new one —
         * the code below wants block whether it changed or not */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {
            /* enlarge next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else if (old_next_block != new_next_block)
        {
            /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
    }
    return true;
}
/* Shrink the allocation indicated by the handle according to new_start and * new_size. Grow is not possible, therefore new_start and new_start + new_size * must be within the original allocation */ bool buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size) { char* oldstart = buflib_get_data(ctx, handle); char* newstart = new_start; char* newend = newstart + new_size; /* newstart must be higher and new_size not "negative" */ if (newstart < oldstart || newend < newstart) return false; union buflib_data *block = handle_to_block(ctx, handle), *old_next_block = block + block->val, /* newstart isn't necessarily properly aligned but it * needn't be since it's only dereferenced by the user code */ *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart), *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart), *new_next_block = (union buflib_data*)B_ALIGN_UP(newend), *new_block, metadata_size; /* growing is not supported */ if (new_next_block > old_next_block) return false; metadata_size.val = aligned_oldstart - block; /* update val and the handle table entry */ new_block = aligned_newstart - metadata_size.val; block[0].val = new_next_block - new_block; block[1].handle->alloc = newstart; if (block != new_block) { /* move metadata over, i.e. pointer to handle table entry and name * This is actually the point of no return. Data in the allocation is * being modified, and therefore we must successfully finish the shrink * operation */ memmove(new_block, block, metadata_size.val*sizeof(metadata_size)); /* mark the old block unallocated */ block->val = block - new_block; union buflib_data *freed_block = block, *free_before = ctx->first_free_block, *next_block = free_before; /* We need to find the block before the current one, to see if it is free * and can be merged with this one. 
*/ while (next_block < freed_block) { free_before = next_block; next_block += abs(block->val); } /* If next_block == free_before, the above loop didn't go anywhere. * If it did, and the block before this one is empty, we can combine them. */ if (next_block == freed_block && next_block != free_before && free_before->val < 0) free_before->val += freed_block->val; else if (next_block == free_before) ctx->first_free_block = freed_block; /* We didn't handle size changes yet, assign block to the new one * the code below the wants block whether it changed or not */ block = new_block; } /* Now deal with size changes that create free blocks after the allocation */ if (old_next_block != new_next_block) { if (ctx->alloc_end == old_next_block) ctx->alloc_end = new_next_block; else if (old_next_block->val < 0) { /* enlarge next block by moving it up */ new_next_block->val = old_next_block->val - (old_next_block - new_next_block); } else if (old_next_block != new_next_block) { /* creating a hole */ /* must be negative to indicate being unallocated */ new_next_block->val = new_next_block - old_next_block; } /* update first_free_block for the newly created free space */ if (ctx->first_free_block > new_next_block) ctx->first_free_block = new_next_block; } /* if the handle is the one aquired with buflib_alloc_maximum() * unlock buflib_alloc() as part of the shrink */ if (ctx->handle_lock == handle) ctx->handle_lock = 0; return true; }
int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name, struct buflib_callbacks *ops) { /* busy wait if there's a thread owning the lock */ while (ctx->handle_lock != 0) YIELD(); union buflib_data *handle, *block; size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0; bool last; /* This really is assigned a value before use */ int block_len; size += name_len; size = (size + sizeof(union buflib_data) - 1) / sizeof(union buflib_data) /* add 4 objects for alloc len, pointer to handle table entry and * name length, and the ops pointer */ + 4; handle_alloc: handle = handle_alloc(ctx); if (!handle) { /* If allocation has failed, and compaction has succeded, it may be * possible to get a handle by trying again. */ if (!ctx->compact && buflib_compact(ctx)) goto handle_alloc; else { /* first try to shrink the alloc before the handle table * to make room for new handles */ int handle = ctx->handle_table - ctx->last_handle; union buflib_data* last_block = handle_to_block(ctx, handle); struct buflib_callbacks* ops = last_block[2].ops; if (ops && ops->shrink_callback) { char *data = buflib_get_data(ctx, handle); unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data); if (ops->shrink_callback(handle, hint, data, (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK) { /* retry one more time */ goto handle_alloc; } } return 0; } } buffer_alloc: /* need to re-evaluate last before the loop because the last allocation * possibly made room in its front to fit this, so last would be wrong */ last = false; for (block = ctx->first_free_block;;block += block_len) { /* If the last used block extends all the way to the handle table, the * block "after" it doesn't have a header. Because of this, it's easier * to always find the end of allocation by saving a pointer, and always * calculate the free space at the end by comparing it to the * last_handle pointer. 
*/ if(block == ctx->alloc_end) { last = true; block_len = ctx->last_handle - block; if ((size_t)block_len < size) block = NULL; break; } block_len = block->val; /* blocks with positive length are already allocated. */ if(block_len > 0) continue; block_len = -block_len; /* The search is first-fit, any fragmentation this causes will be * handled at compaction. */ if ((size_t)block_len >= size) break; } if (!block) { /* Try compacting if allocation failed */ if (buflib_compact_and_shrink(ctx, (size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK)) { goto buffer_alloc; } else { handle->val=1; handle_free(ctx, handle); return 0; } } /* Set up the allocated block, by marking the size allocated, and storing * a pointer to the handle. */ union buflib_data *name_len_slot; block->val = size; block[1].handle = handle; block[2].ops = ops ?: &default_callbacks; strcpy(block[3].name, name); name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len); name_len_slot->val = 1 + name_len/sizeof(union buflib_data); handle->alloc = (char*)(name_len_slot + 1); /* If we have just taken the first free block, the next allocation search * can save some time by starting after this block. */ if (block == ctx->first_free_block) ctx->first_free_block += size; block += size; /* alloc_end must be kept current if we're taking the last block. */ if (last) ctx->alloc_end = block; /* Only free blocks *before* alloc_end have tagged length. */ else if ((size_t)block_len > size) block->val = size - block_len; /* Return the handle index as a positive integer. */ return ctx->handle_table - handle; }