/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data *this;
        for(this = ctx->buf_start; this < ctx->alloc_end; this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                            data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* 'this' might have changed in the callback (if it shrunk
                 * from the top), get it again */
                this = handle_to_block(ctx, handle);
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }

    return result;
}
/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data *this, *before;
        for(this = ctx->buf_start, before = this;
            this < ctx->alloc_end;
            before = this, this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                bool last = (this+this->val) == ctx->alloc_end;
                unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK;
                /* adjust what we ask for if there's free space in the front,
                 * which isn't too unlikely assuming this block is
                 * shrinkable but not movable */
                if (pos_hints == BUFLIB_SHRINK_POS_FRONT
                    && before != this && before->val < 0)
                {
                    size_t free_space = (-before->val) * sizeof(union buflib_data);
                    size_t wanted = shrink_hints & BUFLIB_SHRINK_SIZE_MASK;
                    if (wanted < free_space) /* no shrink needed? */
                        continue;
                    wanted -= free_space;
                    shrink_hints = pos_hints | wanted;
                }
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                            data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* 'this' might have changed in the callback (if it shrunk
                 * from the top or even freed the handle), get it again */
                this = handle_to_block(ctx, handle);
                /* The handle may have been freed in the callback,
                 * re-run the loop from the block before */
                if (!this)
                    this = before;
                /* alloc_end could also change with shrinking from the back */
                else if (last)
                    ctx->alloc_end = this + this->val;
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }

    return result;
}
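/* For reference, a minimal shrink callback of the kind
 * buflib_compact_and_shrink() invokes above. This is an illustrative sketch,
 * not part of buflib itself; 'my_ctx' and the callback name are hypothetical.
 * An owner that can give up the tail of its allocation honors the size hint
 * by calling buflib_shrink() itself and reporting success:
 *
 *     static int my_shrink_cb(int handle, unsigned hints,
 *                             void* start, size_t old_size)
 *     {
 *         size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
 *         // give up at most half of the allocation, from the back
 *         size_t give = wanted < old_size/2 ? wanted : old_size/2;
 *         if (give == 0
 *             || !buflib_shrink(&my_ctx, handle, start, old_size - give))
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *         return BUFLIB_CB_OK;
 *     }
 */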
/* Free the buffer associated with handle_num. */
void
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block = ctx->first_free_block,
                      *next_block = block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one. */
    while (next_block < freed_block)
    {
        block = next_block;
        next_block += abs(block->val);
    }
    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is empty, we can combine them. */
    if (next_block == freed_block && next_block != block && block->val < 0)
        block->val -= freed_block->val;
    /* Otherwise, set block to the newly-freed block, and mark it free, before
     * continuing on, since the code below expects block to point to a free
     * block which may have free space after it. */
    else
    {
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact. */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for free-block search. */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;

    /* if the handle is the one acquired with buflib_alloc_maximum(),
     * unlock buflib_alloc() as part of the free */
    if (ctx->handle_lock == handle_num)
        ctx->handle_lock = 0;
}
/* Free the buffer associated with handle_num. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one. */
    block = find_block_before(ctx, freed_block, true);
    if (block)
    {
        block->val -= freed_block->val;
    }
    else
    {
        /* Otherwise, set block to the newly-freed block, and mark it free,
         * before continuing on, since the code below expects block to point
         * to a free block which may have free space after it. */
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact. */
    else
    {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;

    return 0; /* unconditionally */
}
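/* Usage sketch for the alloc/free pair (illustrative only; the buffer and
 * context names are hypothetical, and buflib_alloc() is the plain wrapper
 * around buflib_alloc_ex() without callbacks):
 *
 *     static struct buflib_context ctx;
 *     static char buf[32<<10];
 *
 *     buflib_init(&ctx, buf, sizeof(buf));
 *     int h = buflib_alloc(&ctx, 1024);    // movable, no callbacks
 *     if (h > 0)
 *     {
 *         memset(buflib_get_data(&ctx, h), 0, 1024);
 *         buflib_free(&ctx, h);            // returns 0 unconditionally
 *     }
 *
 * Note that the pointer from buflib_get_data() must not be cached across
 * other buflib calls, since compaction may move the data.
 */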
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                /* newstart isn't necessarily properly aligned but it
                 * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;
    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the
         * shrink operation */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_block_before(ctx, block, true);
        if (free_before)
            free_before->val += block->val;

        /* We didn't handle size changes yet, so assign block to the new one;
         * the code below wants block whether it changed or not */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
    }

    return true;
}
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation.
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                /* newstart isn't necessarily properly aligned but it
                 * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;
    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the
         * shrink operation */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        union buflib_data *freed_block = block,
                          *free_before = ctx->first_free_block,
                          *next_block = free_before;
        /* We need to find the block before the current one, to see if it is
         * free and can be merged with this one. */
        while (next_block < freed_block)
        {
            free_before = next_block;
            next_block += abs(free_before->val);
        }
        /* If next_block == free_before, the above loop didn't go anywhere.
         * If it did, and the block before this one is empty, we can combine
         * them. */
        if (next_block == freed_block && next_block != free_before
            && free_before->val < 0)
            free_before->val += freed_block->val;
        else if (next_block == free_before)
            ctx->first_free_block = freed_block;

        /* We didn't handle size changes yet, so assign block to the new one;
         * the code below wants block whether it changed or not */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
        /* update first_free_block for the newly created free space */
        if (ctx->first_free_block > new_next_block)
            ctx->first_free_block = new_next_block;
    }

    /* if the handle is the one acquired with buflib_alloc_maximum(),
     * unlock buflib_alloc() as part of the shrink */
    if (ctx->handle_lock == handle)
        ctx->handle_lock = 0;

    return true;
}
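/* Example of honoring a front-position hint with the shrink operation above.
 * Sketch only; 'my_ctx' and the callback name are hypothetical. Shrinking
 * from the front moves new_start up while keeping the end fixed, which lets
 * the free space before the block grow, as buflib_compact_and_shrink()
 * anticipates:
 *
 *     static int front_shrink_cb(int handle, unsigned hints,
 *                                void* start, size_t old_size)
 *     {
 *         if ((hints & BUFLIB_SHRINK_POS_MASK) != BUFLIB_SHRINK_POS_FRONT)
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *         size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
 *         if (wanted == 0 || wanted >= old_size)
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *         if (!buflib_shrink(&my_ctx, handle,
 *                            (char*)start + wanted, old_size - wanted))
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *         return BUFLIB_CB_OK;
 *     }
 */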
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    /* busy wait if there's a thread owning the lock */
    while (ctx->handle_lock != 0)
        YIELD();

    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again. */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {   /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return 0;
        }
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if (block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if (block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction. */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        if (buflib_compact_and_shrink(ctx,
                    (size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK))
        {
            goto buffer_alloc;
        }
        else
        {
            handle->val = 1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle. */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops ?: &default_callbacks;
    if (name_len > 0)
        strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block. */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
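/* Putting it together: an owner registering callbacks with buflib_alloc_ex().
 * Illustrative sketch only; the names are hypothetical. The move callback's
 * job here is just to fix up a cached pointer when compaction relocates the
 * data:
 *
 *     static char* cached;
 *
 *     static int my_move_cb(int handle, void* current, void* new)
 *     {
 *         (void)handle;
 *         if (cached == current)
 *             cached = new;
 *         return BUFLIB_CB_OK;
 *     }
 *
 *     static struct buflib_callbacks my_ops = {
 *         .move_callback   = my_move_cb,
 *         .shrink_callback = NULL,     // movable but not shrinkable
 *     };
 *
 *     int h = buflib_alloc_ex(&my_ctx, 4096, "my_buffer", &my_ops);
 *     cached = buflib_get_data(&my_ctx, h);
 */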