/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 *
 * Block headers store a signed length in units of union buflib_data:
 * a negative val marks a free block whose magnitude is its length (the
 * sign handling below and the `block += len` stride establish this).
 * Compaction slides allocated blocks down over free space; `shift` is
 * the (non-positive) running amount, in buflib_data words, by which the
 * current block may be moved toward the buffer start. */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    /* Snapshot of the first free block at entry.
     * NOTE(review): the hole-fill branch below compares against
     * ctx->first_free_block but measures size/distance via this cached
     * first_free — if handle_table_shrink() or a move updates
     * ctx->first_free_block, the two can diverge; confirm intended. */
    union buflib_data *first_free = ctx->first_free_block, *block;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    for(block = first_free; block != ctx->alloc_end; block += len)
    {
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;   /* shift grows more negative: more room below */
            len = -len;     /* stride over the free block */
            continue;
        }
        /* attempt to fill any hole: relocate this allocation into the
         * first free block if it fits there.
         * NOTE(review): strict '>' means an allocation that fits the
         * hole exactly is never moved into it — TODO confirm this is
         * intentional and not an off-by-one. */
        if (abs(ctx->first_free_block->val) > block->val)
        {
            intptr_t size = first_free->val; /* hole length (negative) */
            /* negative offset: move the block down into the hole */
            if (move_block(ctx, block, first_free - block))
            {
                /* old location becomes free space */
                block->val *= -1;
                /* the hole's front is now occupied by the moved block;
                 * the remaining free space starts just past it and its
                 * length is the old hole length plus the moved size
                 * (both signs chosen so the sum stays negative) */
                block = ctx->first_free_block;
                ctx->first_free_block += block->val;
                ctx->first_free_block->val = size + block->val;
                continue;
            }
        }
        /* attempt move the allocation by shift */
        if (shift)
        {
            /* failing to move creates a hole, therefore mark this
             * block as not allocated anymore and move first_free_block up */
            if (!move_block(ctx, block, shift))
            {
                union buflib_data* hole = block + shift;
                hole->val = shift;  /* negative: free block of |shift| words */
                if (ctx->first_free_block > hole)
                    ctx->first_free_block = hole;
                shift = 0;  /* unmovable block pins everything above it */
            }
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed. */
    ctx->alloc_end += shift;
    /* only move first_free_block up if it wasn't already by a hole */
    if (ctx->first_free_block > ctx->alloc_end)
        ctx->first_free_block = ctx->alloc_end;
    ctx->compact = true;
    /* true if the handle table shrank or any space was reclaimed at the end */
    return ret || shift;
}
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 *
 * Block headers store a signed length in units of union buflib_data:
 * a negative val marks a free block whose magnitude is its length (the
 * sign handling below and the `block += len` stride establish this). */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block, *hole = NULL;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    /* compaction has basically two modes of operation:
     * 1) the buffer is nicely movable: In this mode, blocks can be simply
     * moved towards the beginning. Free blocks add to a shift value,
     * which is the amount to move.
     * 2) the buffer contains unmovable blocks: unmovable blocks create
     * holes and reset shift. Once a hole is found, we're trying to fill
     * holes first, moving by shift is the fallback. As the shift is reset,
     * this effectively splits the buffer into portions of movable blocks.
     * This mode cannot be used if no holes are found yet as it only works
     * when it moves blocks across the portions. On the other side,
     * moving by shift only works within the same portion
     * For simplicity only 1 hole at a time is considered */
    for(block = find_first_free(ctx); block < ctx->alloc_end; block += len)
    {
        bool movable = true; /* cache result to avoid 2nd call to move_block */
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;   /* shift grows more negative: more room below */
            len = -len;     /* stride over the free block */
            continue;
        }
        /* attempt to fill any hole: the tracked hole's free length
         * (-hole->val, positive) must cover this block */
        if (hole && -hole->val >= len)
        {
            intptr_t hlen = -hole->val; /* hole length, positive */
            /* negative offset: relocate this block down into the hole */
            if ((movable = move_block(ctx, block, hole - block)))
            {
                ret = true;
                /* Move was successful. The memory at block is now free */
                block->val = -len;
                /* add its length to shift */
                shift += -len;
                /* Reduce the size of the hole accordingly
                 * but be careful to not overwrite an existing block */
                if (hlen != len)
                {
                    hole += len;              /* front of hole now occupied */
                    hole->val = len - hlen;   /* negative: remaining free space */
                }
                else /* hole closed */
                    hole = NULL;
                continue;
            }
        }
        /* attempt move the allocation by shift */
        if (shift)
        {
            union buflib_data* target_block = block + shift;
            /* reuse the cached move_block() verdict from the hole-fill
             * attempt above so an unmovable block isn't probed twice */
            if (!movable || !move_block(ctx, block, shift))
            {
                /* free space before an unmovable block becomes a hole,
                 * therefore mark this block free and track the hole */
                target_block->val = shift; /* negative: free block of |shift| words */
                hole = target_block;
                shift = 0; /* unmovable block splits off a new portion */
            }
            else
                ret = true;
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed. */
    ctx->alloc_end += shift;
    ctx->compact = true;
    /* true if anything moved, the handle table shrank, or tail space freed */
    return ret || shift;
}