/** * Find a range of free bits. This is useful for allocating * a set of contiguous pages. * * @todo Optimize * * @return On success 0 is returned and \a start_bit is set * to appropriate value. On error 1 is returned. */ static int find_free_bits_range(unsigned count, unsigned *start_bit) { unsigned first_free_bit; unsigned l_start_bit = 0; bool success = FALSE; unsigned i, b; unsigned remaining_bits = count - 1; for (; (l_start_bit + remaining_bits) <= LAST_BIT_NO;) { if (0 == find_first_free(&first_free_bit, l_start_bit)) { /* Now check whether the following 'remaining_bits' are also free */ for (b = first_free_bit + 1, i = 0; (i < remaining_bits); i++, b++) { if (fp_getbit(b)) break; } if (i == remaining_bits) { success = TRUE; break; } } else break; l_start_bit = first_free_bit + i; } if (success) { *start_bit = first_free_bit; return 0; } else return 1; }
/* Return the amount of unallocated memory in bytes (even if not contiguous) */
size_t buflib_available(struct buflib_context* ctx)
{
    union buflib_data *block = find_first_free(ctx);
    size_t free_units = 0;

    /* Sum every free block (tagged with a negative length) before the
     * end-of-allocation mark. */
    while (block < ctx->alloc_end)
    {
        if (block->val < 0)
            free_units += -block->val;
        block += abs(block->val);
    }

    /* Convert units to bytes and add the untagged free tail region. */
    return free_units * sizeof(union buflib_data) + free_space_at_end(ctx);
}
/* Return the maximum allocatable contiguous memory in bytes */ size_t buflib_allocatable(struct buflib_context* ctx) { union buflib_data *this; size_t free_space = 0, max_free_space = 0; /* make sure buffer is as contiguous as possible */ if (!ctx->compact) buflib_compact(ctx); /* now look if there's free in holes */ for(this = find_first_free(ctx); this < ctx->alloc_end; this += abs(this->val)) { if (this->val < 0) { free_space += -this->val; continue; } /* an unmovable section resets the count as free space * can't be contigous */ if (!IS_MOVABLE(this)) { if (max_free_space < free_space) max_free_space = free_space; free_space = 0; } } /* select the best */ max_free_space = MAX(max_free_space, free_space); max_free_space *= sizeof(union buflib_data); max_free_space = MAX(max_free_space, free_space_at_end(ctx)); if (max_free_space > 0) return max_free_space; else return 0; }
/* Request any unit. */
static int anonymous_allocation(okl4_bitmap_allocator_t * allocator, okl4_bitmap_item_t * item)
{
    okl4_word_t word_count =
            (allocator->size + OKL4_WORD_T_BIT - 1) / OKL4_WORD_T_BIT;
    okl4_word_t word;

    /* First sweep: from the cached guess up to the end of the bitmap. */
    for (word = allocator->pos_guess; word < word_count; word++)
    {
        if (allocator->data[word] != ~(okl4_word_t)0)
            goto claim;
    }

    /* Second sweep: the words before the guess may still hold free units. */
    for (word = 0; word < allocator->pos_guess; word++)
    {
        if (allocator->data[word] != ~(okl4_word_t)0)
            goto claim;
    }

    return OKL4_ALLOC_EXHAUSTED;

claim:
    /* Remember this word as a likely spot for the next search. */
    allocator->pos_guess = word;

    {
        int bit = find_first_free(allocator->data[word]);
        assert(bit >= 0);

        /* Mark the unit allocated and hand its number back. */
        OKL4_SET_BIT(allocator->data[word], bit);
        item->unit = (word * OKL4_WORD_T_BIT) + (okl4_word_t)bit
                + allocator->base;
    }
    return OKL4_OK;
}
/* Allocate 'size' bytes from the buffer, creating a handle-table entry and
 * a block header carrying the handle pointer, the ops pointer and the
 * (aligned) allocation name.  On failure this tries to make room by
 * compacting and shrinking other allocations before giving up.
 *
 * Returns a positive handle index on success, -1 if no handle-table entry
 * could be obtained, -2 if no buffer space could be found. */
int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name, struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    /* round the byte count up to whole buflib_data units */
    size = (size + sizeof(union buflib_data) - 1) / sizeof(union buflib_data)
    /* add 4 objects for alloc len, pointer to handle table entry and
     * name length, and the ops pointer */
        + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeded, it may be
         * possible to get a handle by trying again. */
        union buflib_data* last_block = find_block_before(ctx, ctx->alloc_end, false);
        /* NOTE: this 'ops' deliberately shadows the parameter — it is the
         * callbacks of the LAST block, used to pick a shrink strategy */
        struct buflib_callbacks* ops = last_block[2].ops;
        unsigned hints = 0;
        if (!ops || !ops->shrink_callback)
        {
            /* the last one isn't shrinkable
             * make room in front of a shrinkable and move this alloc */
            hints = BUFLIB_SHRINK_POS_FRONT;
            hints |= last_block->val * sizeof(union buflib_data);
        }
        else if (ops && ops->shrink_callback)
        {
            /* the last is shrinkable, make room for handles directly */
            hints = BUFLIB_SHRINK_POS_BACK;
            hints |= 16*sizeof(union buflib_data);
        }
        /* buflib_compact_and_shrink() will compact and move last_block()
         * if possible */
        if (buflib_compact_and_shrink(ctx, hints))
            goto handle_alloc;
        return -1;
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    /* first-fit search over the block list for a free block >= size units */
    for (block = find_first_free(ctx);;block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL; /* tail too small: signal failure below */
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction. */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
        {
            goto buffer_alloc;
        } else {
            /* give the handle back before bailing out */
            handle->val=1;
            handle_free(ctx, handle);
            return -2;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle. */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    /* NOTE(review): if 'name' is NULL this strcpy dereferences NULL —
     * callers appear to always pass a name; verify */
    strcpy(block[3].name, name);
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    /* user data starts right after the name-length slot */
    handle->alloc = (char*)(name_len_slot + 1);
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len; /* negative: remaining free space */
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block, *hole = NULL;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    /* compaction has basically two modes of operation:
     * 1) the buffer is nicely movable: In this mode, blocks can be simply
     * moved towards the beginning. Free blocks add to a shift value,
     * which is the amount to move.
     * 2) the buffer contains unmovable blocks: unmovable blocks create
     * holes and reset shift. Once a hole is found, we're trying to fill
     * holes first, moving by shift is the fallback. As the shift is reset,
     * this effectively splits the buffer into portions of movable blocks.
     * This mode cannot be used if no holes are found yet as it only works
     * when it moves blocks across the portions. On the other side,
     * moving by shift only works within the same portion
     * For simplicity only 1 hole at a time is considered */
    for(block = find_first_free(ctx); block < ctx->alloc_end; block += len)
    {
        bool movable = true; /* cache result to avoid 2nd call to move_block */
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;  /* shift is <= 0: the distance to move left */
            len = -len;    /* make len positive so the loop can advance */
            continue;
        }
        /* attempt to fill any hole */
        if (hole && -hole->val >= len)
        {
            intptr_t hlen = -hole->val; /* hole capacity (positive units) */
            if ((movable = move_block(ctx, block, hole - block)))
            {
                ret = true;
                /* Move was successful. The memory at block is now free */
                block->val = -len;
                /* add its length to shift */
                shift += -len;
                /* Reduce the size of the hole accordingly
                 * but be careful to not overwrite an existing block */
                if (hlen != len)
                {
                    hole += len;
                    hole->val = len - hlen; /* negative */
                }
                else /* hole closed */
                    hole = NULL;
                continue;
            }
        }
        /* attempt move the allocation by shift */
        if (shift)
        {
            union buflib_data* target_block = block + shift;
            if (!movable || !move_block(ctx, block, shift))
            {
                /* free space before an unmovable block becomes a hole,
                 * therefore mark this block free and track the hole */
                target_block->val = shift; /* shift < 0 == free length tag */
                hole = target_block;
                shift = 0; /* the unmovable block starts a new portion */
            }
            else
                ret = true;
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed. */
    ctx->alloc_end += shift;
    ctx->compact = true;
    return ret || shift;
}