void heap_insert(struct heap_t *heap, long long value, void *data) { int i; struct heap_elem_t tmp; /* grow heap */ if (heap->count == heap->size && !heap_grow(heap)) { heap->error = HEAP_ENOMEM; return; } /* insert element */ i = heap->count; heap->elem[i].value = value; heap->elem[i].data = data; heap->elem[i].time = heap->time++; while (i > 0 && heap_less_than(heap, i, PARENT(i))) { tmp = heap->elem[i]; heap->elem[i] = heap->elem[PARENT(i)]; heap->elem[PARENT(i)] = tmp; i = PARENT(i); } heap->count++; heap->error = 0; }
/* Append data_in at the tail of the heap's backing array without any
 * sifting.  Always returns 0.
 * NOTE(review): the result of heap_grow() is ignored — if growing can
 * fail, the bcopy below may write past the allocation; confirm
 * heap_grow's failure semantics. */
int heap_append(heap_t *heap_in, void *data_in)
{
    byte_t *slot;

    if (heap_in->alloc < heap_in->size + 1)
        heap_grow(heap_in, HEAP_SIZE_INC);

    slot = (byte_t *)heap_in->start + heap_in->step * heap_in->size;
    bcopy(data_in, slot, heap_in->step);
    heap_in->size++;
    return 0;
}
/**
 * Create a new heap structure.
 * @param heap_in The data structure
 * @param step_in The size of data in bytes
 * @param cmp_in Comparison function for the particular data type stored
 * @return 0 on success, -1 if the initial allocation failed
 */
int heap_init( heap_t *heap_in, int step_in, int (*cmp_in)(void*,void*) )
{
    heap_in->cmp = cmp_in; /* Set the order relation function */
    heap_in->start = malloc( HEAP_SIZE_INC * step_in );
    if (heap_in->start == NULL)
        return -1; /* out of memory: was previously used unchecked */
    heap_in->step = step_in;
    heap_in->size = 0;
    heap_in->alloc = 0;
    /* NOTE(review): alloc stays 0 even though HEAP_SIZE_INC slots were just
     * malloc'd, and heap_grow() is then asked for HEAP_SIZE_INC more.  If
     * heap_grow() reallocates `start` this is merely wasteful; verify it
     * does not instead leak the buffer allocated above. */
    heap_grow( heap_in, HEAP_SIZE_INC );
    return 0;
}
/* Resize the heap's backing store to new_len entries and record the new
 * length.  Returns 0 on success, or heap_grow()'s error code unchanged
 * (len is untouched on failure). */
static int heap_set_len(struct ptr_heap *heap, size_t new_len)
{
    int err = heap_grow(heap, new_len);

    if (unlikely(err))
        return err;

    heap->len = new_len;
    return 0;
}
/* Grow the backing array to new_len slots, then commit the new length.
 * Returns 0 on success or the heap_grow() error code; heap->len is only
 * updated when growth succeeded. */
static int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
{
    int err;

    err = heap_grow(heap, new_len);
    if (err != 0)
        return err;

    heap->len = new_len;
    return 0;
}
/* Append obj to the global heap's object table, growing the table when
 * it is full, and hand obj back unchanged.
 * NOTE(review): heap_grow()'s outcome is not checked here — confirm it
 * cannot fail, otherwise the store below may go out of bounds. */
struct lispobj *heap_add(struct lispobj *obj)
{
    if (heap->index >= heap->size)
        heap_grow();

    heap->data[heap->index++] = obj;
    return obj;
}
/* Initialise an empty pointer heap with comparator gt and pre-allocate
 * its slot array.  Returns heap_grow()'s status (0 on success). */
int bt_heap_init(struct ptr_heap *heap, size_t alloc_len,
		 int gt(void *a, void *b))
{
	size_t initial;

	heap->ptrs = NULL;
	heap->len = 0;
	heap->alloc_len = 0;
	heap->gt = gt;

	/*
	 * Minimum size allocated is 1 entry to ensure memory allocation
	 * never fails within bt_heap_replace_max.
	 */
	initial = alloc_len > 1 ? alloc_len : 1;
	return heap_grow(heap, initial);
}
// Returns '1' if it handles the page fault, 0 if not. // Generally, returning '0' will cause a kernel panic. int vmm_page_fault(struct regs *r, void *cr2_value) { // Was this in the kernel's heap? If so, we'll map more pages into the heap. // Otherwise, panic. if ((cr2_value >= (void*)&_kernel_heap_start) && (cr2_value <= (void*)&_kernel_heap_end)) { heap_grow(r, cr2_value); return 1; } printf("Page fault for %p. Heap from %p to %p\n", cr2_value, (void*)&_kernel_heap_start, (void*)&_kernel_heap_end); return 0; }
/*
 * Insert (key, data) into the heap.
 * Returns the index into the heap array, or -1 on error.
 */
int heapInsert(HEAP heap,const DSKEY key,void *data)
{
    Heap * h = (Heap*)heap;
    HeapElement * he;
    int i,pidx;
    HeapInternCmp cmp_func;

    DBG(debug("heapInsert(heap=%p,key=%p,data=%p)\n",
              heap,key,data));

    if (!h) {
        XLOG(h);
        return -1;
    }

    /* Direction of the sift-up comparison depends on min/max mode. */
    if (h->hpMode == HEAP_MINIMIZE)
        cmp_func = heap_larger;
    else
        cmp_func = heap_smaller;

    /* 1. Grow the heap first if needed.  (Previously the new element was
     *    allocated before this check, so a failed grow leaked it.) */
    if (NEEDS2GROW(h) && heap_grow(h)) {
        LLOG(-1);
        return -1;
    }

    /* 2. Make a new element, checking for allocation failure (the old
     *    code dereferenced the result unconditionally). */
    he = heap_new_element(key,data);
    if (!he) {
        LLOG(-1);
        return -1;
    }

    /* 3. Insert the new element: shift parents down until the right
     *    slot for `he` is found, notifying position changes if asked. */
    i = HSIZE(h);
    pidx = HPARENT(i);
    while (i > 0 && cmp_func(h,HARRAY(h,pidx),he)) {
        if (h->hpChgFunc)
            h->hpChgFunc(HARRAY(h,pidx)->heData,i);
        HARRAY(h,i) = HARRAY(h,pidx);
        i = pidx;
        pidx = HPARENT(i);
    }
    HARRAY(h,i) = he;
    HSIZE(h)++;
    return i;
}
/* Push hnode onto the heap and sift it up into position.
 * Returns 0 on success, or heap_grow()'s error code if the slot array
 * could not be enlarged. */
int gh_heap_push(gh_heap_t *heap, gh_hnode_t *hnode)
{
    gh_hnode_t **new_ptr;
    unsigned int new_index = heap->count;

    assert(!gh_hnode_is_valid(hnode));

    /* Grow when the high-water mark is reached, and propagate failure
     * instead of writing past the slot array (heap_grow() returns a
     * non-zero error code, as gh_heap_init() relies on). */
    if (new_index == heap->highwm) {
        int rc = heap_grow(heap);
        if (rc)
            return rc;
    }

    hnode->index = new_index;
    new_ptr = hnode_ptr(heap, new_index);
    heap->count++;
    *new_ptr = hnode;

    hnode_up(heap, hnode);
    return 0;
}
/* Prepare an empty heap using comparator cmp_op and allocate the first
 * slot array.  Returns 0 on success, -1 for a missing comparator, or
 * heap_grow()'s error code (the heap is freed again on grow failure). */
int gh_heap_init(gh_heap_t *heap, gh_heap_cmp_t cmp_op)
{
    int status;

    if (cmp_op == NULL)
        return -1;

    heap->cmp_op = cmp_op;
    heap->count = 0;
    heap->highwm = 0;
    heap->slots = NULL;

    status = heap_grow(heap);
    if (status != 0)
        gh_heap_free(heap);
    return status;
}
/* Insert data_in into the heap, sifting it up to its ordered position.
 * Always returns 0. */
int heap_insert( heap_t *heap_in, void *data_in )
{
    int i,j,step = heap_in->step;
    byte_t *start;

    if( heap_in->alloc < heap_in->size + 1 )
        heap_grow( heap_in, HEAP_SIZE_INC );

    /* Read `start` only AFTER a possible grow: heap_grow() may
     * reallocate and move the backing buffer, so the previously cached
     * pointer (taken before the grow) could dangle. */
    start = (byte_t*) heap_in->start;

    i = heap_in->size;
    while( i > 0 ) {
        j = heap_parent( i );
        /* Parent already ordered before the new element: stop here. */
        if( heap_in->cmp( start + j * step, data_in ) == 1 )
            break;
        bcopy( start + j * step, start + i * step, step );
        i = j; /* Take a step up the tree toward the root */
    }
    bcopy( data_in, start + i * step, step );
    ++heap_in->size;
    return 0;
}
/*
 * Allocate `size` bytes from the kernel heap, optionally aligned.
 * `alignment` must be a power of two (0 = default; non-zero values below
 * 16 are bumped to 16).  Returns NULL for a bad alignment or when no
 * free chunk can satisfy the request.
 *
 * First-fit walk of theheap.free_list under theheap.lock; the winning
 * chunk is split when the leftover is large enough to become its own
 * free chunk.  An alloc_struct_begin header is stored immediately before
 * the returned pointer so the free path can recover the real chunk.
 * With WITH_KERNEL_VM, a failed walk triggers one heap_grow() retry.
 */
void *heap_alloc(size_t size, unsigned int alignment)
{
    void *ptr;
#if DEBUG_HEAP
    size_t original_size = size;
#endif

    LTRACEF("size %zd, align %d\n", size, alignment);

    // deal with the pending free list
    if (unlikely(!list_is_empty(&theheap.delayed_free_list))) {
        heap_free_delayed_list();
    }

    // alignment must be power of 2
    if (alignment & (alignment - 1))
        return NULL;

    // we always put a size field + base pointer + magic in front of the allocation
    size += sizeof(struct alloc_struct_begin);
#if DEBUG_HEAP
    size += PADDING_SIZE;
#endif

    // make sure we allocate at least the size of a struct free_heap_chunk so that
    // when we free it, we can create a struct free_heap_chunk struct and stick it
    // in the spot
    if (size < sizeof(struct free_heap_chunk))
        size = sizeof(struct free_heap_chunk);

    // round up size to a multiple of native pointer size
    size = ROUNDUP(size, sizeof(void *));

    // deal with nonzero alignments
    if (alignment > 0) {
        if (alignment < 16)
            alignment = 16;

        // add alignment for worst case fit
        size += alignment;
    }

#if WITH_KERNEL_VM
    int retry_count = 0;
retry:
#endif
    mutex_acquire(&theheap.lock);

    // walk through the list
    ptr = NULL;
    struct free_heap_chunk *chunk;
    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size

        // is it big enough to service our allocation?
        if (chunk->len >= size) {
            ptr = chunk;

            // remove it from the list
            struct list_node *next_node = list_next(&theheap.free_list, &chunk->node);
            list_delete(&chunk->node);

            if (chunk->len > size + sizeof(struct free_heap_chunk)) {
                // there's enough space in this chunk to create a new one after the allocation
                struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size, true);

                // truncate this chunk
                chunk->len -= chunk->len - size;

                // add the new one where chunk used to be
                if (next_node)
                    list_add_before(next_node, &newchunk->node);
                else
                    list_add_tail(&theheap.free_list, &newchunk->node);
            }

            // the allocated size is actually the length of this chunk, not the size requested
            DEBUG_ASSERT(chunk->len >= size);
            size = chunk->len;

#if DEBUG_HEAP
            memset(ptr, ALLOC_FILL, size);
#endif

            // skip past the header; the caller's pointer starts here
            ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin));

            // align the output if requested
            if (alignment > 0) {
                ptr = (void *)ROUNDUP((addr_t)ptr, (addr_t)alignment);
            }

            // fill in the header just below the (possibly aligned) pointer
            struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
            as--;
#if LK_DEBUGLEVEL > 1
            as->magic = HEAP_MAGIC;
#endif
            as->ptr = (void *)chunk;
            as->size = size;
            theheap.remaining -= size;

            if (theheap.remaining < theheap.low_watermark) {
                theheap.low_watermark = theheap.remaining;
            }
#if DEBUG_HEAP
            as->padding_start = ((uint8_t *)ptr + original_size);
            as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size));
            // printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size);

            memset(as->padding_start, PADDING_FILL, as->padding_size);
#endif

            break;
        }
    }

    mutex_release(&theheap.lock);

#if WITH_KERNEL_VM
    /* try to grow the heap if we can */
    if (ptr == NULL && retry_count == 0) {
        size_t growby = MAX(HEAP_GROW_SIZE, ROUNDUP(size, PAGE_SIZE));

        ssize_t err = heap_grow(growby);
        if (err >= 0) {
            retry_count++;
            goto retry;
        }
    }
#endif

    LTRACEF("returning ptr %p\n", ptr);

    return ptr;
}
/* Append x at the end of the heap's array, growing first if no free
 * slot remains.  Uses >= rather than the original == so that a heap
 * whose len has somehow reached or passed capacity (e.g. capacity 0)
 * still triggers growth instead of writing out of bounds. */
void heap_add(heap* h, int x) {
    if (h->len + 1 >= h->capacity)
        heap_grow(h);
    h->data[h->len] = x;
    h->len++;
}