int __init vmm_host_vapool_init(virtual_addr_t base,
				virtual_size_t size,
				virtual_addr_t hkbase)
{
	int rc;

	/* The house-keeping area must lie inside the VA pool */
	if ((hkbase < base) || ((base + size) <= hkbase)) {
		return VMM_EFAIL;
	}

	/* Round pool start and size down to page boundaries */
	vpctrl.vapool_start = base;
	vpctrl.vapool_size = size;
	vpctrl.vapool_start &= ~VMM_PAGE_MASK;
	vpctrl.vapool_size &= ~VMM_PAGE_MASK;
	vpctrl.vapool_page_count = vpctrl.vapool_size >> VMM_PAGE_SHIFT;

	/* Hand the pool to the buddy allocator, with its
	 * house-keeping state placed at hkbase
	 */
	rc = buddy_allocator_init(&vpctrl.ba,
				  (void *)hkbase,
				  vmm_host_vapool_estimate_hksize(size),
				  base, size,
				  VAPOOL_MIN_BIN, VAPOOL_MAX_BIN);
	if (rc) {
		return rc;
	}

	return VMM_OK;
}
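/*
 * A minimal stand-alone sketch (not Xvisor code) of the page-alignment
 * arithmetic above, assuming 4 KiB pages (VMM_PAGE_SHIFT == 12); the
 * start and size values are made up for illustration. Masking with
 * ~VMM_PAGE_MASK rounds both down to a page boundary, and shifting the
 * rounded size by VMM_PAGE_SHIFT yields the pool's page count.
 */
static void vapool_align_demo(void)
{
	const unsigned long page_shift = 12;
	const unsigned long page_mask = (1UL << page_shift) - 1;
	unsigned long start = 0x10000123UL;	/* deliberately unaligned */
	unsigned long size = 0x00801FFFUL;
	unsigned long page_count;

	start &= ~page_mask;			/* -> 0x10000000 */
	size &= ~page_mask;			/* -> 0x00801000 */
	page_count = size >> page_shift;	/* -> 0x801 == 2049 pages */

	(void)start;
	(void)page_count;
}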
static int heap_init(struct vmm_heap_control *heap,
		     bool is_normal,
		     const u32 size_kb,
		     u32 mem_flags)
{
	int rc = VMM_OK;

	memset(heap, 0, sizeof(*heap));

	/* Back the heap with host pages of the requested memory type */
	heap->heap_size = size_kb * 1024;
	heap->heap_start = (void *)vmm_host_alloc_pages(
					VMM_SIZE_TO_PAGE(heap->heap_size),
					mem_flags);
	if (!heap->heap_start) {
		return VMM_ENOMEM;
	}

	rc = vmm_host_va2pa((virtual_addr_t)heap->heap_start,
			    &heap->heap_start_pa);
	if (rc) {
		goto fail_free_pages;
	}

	/* 12.5 percent of the heap for house-keeping */
	heap->hk_size = heap->heap_size / 8;

	/* Always have the book-keeping area for
	 * non-normal heaps in the normal heap
	 */
	if (is_normal) {
		heap->hk_start = heap->heap_start;
		heap->mem_start = heap->heap_start + heap->hk_size;
		heap->mem_size = heap->heap_size - heap->hk_size;
	} else {
		heap->hk_start = vmm_malloc(heap->hk_size);
		if (!heap->hk_start) {
			rc = VMM_ENOMEM;
			goto fail_free_pages;
		}
		heap->mem_start = heap->heap_start;
		heap->mem_size = heap->heap_size;
	}

	rc = buddy_allocator_init(&heap->ba,
				  heap->hk_start, heap->hk_size,
				  (unsigned long)heap->mem_start,
				  heap->mem_size,
				  HEAP_MIN_BIN, HEAP_MAX_BIN);
	if (rc) {
		goto fail_free_hk;
	}

	return VMM_OK;

fail_free_hk:
	/* Non-normal heaps allocated their house-keeping area from
	 * the normal heap, so release it on failure
	 */
	if (!is_normal) {
		vmm_free(heap->hk_start);
	}
fail_free_pages:
	vmm_host_free_pages((virtual_addr_t)heap->heap_start,
			    VMM_SIZE_TO_PAGE(heap->heap_size));
	return rc;
}
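/*
 * A hedged sketch of how heap_init() might be driven; the names
 * normal_heap, CONFIG_HEAP_SIZE_MB and VMM_MEMORY_FLAGS_NORMAL are
 * assumptions for illustration, not verified against the tree. The
 * normal heap keeps its house-keeping inside itself (is_normal true),
 * so a hypothetical 16 MiB heap yields 2 MiB of book-keeping (12.5
 * percent) and 14 MiB of allocatable memory, while non-normal heaps
 * borrow their book-keeping from the normal heap via vmm_malloc().
 */
static struct vmm_heap_control normal_heap;

int __init vmm_heap_init(void)
{
	/* e.g. CONFIG_HEAP_SIZE_MB == 16 gives hk_size == 2 MiB */
	return heap_init(&normal_heap, true,
			 CONFIG_HEAP_SIZE_MB * 1024,
			 VMM_MEMORY_FLAGS_NORMAL);
}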