int versatile_clcd_setup(struct clcd_fb *fb, unsigned long framesize)
{
	int rc;
	u32 use_dma, val[2];
	void *screen_base;
	unsigned long smem_len;
	physical_addr_t smem_pa;

	if (!fb->dev->node) {
		return VMM_EINVALID;
	}

	/* Fall back to a static framebuffer if "use_dma" is absent */
	if (vmm_devtree_read_u32(fb->dev->node, "use_dma", &use_dma)) {
		use_dma = 0;
	}

	if (use_dma) {
		smem_len = framesize;
		screen_base = (void *)vmm_host_alloc_pages(
						VMM_SIZE_TO_PAGE(smem_len),
						VMM_MEMORY_READABLE |
						VMM_MEMORY_WRITEABLE);
		if (!screen_base) {
			vmm_printf("CLCD: unable to alloc framebuffer\n");
			return VMM_ENOMEM;
		}

		rc = vmm_host_va2pa((virtual_addr_t)screen_base, &smem_pa);
		if (rc) {
			/* Free the pages allocated above before failing */
			vmm_host_free_pages((virtual_addr_t)screen_base,
					    VMM_SIZE_TO_PAGE(smem_len));
			return rc;
		}
	} else {
		rc = vmm_devtree_read_u32_array(fb->dev->node,
						"framebuffer", val, 2);
		if (rc) {
			return rc;
		}
		smem_pa = val[0];
		smem_len = val[1];

		if (smem_len < framesize) {
			return VMM_ENOMEM;
		}

		screen_base = (void *)vmm_host_iomap(smem_pa, smem_len);
		if (!screen_base) {
			vmm_printf("CLCD: unable to map framebuffer\n");
			return VMM_ENOMEM;
		}
	}

	fb->fb.screen_base = screen_base;
	fb->fb.fix.smem_start = smem_pa;
	fb->fb.fix.smem_len = smem_len;

	return 0;
}
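No teardown is shown above; the sketch below is a hypothetical counterpart, assuming the caller remembers which path was taken (the setup code does not record it) and that vmm_host_iounmap() takes just the mapped virtual address, which is an assumption about its signature.

/* Hypothetical cleanup for versatile_clcd_setup(); use_dma is passed in
 * because the setup path above does not store it anywhere.
 */
static void versatile_clcd_cleanup(struct clcd_fb *fb, u32 use_dma)
{
	if (use_dma) {
		/* DMA path: pages came from vmm_host_alloc_pages() */
		vmm_host_free_pages((virtual_addr_t)fb->fb.screen_base,
				    VMM_SIZE_TO_PAGE(fb->fb.fix.smem_len));
	} else {
		/* Static path: mapping came from vmm_host_iomap();
		 * vmm_host_iounmap() signature assumed here.
		 */
		vmm_host_iounmap((virtual_addr_t)fb->fb.screen_base);
	}
}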
static int heap_init(struct vmm_heap_control *heap,
		     bool is_normal, const u32 size_kb, u32 mem_flags)
{
	int rc = VMM_OK;

	memset(heap, 0, sizeof(*heap));

	heap->heap_size = size_kb * 1024;
	heap->heap_start = (void *)vmm_host_alloc_pages(
					VMM_SIZE_TO_PAGE(heap->heap_size),
					mem_flags);
	if (!heap->heap_start) {
		return VMM_ENOMEM;
	}

	rc = vmm_host_va2pa((virtual_addr_t)heap->heap_start,
			    &heap->heap_start_pa);
	if (rc) {
		goto fail_free_pages;
	}

	/* 12.5 percent of the heap is reserved for house-keeping */
	heap->hk_size = (heap->heap_size) / 8;

	/* The book-keeping area of a non-normal heap always lives
	 * in the normal heap.
	 */
	if (is_normal) {
		heap->hk_start = heap->heap_start;
		heap->mem_start = heap->heap_start + heap->hk_size;
		heap->mem_size = heap->heap_size - heap->hk_size;
	} else {
		heap->hk_start = vmm_malloc(heap->hk_size);
		if (!heap->hk_start) {
			rc = VMM_ENOMEM;
			goto fail_free_pages;
		}
		heap->mem_start = heap->heap_start;
		heap->mem_size = heap->heap_size;
	}

	rc = buddy_allocator_init(&heap->ba,
				  heap->hk_start, heap->hk_size,
				  (unsigned long)heap->mem_start,
				  heap->mem_size,
				  HEAP_MIN_BIN, HEAP_MAX_BIN);
	if (rc) {
		goto fail_free_hk;
	}

	return VMM_OK;

fail_free_hk:
	/* For non-normal heaps the house-keeping area was allocated
	 * separately with vmm_malloc() and must be freed as well.
	 */
	if (!is_normal) {
		vmm_free(heap->hk_start);
	}
fail_free_pages:
	vmm_host_free_pages((virtual_addr_t)heap->heap_start,
			    VMM_SIZE_TO_PAGE(heap->heap_size));
	return rc;
}
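A usage sketch, under assumptions: the two control structures and the 256 KiB sizes are invented here, and VMM_MEMORY_FLAGS_NORMAL_NOCACHE is borrowed from the page-table allocator below as a plausible flag for a non-normal heap. Note the ordering constraint: the non-normal heap vmm_malloc()s its house-keeping area, so the normal heap must already be up.

/* Illustrative only: one normal heap, then one no-cache heap whose
 * book-keeping lands in the normal heap via vmm_malloc().
 */
static struct vmm_heap_control normal_heap, nocache_heap;

static int heaps_setup(void)
{
	int rc;

	rc = heap_init(&normal_heap, TRUE, 256, VMM_MEMORY_FLAGS_NORMAL);
	if (rc) {
		return rc;
	}

	/* Must come second: depends on vmm_malloc() from normal_heap */
	return heap_init(&nocache_heap, FALSE, 256,
			 VMM_MEMORY_FLAGS_NORMAL_NOCACHE);
}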
static void *__arm_lpae_alloc_pages(size_t size, struct io_pgtable_cfg *cfg)
{
	size_t p;
	virtual_addr_t pages;

	pages = vmm_host_alloc_pages(VMM_SIZE_TO_PAGE(size),
				     VMM_MEMORY_FLAGS_NORMAL_NOCACHE);
	/* Check the allocation before zeroing; otherwise a failed
	 * allocation is dereferenced starting at address zero.
	 */
	if (!pages) {
		return NULL;
	}

	/* Zero the table one 64-bit entry at a time */
	for (p = 0; p < (VMM_SIZE_TO_PAGE(size) * VMM_PAGE_SIZE); p += 8) {
		*(u64 *)(pages + p) = 0x0;
	}

	return (void *)pages;
}
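The matching release path is not shown; below is a minimal sketch assuming the pages came from the allocator above. The helper name and signature are hypothetical here.

/* Hypothetical counterpart to __arm_lpae_alloc_pages() */
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	vmm_host_free_pages((virtual_addr_t)pages,
			    VMM_SIZE_TO_PAGE(size));
}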
physical_addr_t cpu_create_vcpu_intercept_table(size_t size,
						virtual_addr_t *tbl_vaddr)
{
	physical_addr_t phys = 0;
	virtual_addr_t vaddr;

	vaddr = vmm_host_alloc_pages(VMM_SIZE_TO_PAGE(size),
				     VMM_MEMORY_FLAGS_NORMAL);
	/* Bail out early if the allocation itself failed */
	if (!vaddr) {
		return 0;
	}

	if (vmm_host_va2pa(vaddr, &phys) != VMM_OK) {
		/* Free the pages instead of leaking them */
		vmm_host_free_pages(vaddr, VMM_SIZE_TO_PAGE(size));
		return 0;
	}

	memset((void *)vaddr, 0x00, size);

	*tbl_vaddr = vaddr;

	return phys;
}
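A hedged usage sketch; the two-page size and the surrounding function are illustrative. The point is that a zero return covers both the allocation and the VA-to-PA translation failing:

/* Hypothetical caller of cpu_create_vcpu_intercept_table() */
static int setup_intercept_table_example(void)
{
	virtual_addr_t tbl_va;
	physical_addr_t tbl_pa;

	tbl_pa = cpu_create_vcpu_intercept_table(2 * VMM_PAGE_SIZE,
						 &tbl_va);
	if (!tbl_pa) {
		/* allocation or VA-to-PA translation failed */
		return VMM_ENOMEM;
	}

	/* program tbl_pa into hardware; keep tbl_va for CPU access */
	return VMM_OK;
}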
struct mempool *mempool_ram_create(u32 entity_size,
				   u32 page_count, u32 mem_flags)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size ||
	    ((VMM_PAGE_SIZE * page_count) < entity_size)) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_RAM;
	mp->entity_size = entity_size;
	mp->entity_count = udiv64((VMM_PAGE_SIZE * page_count), entity_size);

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base = vmm_host_alloc_pages(page_count, mem_flags);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}
	mp->d.ram.page_count = page_count;
	mp->d.ram.mem_flags = mem_flags;

	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
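A sizing sketch, assuming 4 KiB pages: with entity_size = 96 and page_count = 1, udiv64(4096, 96) yields 42 entities and the trailing 64 bytes of the page go unused. The wrapper below is illustrative only.

/* Illustrative: a pool of 42 entities of 96 bytes over one normal page */
static struct mempool *make_small_pool(void)
{
	return mempool_ram_create(96, 1, VMM_MEMORY_FLAGS_NORMAL);
}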
int arch_guest_init(struct vmm_guest *guest)
{
	int rc;
	u32 ovect_flags;
	virtual_addr_t ovect_va;
	struct cpu_page pg;

	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(arm_guest_priv_t));
		if (!guest->arch_priv) {
			return VMM_EFAIL;
		}

		ovect_flags = 0x0;
		ovect_flags |= VMM_MEMORY_READABLE;
		ovect_flags |= VMM_MEMORY_WRITEABLE;
		ovect_flags |= VMM_MEMORY_CACHEABLE;
		ovect_flags |= VMM_MEMORY_EXECUTABLE;
		ovect_va = vmm_host_alloc_pages(1, ovect_flags);
		if (!ovect_va) {
			rc = VMM_EFAIL;
			goto fail_free_priv;
		}

		if ((rc = cpu_mmu_get_reserved_page(ovect_va, &pg))) {
			goto fail_free_pages;
		}
		if ((rc = cpu_mmu_unmap_reserved_page(&pg))) {
			goto fail_free_pages;
		}
#if defined(CONFIG_ARMV5)
		pg.ap = TTBL_AP_SRW_UR;
#else
		if (pg.ap == TTBL_AP_SR_U) {
			pg.ap = TTBL_AP_SR_UR;
		} else {
			pg.ap = TTBL_AP_SRW_UR;
		}
#endif
		if ((rc = cpu_mmu_map_reserved_page(&pg))) {
			goto fail_free_pages;
		}
		arm_guest_priv(guest)->ovect = (u32 *)ovect_va;
	}

	return VMM_OK;

fail_free_pages:
	/* Release the vector page on failure */
	vmm_host_free_pages(ovect_va, 1);
fail_free_priv:
	/* Release guest private data instead of leaking it */
	vmm_free(guest->arch_priv);
	guest->arch_priv = NULL;
	return rc;
}
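Xvisor pairs this with an arch_guest_deinit(); the sketch below is a minimal guess at that teardown, built only from the fields used above, not the project's actual implementation.

/* Sketch of a matching teardown; details may differ in Xvisor proper */
int arch_guest_deinit(struct vmm_guest *guest)
{
	int rc;

	if (guest->arch_priv) {
		if (arm_guest_priv(guest)->ovect) {
			rc = vmm_host_free_pages(
				(virtual_addr_t)arm_guest_priv(guest)->ovect,
				1);
			if (rc) {
				return rc;
			}
		}
		vmm_free(guest->arch_priv);
		guest->arch_priv = NULL;
	}

	return VMM_OK;
}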
struct mempool *mempool_create(u32 buf_size, u32 buf_count)
{
	u32 b;
	virtual_addr_t va;
	struct mempool *mp;

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->f = fifo_alloc(sizeof(virtual_addr_t), buf_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->buf_count = buf_count;
	mp->buf_size = buf_size;
	mp->page_count = VMM_SIZE_TO_PAGE(buf_size * buf_count);

	mp->page_base = vmm_host_alloc_pages(mp->page_count,
					     VMM_MEMORY_FLAGS_NORMAL);
	if (!mp->page_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}

	for (b = 0; b < mp->buf_count; b++) {
		va = mp->page_base + b * buf_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
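A usage sketch built only from the structures above: buffers are handed out and recycled through the pool's FIFO. The exact fifo_dequeue() signature is an assumption here, and the helper names are illustrative.

/* Hand out a buffer from the pool's FIFO; NULL when exhausted */
static void *pool_buf_get(struct mempool *mp)
{
	virtual_addr_t va;

	if (!fifo_dequeue(mp->f, &va)) {	/* signature assumed */
		return NULL;
	}

	return (void *)va;
}

/* Recycle a buffer back into the pool */
static void pool_buf_put(struct mempool *mp, void *buf)
{
	virtual_addr_t va = (virtual_addr_t)buf;

	fifo_enqueue(mp->f, &va, FALSE);
}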