/* Initialize the global mbuf pools: one RAM-backed pool for the
 * fixed-size mbuf headers, plus one pool per ext-buffer slab size.
 *
 * Returns VMM_OK on success, VMM_ENOMEM if the mandatory header
 * pool cannot be created. Failure to create an individual ext slab
 * pool is tolerated: that slab is simply left NULL.
 */
int __init vmm_mbufpool_init(void)
{
	u32 slab, bsz, bcnt, ext_total;

	memset(&mbpctrl, 0, sizeof(mbpctrl));

	/* Create the mandatory pool of mbuf headers */
	bsz = sizeof(struct vmm_mbuf);
	bcnt = CONFIG_NET_MBUF_POOL_SIZE;
	mbpctrl.mpool = mempool_ram_create(bsz,
					   VMM_SIZE_TO_PAGE(bsz * bcnt),
					   VMM_MEMORY_FLAGS_NORMAL);
	if (!mbpctrl.mpool) {
		return VMM_ENOMEM;
	}

	/* Create ext slab pools; a slab whose computed buffer size or
	 * count is zero gets no pool (NULL entry).
	 */
	ext_total = (CONFIG_NET_MBUF_EXT_POOL_SIZE_KB * 1024);
	for (slab = 0; slab < EPOOL_SLAB_COUNT; slab++) {
		bsz = epool_slab_buf_size(slab);
		bcnt = epool_slab_buf_count(ext_total, slab);
		mbpctrl.epool_slabs[slab] = (bsz && bcnt) ?
			mempool_ram_create(bsz,
					   VMM_SIZE_TO_PAGE(bsz * bcnt),
					   VMM_MEMORY_FLAGS_NORMAL) : NULL;
	}

	return VMM_OK;
}
/* Initialize one heap instance.
 *
 * @heap      control block to populate (zeroed first)
 * @is_normal TRUE for the normal heap, whose house-keeping area lives
 *            inside its own pages; FALSE for other heaps, whose
 *            house-keeping area is vmm_malloc()'ed from the normal heap
 * @size_kb   heap size in KiB
 * @mem_flags flags passed to vmm_host_alloc_pages()
 *
 * Returns VMM_OK, or a negative error with all acquired resources
 * released.
 *
 * Fix: the original leaked the vmm_malloc()'ed hk_start of a
 * non-normal heap when buddy_allocator_init() failed; a dedicated
 * cleanup label now releases it.
 */
static int heap_init(struct vmm_heap_control *heap,
		     bool is_normal, const u32 size_kb, u32 mem_flags)
{
	int rc = VMM_OK;

	memset(heap, 0, sizeof(*heap));

	heap->heap_size = size_kb * 1024;
	heap->heap_start = (void *)vmm_host_alloc_pages(
					VMM_SIZE_TO_PAGE(heap->heap_size),
					mem_flags);
	if (!heap->heap_start) {
		return VMM_ENOMEM;
	}

	rc = vmm_host_va2pa((virtual_addr_t)heap->heap_start,
			    &heap->heap_start_pa);
	if (rc) {
		goto fail_free_pages;
	}

	/* 12.5 percent for house-keeping */
	heap->hk_size = (heap->heap_size) / 8;

	/* Always have book keeping area for
	 * non-normal heaps in normal heap
	 */
	if (is_normal) {
		heap->hk_start = heap->heap_start;
		heap->mem_start = heap->heap_start + heap->hk_size;
		heap->mem_size = heap->heap_size - heap->hk_size;
	} else {
		heap->hk_start = vmm_malloc(heap->hk_size);
		if (!heap->hk_start) {
			rc = VMM_ENOMEM;
			goto fail_free_pages;
		}
		heap->mem_start = heap->heap_start;
		heap->mem_size = heap->heap_size;
	}

	rc = buddy_allocator_init(&heap->ba,
				  heap->hk_start, heap->hk_size,
				  (unsigned long)heap->mem_start,
				  heap->mem_size,
				  HEAP_MIN_BIN, HEAP_MAX_BIN);
	if (rc) {
		goto fail_free_hk;
	}

	return VMM_OK;

fail_free_hk:
	/* A non-normal heap's house-keeping area was allocated from the
	 * normal heap and must be freed separately; a normal heap's hk
	 * area lives inside heap_start and is released below.
	 */
	if (!is_normal) {
		vmm_free(heap->hk_start);
	}
fail_free_pages:
	vmm_host_free_pages((virtual_addr_t)heap->heap_start,
			    VMM_SIZE_TO_PAGE(heap->heap_size));
	return rc;
}
/* Allocate and zero page-table memory for the ARM LPAE io-pgtable code.
 *
 * @size rounded up to whole pages; the entire allocation is zeroed.
 * @cfg  unused here; presumably kept for the io-pgtable allocator
 *       signature — TODO confirm.
 *
 * Returns a virtual pointer to the zeroed pages, or NULL on failure.
 *
 * Fix: the original ran the zeroing loop BEFORE checking the
 * allocation result, writing through address zero when
 * vmm_host_alloc_pages() failed. Check first, then zero.
 */
static void *__arm_lpae_alloc_pages(size_t size, struct io_pgtable_cfg *cfg)
{
	size_t p;
	virtual_addr_t pages;

	pages = vmm_host_alloc_pages(VMM_SIZE_TO_PAGE(size),
				     VMM_MEMORY_FLAGS_NORMAL_NOCACHE);
	if (!pages) {
		return NULL;
	}

	/* Zero the whole allocation, 64 bits at a time */
	for (p = 0; p < (VMM_SIZE_TO_PAGE(size) * VMM_PAGE_SIZE); p += 8) {
		*(u64 *)(pages + p) = 0x0;
	}

	return (void *)pages;
}
/* Architecture specific VCPU initialization (RISC-V).
 * Allocates a per-VCPU exception stack on first init (reusing it on
 * reset) and programs the saved register frame so the VCPU resumes at
 * its start PC in supervisor mode.
 */
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	virtual_addr_t sp_exec;

	/* First time allocate exception stack */
	if (!vcpu->reset_count) {
		sp_exec = vmm_pagepool_alloc(VMM_PAGEPOOL_NORMAL,
				VMM_SIZE_TO_PAGE(CONFIG_IRQ_STACK_SIZE));
		if (!sp_exec) {
			return VMM_ENOMEM;
		}
		/* Stack grows downwards: point at the top of the area */
		sp_exec += CONFIG_IRQ_STACK_SIZE;
	} else {
		/* On reset, keep the exception stack allocated earlier */
		sp_exec = riscv_regs(vcpu)->sp_exec;
	}

	/* For both Orphan & Normal VCPUs */
	memset(riscv_regs(vcpu), 0, sizeof(arch_regs_t));
	riscv_regs(vcpu)->sepc = vcpu->start_pc;
	riscv_regs(vcpu)->sstatus = SR_SPP|SR_SPIE; /* TODO: */
	riscv_regs(vcpu)->sp = vcpu->stack_va +
			       (vcpu->stack_sz - ARCH_CACHE_LINE_SIZE);
	/* Keep SP 8-byte aligned */
	riscv_regs(vcpu)->sp = riscv_regs(vcpu)->sp & ~0x7;
	riscv_regs(vcpu)->sp_exec = sp_exec;

	/* TODO: For Normal VCPUs */

	return VMM_OK;
}
int versatile_clcd_setup(struct clcd_fb *fb, unsigned long framesize) { int rc; u32 use_dma, val[2]; void *screen_base; unsigned long smem_len; physical_addr_t smem_pa; if (!fb->dev->node) { return VMM_EINVALID; } if (vmm_devtree_read_u32(fb->dev->node, "use_dma", &use_dma)) { use_dma = 0; } if (use_dma) { smem_len = framesize; screen_base = (void *)vmm_host_alloc_pages( VMM_SIZE_TO_PAGE(smem_len), VMM_MEMORY_READABLE | VMM_MEMORY_WRITEABLE); if (!screen_base) { vmm_printf("CLCD: unable to alloc framebuffer\n"); return VMM_ENOMEM; } rc = vmm_host_va2pa((virtual_addr_t)screen_base, &smem_pa); if (rc) { return rc; } } else { rc = vmm_devtree_read_u32_array(fb->dev->node, "framebuffer", val, 2); if (rc) { return rc; } smem_pa = val[0]; smem_len = val[1]; if (smem_len < framesize) { return VMM_ENOMEM; } screen_base = (void *)vmm_host_iomap(smem_pa, smem_len); if (!screen_base) { vmm_printf("CLCD: unable to map framebuffer\n"); return VMM_ENOMEM; } } fb->fb.screen_base = screen_base; fb->fb.fix.smem_start = smem_pa; fb->fb.fix.smem_len = smem_len; return 0; }
/* Allocate a zeroed, page-backed VCPU intercept table.
 *
 * @size      table size in bytes (rounded up to whole pages)
 * @tbl_vaddr out: virtual address of the table (set only on success)
 *
 * Returns the table's physical address, or 0 on failure.
 *
 * Fixes: the original never checked the vmm_host_alloc_pages() result
 * (so va2pa/memset could operate on address 0) and leaked the pages
 * when vmm_host_va2pa() failed.
 */
physical_addr_t cpu_create_vcpu_intercept_table(size_t size,
						virtual_addr_t *tbl_vaddr)
{
	physical_addr_t phys = 0;
	virtual_addr_t vaddr;

	vaddr = vmm_host_alloc_pages(VMM_SIZE_TO_PAGE(size),
				     VMM_MEMORY_FLAGS_NORMAL);
	if (!vaddr) {
		return 0;
	}

	if (vmm_host_va2pa(vaddr, &phys) != VMM_OK) {
		/* Don't leak the pages when translation fails */
		vmm_host_free_pages(vaddr, VMM_SIZE_TO_PAGE(size));
		return 0;
	}

	memset((void *)vaddr, 0x00, size);
	*tbl_vaddr = vaddr;

	return phys;
}
/* Architecture specific VCPU teardown (RISC-V): release the per-VCPU
 * exception stack allocated by arch_vcpu_init() and wipe the saved
 * register frame.
 */
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	/* sp_exec holds the top of the stack; step back down to the
	 * base address originally returned by vmm_pagepool_alloc() */
	virtual_addr_t sp_exec =
		riscv_regs(vcpu)->sp_exec - CONFIG_IRQ_STACK_SIZE;

	/* TODO: For Normal VCPUs */

	/* For both Orphan & Normal VCPUs */

	/* Free-up exception stack */
	vmm_pagepool_free(VMM_PAGEPOOL_NORMAL, sp_exec,
			  VMM_SIZE_TO_PAGE(CONFIG_IRQ_STACK_SIZE));

	/* Clear arch registers */
	memset(riscv_regs(vcpu), 0, sizeof(arch_regs_t));

	return VMM_OK;
}
/* Create a mempool of @buf_count buffers, each @buf_size bytes,
 * carved out of one contiguous page allocation. Every buffer's
 * virtual address is pre-queued in the pool's FIFO of free buffers.
 *
 * Returns the new pool, or NULL on allocation failure (with all
 * partial resources released).
 */
struct mempool *mempool_create(u32 buf_size, u32 buf_count)
{
	u32 i;
	virtual_addr_t buf_va;
	struct mempool *pool;

	pool = vmm_zalloc(sizeof(struct mempool));
	if (!pool) {
		return NULL;
	}

	pool->f = fifo_alloc(sizeof(virtual_addr_t), buf_count);
	if (!pool->f) {
		goto fail_free_pool;
	}

	pool->buf_count = buf_count;
	pool->buf_size = buf_size;
	pool->page_count = VMM_SIZE_TO_PAGE(buf_size * buf_count);
	pool->page_base = vmm_host_alloc_pages(pool->page_count,
					       VMM_MEMORY_FLAGS_NORMAL);
	if (!pool->page_base) {
		goto fail_free_fifo;
	}

	/* Seed the free-buffer FIFO with every buffer's address */
	for (i = 0; i < pool->buf_count; i++) {
		buf_va = pool->page_base + i * buf_size;
		fifo_enqueue(pool->f, &buf_va, FALSE);
	}

	return pool;

fail_free_fifo:
	fifo_free(pool->f);
fail_free_pool:
	vmm_free(pool);
	return NULL;
}
/* Architecture specific VCPU initialization (RISC-V, hypervisor
 * extension variant). Sets up the exception stack and register frame
 * for all VCPUs, then — for Normal (guest) VCPUs only — validates the
 * device-tree compatible string, enables virtualization bits in
 * HSTATUS, allocates the private CSR context, and programs interrupt
 * (HIDELEG) and exception (HEDELEG) delegation.
 */
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	int rc = VMM_OK;
	const char *attr;
	virtual_addr_t sp_exec;

	/* First time allocate exception stack */
	if (!vcpu->reset_count) {
		sp_exec = vmm_pagepool_alloc(VMM_PAGEPOOL_NORMAL,
				VMM_SIZE_TO_PAGE(CONFIG_IRQ_STACK_SIZE));
		if (!sp_exec) {
			return VMM_ENOMEM;
		}
		/* Stack grows downwards: point at the top of the area */
		sp_exec += CONFIG_IRQ_STACK_SIZE;
	} else {
		/* On reset, keep the exception stack allocated earlier */
		sp_exec = riscv_regs(vcpu)->sp_exec;
	}

	/* For both Orphan & Normal VCPUs */
	memset(riscv_regs(vcpu), 0, sizeof(arch_regs_t));
	riscv_regs(vcpu)->sepc = vcpu->start_pc;
	riscv_regs(vcpu)->sstatus = SSTATUS_SPP | SSTATUS_SPIE; /* TODO: */
	riscv_regs(vcpu)->sp = vcpu->stack_va +
			       (vcpu->stack_sz - ARCH_CACHE_LINE_SIZE);
	/* Keep SP 8-byte aligned */
	riscv_regs(vcpu)->sp = riscv_regs(vcpu)->sp & ~0x7;
	riscv_regs(vcpu)->sp_exec = sp_exec;
	riscv_regs(vcpu)->hstatus = 0;

	/* For Orphan VCPUs we are done */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Following initialization for normal VCPUs only */

	/* Only the generic RISC-V guest type is supported */
	rc = vmm_devtree_read_string(vcpu->node,
				     VMM_DEVTREE_COMPATIBLE_ATTR_NAME,
				     &attr);
	if (rc) {
		goto done;
	}
#if __riscv_xlen == 64
	if (strcmp(attr, "riscv64,generic") != 0) {
#elif __riscv_xlen == 32
	if (strcmp(attr, "riscv32,generic") != 0) {
#else
#error "Unexpected __riscv_xlen"
#endif
		rc = VMM_EINVALID;
		goto done;
	}

	/* Update HSTATUS */
	riscv_regs(vcpu)->hstatus |= HSTATUS_SP2V;
	riscv_regs(vcpu)->hstatus |= HSTATUS_SP2P;
	riscv_regs(vcpu)->hstatus |= HSTATUS_SPV;

	/* First time initialization of private context */
	if (!vcpu->reset_count) {
		/* Alloc private context */
		vcpu->arch_priv = vmm_zalloc(sizeof(struct riscv_priv));
		if (!vcpu->arch_priv) {
			rc = VMM_ENOMEM;
			goto done;
		}
	}

	/* Update BS<xyz> (background supervisor CSR copies) */
	riscv_priv(vcpu)->bsstatus = 0; /* TODO: ??? */
	riscv_priv(vcpu)->bsie = 0;
	riscv_priv(vcpu)->bstvec = 0;
	riscv_priv(vcpu)->bsscratch = 0;
	riscv_priv(vcpu)->bsepc = 0;
	riscv_priv(vcpu)->bscause = 0;
	riscv_priv(vcpu)->bstval = 0;
	riscv_priv(vcpu)->bsip = 0;
	riscv_priv(vcpu)->bsatp = 0;

	/* Update HIDELEG: delegate S-mode software, timer, and
	 * external interrupts to the guest */
	riscv_priv(vcpu)->hideleg = 0;
	riscv_priv(vcpu)->hideleg |= SIP_SSIP;
	riscv_priv(vcpu)->hideleg |= SIP_STIP;
	riscv_priv(vcpu)->hideleg |= SIP_SEIP;

	/* Update HEDELEG: delegate the listed exception causes */
	riscv_priv(vcpu)->hedeleg = 0;
	riscv_priv(vcpu)->hedeleg |= (1U << CAUSE_MISALIGNED_FETCH);
	riscv_priv(vcpu)->hedeleg |= (1U << CAUSE_BREAKPOINT);
	riscv_priv(vcpu)->hedeleg |= (1U << CAUSE_USER_ECALL);
	riscv_priv(vcpu)->hedeleg |= (1U << CAUSE_FETCH_PAGE_FAULT);
	riscv_priv(vcpu)->hedeleg |= (1U << CAUSE_LOAD_PAGE_FAULT);
	riscv_priv(vcpu)->hedeleg |= (1U << CAUSE_STORE_PAGE_FAULT);

done:
	return rc;
}

/* Architecture specific VCPU teardown: release the exception stack,
 * wipe the register frame, and (for Normal VCPUs) free the private
 * context allocated by arch_vcpu_init().
 */
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	/* sp_exec holds the top of the stack; step back down to the
	 * base address originally returned by vmm_pagepool_alloc() */
	virtual_addr_t sp_exec =
		riscv_regs(vcpu)->sp_exec - CONFIG_IRQ_STACK_SIZE;

	/* For both Orphan & Normal VCPUs */

	/* Free-up exception stack */
	vmm_pagepool_free(VMM_PAGEPOOL_NORMAL, sp_exec,
			  VMM_SIZE_TO_PAGE(CONFIG_IRQ_STACK_SIZE));

	/* Clear arch registers */
	memset(riscv_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Free private context */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	return VMM_OK;
}
/* Free an intercept table previously created by
 * cpu_create_vcpu_intercept_table(). @size must match the size used
 * at creation so the same number of pages is released.
 */
int cpu_free_vcpu_intercept_table(virtual_addr_t vaddr, size_t size)
{
	return vmm_host_free_pages(vaddr, VMM_SIZE_TO_PAGE(size));
}
/* Release the CLCD framebuffer memory.
 *
 * NOTE(review): versatile_clcd_setup() obtains screen_base either from
 * vmm_host_alloc_pages() (use_dma path) or vmm_host_iomap() (fixed
 * "framebuffer" region path). This unconditionally frees pages, which
 * looks wrong for the iomap path (an unmap would be expected there) —
 * confirm which path actual board configurations use.
 */
void versatile_clcd_remove(struct clcd_fb *fb)
{
	vmm_host_free_pages((virtual_addr_t)fb->fb.screen_base,
			    VMM_SIZE_TO_PAGE(fb->fb.fix.smem_len));
}
/* Free page-table pages obtained from __arm_lpae_alloc_pages().
 * @size must match the allocation size so the same page count is
 * released. @cfg is unused here; presumably kept to mirror the
 * allocator's signature used by the io-pgtable code — confirm.
 */
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	vmm_host_free_pages((virtual_addr_t)pages, VMM_SIZE_TO_PAGE(size));
}