Example #1
int __init arch_smp_prepare_cpus(unsigned int max_cpus)
{
	int i, rc;
	physical_addr_t _start_secondary_pa;

	/* Get the physical address of the secondary startup code */
	rc = vmm_host_va2pa((virtual_addr_t)&_start_secondary, 
			    &_start_secondary_pa);
	if (rc) {
		return rc;
	}

	/* Update the cpu_present bitmap */
	for (i = 0; i < max_cpus; i++) {
		vmm_set_cpu_present(i, TRUE);
	}

	if (scu_base) {
		/* Enable snooping through SCU */
		scu_enable((void *)scu_base);
	}

	if (pmu_base) {
		/* Write the entry address for the secondary cpus */
		vmm_writel((u32)_start_secondary_pa, (void *)pmu_base + 0x814);
	}

	return VMM_OK;
}
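
All of the examples in this collection share the contract visible here: vmm_host_va2pa() takes a virtual_addr_t plus a pointer to a physical_addr_t out-parameter and returns an int error code, VMM_OK (zero) on success. A minimal sketch of that pattern, mirroring the printing idiom of Example #4 below; the helper print_pa_of() is hypothetical, only vmm_host_va2pa() itself comes from these examples:

#include <vmm_error.h>
#include <vmm_stdio.h>
#include <vmm_host_aspace.h>

/* Hypothetical helper, for illustration only: translate a mapped
 * host virtual address and print the resulting physical address. */
static int print_pa_of(virtual_addr_t va)
{
	int rc;
	physical_addr_t pa;

	rc = vmm_host_va2pa(va, &pa);
	if (rc) {
		/* va is not mapped in the host address space */
		return rc;
	}

	if (sizeof(physical_addr_t) == sizeof(u64)) {
		vmm_printf("PA: 0x%016llx\n", pa);
	} else {
		vmm_printf("PA: 0x%08x\n", pa);
	}

	return VMM_OK;
}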
Example #2
static int __init scu_cpu_prepare(unsigned int cpu)
{
	int rc;
	physical_addr_t _start_secondary_pa;

	/* Get the physical address of the secondary startup code */
	rc = vmm_host_va2pa((virtual_addr_t)&_start_secondary_nopen,
			    &_start_secondary_pa);
	if (rc) {
		return rc;
	}

	/* Enable snooping through SCU */
	if (scu_base) {
		scu_enable((void *)scu_base);
	}

	/* Write to clear address */
	if (clear_addr[cpu]) {
		vmm_writel(~0x0, (void *)clear_addr[cpu]);
	}

	/* Write to release address */
	if (release_addr[cpu]) {
		vmm_writel((u32)_start_secondary_pa,
					(void *)release_addr[cpu]);
	}

	return VMM_OK;
}
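
The clear/release pair above is a spin-table style boot handshake: each secondary CPU parks in early boot code polling its release address, and the boot CPU wakes it by publishing the physical address of the secondary entry point. A hedged sketch of the secondary side, written in C purely for illustration (real parking loops are assembly, typically WFE-based, and every name below is hypothetical):

typedef void (*secondary_entry_t)(void);

/* Hypothetical parking loop for a secondary CPU; release points
 * at that CPU's release address. */
static void secondary_park(volatile u32 *release)
{
	u32 entry_pa;

	/* Spin until the boot CPU writes a nonzero entry address */
	while (!(entry_pa = *release))
		;

	/* Jump to the published entry point */
	((secondary_entry_t)(unsigned long)entry_pa)();
}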
Example #3
int versatile_clcd_setup(struct clcd_fb *fb, unsigned long framesize)
{
	int rc;
	u32 use_dma, val[2];
	void *screen_base;
	unsigned long smem_len;
	physical_addr_t smem_pa;

	if (!fb->dev->node) {
		return VMM_EINVALID;
	}

	if (vmm_devtree_read_u32(fb->dev->node, "use_dma", &use_dma)) {
		use_dma = 0;
	}
	
	if (use_dma) {
		smem_len = framesize;

		screen_base = (void *)vmm_host_alloc_pages(
				VMM_SIZE_TO_PAGE(smem_len),
				VMM_MEMORY_READABLE | VMM_MEMORY_WRITEABLE);
		if (!screen_base) {
			vmm_printf("CLCD: unable to alloc framebuffer\n");
			return VMM_ENOMEM;
		}

		rc = vmm_host_va2pa((virtual_addr_t)screen_base, &smem_pa);
		if (rc) {
			/* Don't leak the freshly allocated framebuffer pages */
			vmm_host_free_pages((virtual_addr_t)screen_base,
					    VMM_SIZE_TO_PAGE(smem_len));
			return rc;
		}
	} else {
		rc = vmm_devtree_read_u32_array(fb->dev->node,
						"framebuffer", val, 2);
		if (rc) {
			return rc;
		}

		smem_pa = val[0];
		smem_len = val[1];

		if (smem_len < framesize) {
			return VMM_ENOMEM;
		}

		screen_base = (void *)vmm_host_iomap(smem_pa, smem_len);
		if (!screen_base) {
			vmm_printf("CLCD: unable to map framebuffer\n");
			return VMM_ENOMEM;
		}
	}

	fb->fb.screen_base	= screen_base;
	fb->fb.fix.smem_start	= smem_pa;
	fb->fb.fix.smem_len	= smem_len;

	return VMM_OK;
}
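
Here use_dma chooses between two framebuffer setups: with DMA the framebuffer is allocated from host pages and its physical address is recovered with vmm_host_va2pa() so the controller can be pointed at it; without DMA a fixed region is taken from the device tree's two-cell framebuffer property (base address, then length) and mapped into the host with vmm_host_iomap().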
Example #4
static int heap_info(struct vmm_chardev *cdev,
		     bool is_normal, virtual_addr_t heap_va,
		     u64 heap_sz, u64 heap_hksz, u64 heap_freesz)
{
	int rc;
	physical_addr_t heap_pa;
	u64 pre, heap_usesz;

	if (is_normal) {
		heap_usesz = heap_sz - heap_hksz - heap_freesz;
	} else {
		heap_usesz = heap_sz - heap_freesz;
	}

	if ((rc = vmm_host_va2pa(heap_va, &heap_pa))) {
		vmm_cprintf(cdev, "Error: Failed to get heap base PA\n");
		return rc;
	}

	vmm_cprintf(cdev, "Base Virtual Addr  : ");
	if (sizeof(virtual_addr_t) == sizeof(u64)) {
		vmm_cprintf(cdev, "0x%016llx\n", heap_va);
	} else {
		vmm_cprintf(cdev, "0x%08x\n", heap_va);
	}

	vmm_cprintf(cdev, "Base Physical Addr : ");
	if (sizeof(physical_addr_t) == sizeof(u64)) {
		vmm_cprintf(cdev, "0x%016llx\n", heap_pa);
	} else {
		vmm_cprintf(cdev, "0x%08x\n", heap_pa);
	}

	pre = 1000; /* Keep the division accurate up to 3 decimal places */

	vmm_cprintf(cdev, "House-Keeping Size : ");
	heap_hksz = (heap_hksz * pre) >> 10;
	vmm_cprintf(cdev, "%ll.%03ll KB\n", 
			udiv64(heap_hksz, pre), umod64(heap_hksz, pre));

	vmm_cprintf(cdev, "Used Space Size    : ");
	heap_usesz = (heap_usesz * pre) >> 10;
	vmm_cprintf(cdev, "%ll.%03ll KB\n", 
			udiv64(heap_usesz, pre), umod64(heap_usesz, pre));

	vmm_cprintf(cdev, "Free Space Size    : ");
	heap_freesz = (heap_freesz * pre) >> 10;
	vmm_cprintf(cdev, "%ll.%03ll KB\n", 
			udiv64(heap_freesz, pre), umod64(heap_freesz, pre));

	vmm_cprintf(cdev, "Total Size         : ");
	heap_sz = (heap_sz * pre) >> 10;
	vmm_cprintf(cdev, "%ll.%03ll KB\n", 
			udiv64(heap_sz, pre), umod64(heap_sz, pre));

	return VMM_OK;
}
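
The pre = 1000 scaling implements a small fixed-point trick: multiplying by 1000 before the >> 10 (divide by 1024) preserves three decimal places through pure integer math. For example, a 262144-byte size becomes (262144 * 1000) >> 10 = 256000, which udiv64()/umod64() split into 256 and 0, printed as 256.000 KB.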
Example #5
static int heap_init(struct vmm_heap_control *heap,
		     bool is_normal, const u32 size_kb, u32 mem_flags)
{
	int rc = VMM_OK;

	memset(heap, 0, sizeof(*heap));

	heap->heap_size = size_kb * 1024;
	heap->heap_start = (void *)vmm_host_alloc_pages(
					VMM_SIZE_TO_PAGE(heap->heap_size),
					mem_flags);
	if (!heap->heap_start) {
		return VMM_ENOMEM;
	}

	rc = vmm_host_va2pa((virtual_addr_t)heap->heap_start,
			    &heap->heap_start_pa);
	if (rc) {
		goto fail_free_pages;
	}

	/* 12.5 percent for house-keeping */
	heap->hk_size = (heap->heap_size) / 8;

	/* The book-keeping area of a non-normal heap
	 * always lives in the normal heap
	 */
	if (is_normal) {
		heap->hk_start = heap->heap_start;
		heap->mem_start = heap->heap_start + heap->hk_size;
		heap->mem_size = heap->heap_size - heap->hk_size;
	} else {
		heap->hk_start = vmm_malloc(heap->hk_size);
		if (!heap->hk_start) {
			rc = VMM_ENOMEM;
			goto fail_free_pages;
		}
		heap->mem_start = heap->heap_start;
		heap->mem_size = heap->heap_size;
	}

	rc = buddy_allocator_init(&heap->ba,
			  heap->hk_start, heap->hk_size,
			  (unsigned long)heap->mem_start, heap->mem_size,
			  HEAP_MIN_BIN, HEAP_MAX_BIN);
	if (rc) {
		goto fail_free_hk;
	}

	return VMM_OK;

fail_free_hk:
	/* The house-keeping area of a non-normal heap came from vmm_malloc() */
	if (!is_normal) {
		vmm_free(heap->hk_start);
	}
fail_free_pages:
	vmm_host_free_pages((virtual_addr_t)heap->heap_start,
			    VMM_SIZE_TO_PAGE(heap->heap_size));
	return rc;
}
Example #6
static int heap_va2pa(struct vmm_heap_control *heap,
		      virtual_addr_t va, physical_addr_t *pa)
{
	int rc = VMM_OK;

	if (((virtual_addr_t)heap->heap_start <= va) &&
	    (va < ((virtual_addr_t)heap->heap_start + heap->heap_size))) {
		*pa = (physical_addr_t)(va - (virtual_addr_t)heap->heap_start) +
			heap->heap_start_pa;
	} else {
		rc = vmm_host_va2pa(va, pa);
	}

	return rc;
}
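
Because heap_init() (Example #5) allocates the heap's backing pages in one contiguous block and records its base physical address, any address inside the heap translates with plain offset arithmetic: pa = (va - heap_start) + heap_start_pa. For instance, a va 0x100 bytes into a heap whose base PA is 0x80000000 yields 0x80000100 directly; only addresses outside the heap fall back to the full vmm_host_va2pa() lookup.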
Example #7
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	physical_addr_t paddr;

#if 0
	cpu = cpu_logical_map(cpu);
#endif /* 0 */
	if (VMM_OK != vmm_host_va2pa((virtual_addr_t)jump_addr,
				     &paddr)) {
		vmm_printf("Failed to get cpu jump physical address (0x%X)\n",
			   jump_addr);
	}
	writel_relaxed(paddr,
		       src_base + SRC_GPR1 + cpu * 8);
}
Example #8
physical_addr_t cpu_create_vcpu_intercept_table(size_t size, virtual_addr_t *tbl_vaddr)
{
	physical_addr_t phys = 0;

	virtual_addr_t vaddr = vmm_host_alloc_pages(VMM_SIZE_TO_PAGE(size),
						    VMM_MEMORY_FLAGS_NORMAL);
	if (!vaddr)
		return 0;

	if (vmm_host_va2pa(vaddr, &phys) != VMM_OK) {
		/* Don't leak the freshly allocated pages on failure */
		vmm_host_free_pages(vaddr, VMM_SIZE_TO_PAGE(size));
		return 0;
	}

	memset((void *)vaddr, 0x00, size);

	*tbl_vaddr = vaddr;

	return phys;
}
Example #9
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data,
			  physical_addr_t iova,
			  physical_addr_t paddr,
			  size_t size,
			  arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	int rc;
	physical_addr_t pa;
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return VMM_EINVALID;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       cfg);
		if (!cptep)
			return VMM_ENOMEM;

		rc = vmm_host_va2pa((virtual_addr_t)cptep, &pa);
		if (rc) {
			/* Free the just-allocated table page on failure */
			__arm_lpae_free_pages(cptep, ARM_LPAE_GRANULE(data),
					      cfg);
			return rc;
		}

		pte = pa | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
Example #10
void *vmm_dma_zalloc_phy(virtual_size_t size,
			 physical_addr_t *paddr)
{
	int ret;
	void *cpu_addr;
	dma_addr_t dma_addr = 0;

#if defined(CONFIG_IOMMU)
	/* TODO: Manage cases with IOMMU */
	BUG();
#endif /* defined(CONFIG_IOMMU) */

	cpu_addr = vmm_dma_zalloc(size);
	if (!cpu_addr)
		return cpu_addr;

	ret = vmm_host_va2pa((virtual_addr_t)cpu_addr, &dma_addr);
	if (VMM_OK != ret) {
		/* Don't hand back memory whose physical address is unknown */
		vmm_dma_free(cpu_addr);
		return NULL;
	}
	*paddr = dma_addr;

	return cpu_addr;
}
Example #11
static int __init scu_cpu_prepare(unsigned int cpu)
{
	int rc;
	u32 val = 0;
	physical_addr_t _start_secondary_pa;

	/* Get the physical address of the secondary startup code */
	rc = vmm_host_va2pa((virtual_addr_t)&_start_secondary_nopen,
			    &_start_secondary_pa);
	if (rc) {
		return rc;
	}

	/* Enable snooping through SCU */
	if (scu_base) {
		scu_enable((void *)scu_base);
	}

	/* Write to clear address */
	if (clear_addr[cpu]) {
		arch_wmb();
		val = ~0x0;
		vmm_host_memory_write(clear_addr[cpu],
				      &val, sizeof(u32), FALSE);
	}

	/* Write to release address */
	if (release_addr[cpu]) {
		arch_wmb();
		val = (u32)_start_secondary_pa;
		vmm_host_memory_write(release_addr[cpu],
				      &val, sizeof(u32), FALSE);
	}

	return VMM_OK;
}
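
Compared with Example #2, this variant issues an explicit arch_wmb() before each update and writes through vmm_host_memory_write(), which takes the physical address directly (the trailing FALSE requests a non-cacheable access), so clear_addr and release_addr do not need a permanent mapping in the host virtual address space.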
Example #12
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	int rc;
	u64 reg;
	physical_addr_t pa;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	arch_smp_wmb();

	rc = vmm_host_va2pa((virtual_addr_t)data->pgd, &pa);
	if (rc)
		goto out_free_pgd;

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = pa;
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_pgd:
	__arm_lpae_free_pages(data->pgd, data->pgd_size, cfg);
out_free_data:
	vmm_free(data);
	return NULL;
}