Example #1
int arch_map_device(void *paddr, void *vaddr, unsigned long size)
{
	return paging_create(&hv_paging_structs, (unsigned long)paddr, size,
			(unsigned long)vaddr,
			PAGE_DEFAULT_FLAGS | S1_PTE_FLAG_DEVICE,
			PAGING_NON_COHERENT);
}
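This maps one page-aligned device region into the hypervisor's stage-1 tables. A hypothetical call site (the address is made up for illustration, not taken from the source):

	/* identity-map one page of a memory-mapped UART for hypervisor use */
	int err = arch_map_device((void *)0x09000000, (void *)0x09000000,
				  PAGE_SIZE);
	if (err)
		printk("UART mapping failed: %d\n", err);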
Example #2
void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data)
{
	static DEFINE_SPINLOCK(map_lock);

	virt2phys_t virt2phys = paging_hvirt2phys;
	void *stack_virt = cpu_data->stack;
	unsigned long stack_phys = virt2phys((void *)stack_virt);
	unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
	struct registers *regs_phys =
			(struct registers *)virt2phys(guest_regs(cpu_data));

	/* Jump to the identity-mapped trampoline page before shutting down */
	void (*shutdown_fun_phys)(struct registers *, unsigned long);
	shutdown_fun_phys = (void *)virt2phys(shutdown_el2);

	/*
	 * No need to check for size or overlaps here; that has already been
	 * done, and the paging structures will soon be deleted. However, the
	 * cells' CPUs may execute this concurrently.
	 */
	spin_lock(&map_lock);
	paging_create(&hv_paging_structs, stack_phys, PAGE_SIZE, stack_phys,
		      PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
	paging_create(&hv_paging_structs, trampoline_phys, PAGE_SIZE,
		      trampoline_phys, PAGE_DEFAULT_FLAGS,
		      PAGING_NON_COHERENT);
	spin_unlock(&map_lock);

	arch_cpu_dcaches_flush(CACHES_CLEAN);

	/*
	 * Final shutdown:
	 * - disable the MMU whilst inside the trampoline page
	 * - reset the vectors
	 * - return to EL1
	 */
	shutdown_fun_phys(regs_phys, saved_vectors);

	__builtin_unreachable();
}
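The two paging_create calls above are identity mappings: source and destination addresses are equal, so the program counter and stack remain valid at the instant the MMU is switched off inside the trampoline. A minimal sketch of that pattern (an assumption for illustration, not a helper from the source):

	static int identity_map(unsigned long phys, unsigned long size)
	{
		/* virt == phys, so the mapping survives the MMU switch-off */
		return paging_create(&hv_paging_structs, phys, size, phys,
				     PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
	}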
Example #3
static enum mmio_result mmio_handle_subpage(void *arg, struct mmio_access *mmio)
{
	const struct jailhouse_memory *mem = arg;
	u64 perm = mmio->is_write ? JAILHOUSE_MEM_WRITE : JAILHOUSE_MEM_READ;
	unsigned long page_virt = TEMPORARY_MAPPING_BASE +
		this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES;
	unsigned long page_phys =
		((unsigned long)mem->phys_start + mmio->address) & PAGE_MASK;
	unsigned long virt_base;
	int err;

	/* check read/write access permissions */
	if (!(mem->flags & perm))
		goto invalid_access;

	/* the width bit matching the access size must be set in the flags */
	if (!((mmio->size << JAILHOUSE_MEM_IO_WIDTH_SHIFT) & mem->flags))
		goto invalid_access;

	/* naturally unaligned access needs to be allowed explicitly */
	if (mmio->address & (mmio->size - 1) &&
	    !(mem->flags & JAILHOUSE_MEM_IO_UNALIGNED))
		goto invalid_access;

	err = paging_create(&hv_paging_structs, page_phys, PAGE_SIZE,
			    page_virt, PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
			    PAGING_NON_COHERENT);
	if (err)
		goto invalid_access;

	/*
	 * This virt_base gives the following effective virtual address in
	 * mmio_perform_access:
	 *
	 *     page_virt + (mem->phys_start & ~PAGE_MASK) +
	 *         (mmio->address & ~PAGE_MASK)
	 *
	 * Reason: mmio_perform_access does addr = base + mmio->address.
	 */
	virt_base = page_virt + (mem->phys_start & ~PAGE_MASK) -
		(mmio->address & PAGE_MASK);
	mmio_perform_access((void *)virt_base, mmio);
	return MMIO_HANDLED;

invalid_access:
	panic_printk("FATAL: Invalid MMIO %s, address: %x, size: %x\n",
		     mmio->is_write ? "write" : "read",
		     mem->phys_start + mmio->address, mmio->size);
	return MMIO_ERROR;
}
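A worked example of the virt_base arithmetic with made-up numbers, assuming PAGE_SIZE is 0x1000: let page_virt = 0x40000000, mem->phys_start = 0x10010100 and mmio->address = 0x2004. Then

	page_phys = (0x10010100 + 0x2004) & ~0xfff = 0x10012000
	virt_base = 0x40000000 + 0x100 - 0x2000   = 0x3fffe100

and mmio_perform_access touches virt_base + mmio->address = 0x40000104, i.e. offset 0x104 into the temporary page, which the new mapping backs with physical address page_phys + 0x104 = 0x10012104 = mem->phys_start + mmio->address, as intended.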
Example #4
void init_system(void)
{
    uint8_t cpuid = smp_processor_id();

    setup_vector();

    setup_httbr((uint32_t) &__HYP_PGTABLE);

    setup_mem_attr();

    if (cpuid == 0) {
        // TODO(wonseok): console init will be moved to dev_init().
        console_init();

        libc_init();
    }

    irq_init();

    //enable_traps();

    if (cpuid == 0) {
        paging_create((addr_t) &__HYP_PGTABLE);

        platform_init();

        dev_init(); /* not implemented yet */

        vdev_init(); /* already implemented */

        timer_hw_init(NS_PL2_PTIMER_IRQ);

        setup_vm_mmap();

#ifdef CONFIG_SMP
        printf("wake up...other CPUs\n");
        secondary_smp_pen = 1;
#endif
    }
    printf("%s[%d]: CPU[%d]\n", __func__, __LINE__, cpuid);

    enable_mmu();

    start_hypervisor();
}
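A sketch of the counterpart on the secondary cores (an assumption, not from this source): with CONFIG_SMP they would typically spin on the pen released above before continuing with their own init path.

    extern volatile int secondary_smp_pen;

    static void secondary_wait(void)
    {
        while (!secondary_smp_pen)
            ;   /* wait for CPU 0 to finish global initialization */
    }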
Example #5
int apic_init(void)
{
	unsigned long apicbase = read_msr(MSR_IA32_APICBASE);
	int err;

	if (apicbase & APIC_BASE_EXTD) {
		apic_ops.read = read_x2apic;
		apic_ops.read_id = read_x2apic_id;
		apic_ops.write = write_x2apic;
		apic_ops.send_ipi = send_x2apic_ipi;
		using_x2apic = true;
	} else if (apicbase & APIC_BASE_EN) {
		xapic_page = page_alloc(&remap_pool, 1);
		if (!xapic_page)
			return trace_error(-ENOMEM);
		err = paging_create(&hv_paging_structs, XAPIC_BASE, PAGE_SIZE,
				    (unsigned long)xapic_page,
				    PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
				    PAGING_NON_COHERENT);
		if (err)
			return err;
		apic_ops.read = read_xapic;
		apic_ops.read_id = read_xapic_id;
		apic_ops.write = write_xapic;
		apic_ops.send_ipi = send_xapic_ipi;

		/* adjust reserved bits to xAPIC mode */
		apic_reserved_bits[APIC_REG_ID] = 0;  /* writes are ignored */
		apic_reserved_bits[APIC_REG_LDR] = 0; /* separately filtered */
		apic_reserved_bits[APIC_REG_DFR] = 0; /* separately filtered */
		apic_reserved_bits[APIC_REG_ICR_HI] = 0x00ffffff;
		apic_reserved_bits[APIC_REG_SELF_IPI] = -1; /* not available */
	} else
		return trace_error(-EIO);

	printk("Using x%sAPIC\n", using_x2apic ? "2" : "");

	return 0;
}
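For reference, the accessors installed in the xAPIC branch might look like the sketch below. This is an assumption based on the 16-byte register stride of the memory-mapped xAPIC, reusing the xapic_page mapping created above; mmio_read32()/mmio_write32() are assumed volatile 32-bit accessors.

	static u32 read_xapic(unsigned int reg)
	{
		return mmio_read32(xapic_page + (reg << 4));
	}

	static void write_xapic(unsigned int reg, u32 val)
	{
		mmio_write32(xapic_page + (reg << 4), val);
	}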
Example #6
int apic_init(void)
{
	unsigned long apicbase = read_msr(MSR_IA32_APICBASE);
	int err;

	if (apicbase & APIC_BASE_EXTD) {
		apic_ops.read = read_x2apic;
		apic_ops.read_id = read_x2apic_id;
		apic_ops.write = write_x2apic;
		apic_ops.send_ipi = send_x2apic_ipi;
		using_x2apic = true;
	} else if (apicbase & APIC_BASE_EN) {
		xapic_page = page_alloc(&remap_pool, 1);
		if (!xapic_page)
			return -ENOMEM;
		err = paging_create(&hv_paging_structs, XAPIC_BASE, PAGE_SIZE,
				    (unsigned long)xapic_page,
				    PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
				    PAGING_NON_COHERENT);
		if (err)
			return err;
		apic_ops.read = read_xapic;
		apic_ops.read_id = read_xapic_id;
		apic_ops.write = write_xapic;
		apic_ops.send_ipi = send_xapic_ipi;

		/* adjust reserved bits to xAPIC mode */
		apic_reserved_bits[0x0d] = 0; /* LDR (offset 0xd0), separately filtered */
		apic_reserved_bits[0x0e] = 0; /* DFR (offset 0xe0), separately filtered */
		apic_reserved_bits[0x31] = 0x00ffffff; /* ICR bits 32..63 (offset 0x310) */
		apic_reserved_bits[0x3f] = -1; /* Self IPI (0x3f0), x2APIC-only */
	} else
		return -EIO;

	printk("Using x%sAPIC\n", using_x2apic ? "2" : "");

	return 0;
}
Example #7
static void create_id_maps(void)
{
	unsigned long i;
	bool conflict;

	for (i = 0; i < ARRAY_SIZE(id_maps); i++) {
		conflict = (paging_virt2phys(&hv_paging_structs,
				id_maps[i].addr, PAGE_PRESENT_FLAGS) !=
				INVALID_PHYS_ADDR);
		if (conflict) {
			/*
			 * TODO: Get the flags, and update them if they are
			 * insufficient. Save the current flags in id_maps.
			 * This extraction should be implemented in the core.
			 */
		} else {
			paging_create(&hv_paging_structs, id_maps[i].addr,
				PAGE_SIZE, id_maps[i].addr, id_maps[i].flags,
				PAGING_NON_COHERENT);
		}
		id_maps[i].conflict = conflict;
	}
}
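The id_maps table itself is defined elsewhere in the source; a plausible shape for it, assumed here only to make the loop readable:

	static struct {
		unsigned long addr;	/* page to identity-map */
		unsigned long flags;	/* flags passed to paging_create */
		bool conflict;		/* address was already mapped */
	} id_maps[2];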
Example #8
static struct phys_ioapic *
ioapic_get_or_add_phys(const struct jailhouse_irqchip *irqchip)
{
	struct phys_ioapic *phys_ioapic;
	unsigned int n, index;
	int err;

	for_each_phys_ioapic(phys_ioapic, n)
		if (phys_ioapic->base_addr == irqchip->address)
			return phys_ioapic;

	if (num_phys_ioapics == IOAPIC_MAX_CHIPS)
		return trace_error(NULL);

	/* after the loop, phys_ioapic points at the first free slot */
	phys_ioapic->reg_base = page_alloc(&remap_pool, 1);
	if (!phys_ioapic->reg_base)
		return trace_error(NULL);
	err = paging_create(&hv_paging_structs, irqchip->address, PAGE_SIZE,
			    (unsigned long)phys_ioapic->reg_base,
			    PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
			    PAGING_NON_COHERENT);
	if (err) {
		page_free(&remap_pool, phys_ioapic->reg_base, 1);
		return NULL;
	}

	phys_ioapic->base_addr = irqchip->address;
	num_phys_ioapics++;

	for (index = 0; index < IOAPIC_NUM_PINS * 2; index++)
		phys_ioapic->shadow_redir_table[index / 2].raw[index % 2] =
			ioapic_reg_read(phys_ioapic,
					IOAPIC_REDIR_TBL_START + index);

	return phys_ioapic;
}
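The subtle point above is that for_each_phys_ioapic leaves phys_ioapic pointing at the first unused slot when the loop falls through, so the code can populate it directly. A sketch of such an iterator (an assumption about the macro, modeled on common Jailhouse-style helpers):

	#define for_each_phys_ioapic(ioapic, counter)			\
		for ((ioapic) = &phys_ioapics[0], (counter) = 0;	\
		     (counter) < num_phys_ioapics;			\
		     (ioapic)++, (counter)++)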