Example #1
0
/*
 * Debugfs-style "set" handler: ask the hypervisor to unmap the
 * user-configured memory range described by the module-level
 * mem_addr/mem_size globals.  The 'data' and 'val' parameters are
 * part of the callback signature and are not consulted here.
 */
static int hyp_debug_mem_unmap_set(void *data, u64 val)
{
	struct hvc_desc desc = { {0}, {0} };
	int rc;

	/* Range to unmap comes from the globals, not from 'val'. */
	desc.arg[0] = mem_addr;
	desc.arg[1] = mem_size;

	rc = hvc(HVC_FN_DBG_UNMAP_RANGE, &desc);
	if (rc)
		hyp_dbg_err("user specified hvc unmap range failed: %d\n", rc);

	return rc;
}
Example #2
0
/*
 * Debugfs-style "set" handler: ask the hypervisor to map the
 * user-configured memory range.  Address, size and the three
 * attribute words all come from module-level globals; 'data' and
 * 'val' are present only to satisfy the callback signature.
 */
static int hyp_debug_mem_map_set(void *data, u64 val)
{
	struct hvc_desc desc = { {0}, {0} };
	int rc;

	desc.arg[0] = mem_addr;
	desc.arg[1] = mem_size;
	/* Permission, cacheability and sharing attributes for the mapping. */
	desc.arg[2] = mem_perm_attr;
	desc.arg[3] = mem_cache_attr;
	desc.arg[4] = mem_share_attr;

	rc = hvc(HVC_FN_DBG_MAP_RANGE, &desc);
	if (rc)
		hyp_dbg_err("user specified hvc map range failed: %d\n", rc);

	return rc;
}
Example #3
0
/*
 * Forward an SPDM request to the hypervisor via an SiP HVC.
 *
 * Copies the caller's argument words into the HVC descriptor, issues
 * the call, then copies the hypervisor's return words back into
 * args->ret.  COPY_SIZE bounds each memcpy to the smaller of the two
 * buffers so neither side can be overrun.
 *
 * Returns the status code of the hvc() call itself; the command-level
 * result is delivered through args->ret.
 */
int __spdm_hyp_call(struct spdm_args *args, int num_args)
{
	struct hvc_desc desc = { { 0 } };
	int ret;

	/* Marshal the request words into the descriptor. */
	memcpy(desc.arg, args->arg,
		COPY_SIZE(sizeof(desc.arg), sizeof(args->arg)));

	SPDM_IPC_LOG("hvc call fn:0x%x, cmd:%llu, num_args:%d\n",
		HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0], num_args);

	ret = hvc(HVC_FN_SIP(SPDM_HYP_FNID), &desc);

	/* Unmarshal the hypervisor's reply back to the caller. */
	memcpy(args->ret, desc.ret,
		COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));

	SPDM_IPC_LOG("hvc return fn:0x%x cmd:%llu Ret[0]:%llu Ret[1]:%llu\n",
			HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0],
			desc.ret[0], desc.ret[1]);

	return ret;
}
Example #4
0
/*
 * Jumping to EL2 in the same C code represents an interesting challenge, since
 * it will switch from virtual addresses to physical ones, and then back to
 * virtual after setting up the EL2 MMU.
 * To this end, the setup_mmu and cpu_switch_el2 functions are naked and must
 * handle the stack themselves.
 */
int switch_exception_level(struct per_cpu *cpu_data)
{
	/* Vector table symbols provided elsewhere (presumably by the linker
	 * script / assembly stubs — confirm against the build). */
	extern unsigned long bootstrap_vectors;
	extern unsigned long hyp_vectors;

	/* Save the virtual address of the phys2virt function for later */
	phys2virt_t phys2virt = paging_phys2hvirt;
	virt2phys_t virt2phys = paging_hvirt2phys;
	/* Pre-translate every address we will need while the MMU is off:
	 * once at EL2 with translation disabled, only physical addresses
	 * are usable. */
	unsigned long phys_bootstrap = virt2phys(&bootstrap_vectors);
	struct per_cpu *phys_cpu_data = (struct per_cpu *)virt2phys(cpu_data);
	unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
	unsigned long trampoline_size = &trampoline_end - &trampoline_start;
	unsigned long stack_virt = (unsigned long)cpu_data->stack;
	unsigned long stack_phys = virt2phys((void *)stack_virt);
	u64 ttbr_el2;

	/* Check the paging structures as well as the MMU initialisation */
	unsigned long jailhouse_base_phys =
		paging_virt2phys(&hv_paging_structs, JAILHOUSE_BASE,
				 PAGE_DEFAULT_FLAGS);

	/*
	 * The hypervisor stub allows to fetch its current vector base by doing
	 * an HVC with r0 = -1. They will need to be restored when disabling
	 * jailhouse.
	 */
	if (saved_vectors == 0)
		saved_vectors = hvc(-1);

	/*
	 * paging struct won't be easily accessible when initializing el2, only
	 * the CPU datas will be readable at their physical address
	 */
	ttbr_el2 = (u64)virt2phys(hv_paging_structs.root_table) & TTBR_MASK;

	/*
	 * Mirror the mmu setup code, so that we are able to jump to the virtual
	 * address after enabling it.
	 * Those regions must fit on one page.
	 */

	/* Identity-map the trampoline code and this CPU's stack so execution
	 * can continue across the MMU-off window. set_id_map() returning
	 * non-zero means the region does not fit in an id-map slot. */
	if (set_id_map(0, trampoline_phys, trampoline_size) != 0)
		return -E2BIG;
	if (set_id_map(1, stack_phys, PAGE_SIZE) != 0)
		return -E2BIG;
	create_id_maps();

	/*
	 * Before doing anything hairy, we need to sync the caches with memory:
	 * they will be off at EL2. From this point forward and until the caches
	 * are re-enabled, we cannot write anything critical to memory.
	 */
	arch_cpu_dcaches_flush(CACHES_CLEAN);

	/* Naked function (see header comment): jumps to EL2 through the
	 * bootstrap vectors. Does not return to virtual addressing by itself. */
	cpu_switch_el2(phys_bootstrap, virt2phys);
	/*
	 * At this point, we are at EL2, and we work with physical addresses.
	 * The MMU needs to be initialised and execution must go back to virtual
	 * addresses before returning, or else we are pretty much doomed.
	 */

	setup_mmu_el2(phys_cpu_data, phys2virt, ttbr_el2);

	/* Sanity check */
	check_mmu_map(JAILHOUSE_BASE, jailhouse_base_phys);

	/* Set the new vectors once we're back to a sane, virtual state */
	arm_write_sysreg(HVBAR, &hyp_vectors);

	/* Remove the identity mapping */
	destroy_id_maps();

	return 0;
}