Example #1
bool vcpu_handle_io_access(struct registers *guest_regs,
			   struct vcpu_io_intercept *io)
{
	struct per_cpu *cpu_data = this_cpu_data();
	int result = 0;

	/* string and REP-prefixed instructions are not supported */
	if (io->rep_or_str)
		goto invalid_access;

	result = x86_pci_config_handler(guest_regs, cpu_data->cell, io->port,
					io->in, io->size);
	if (result == 0)
		result = i8042_access_handler(guest_regs, io->port,
				              io->in, io->size);

	if (result == 1) {
		vcpu_skip_emulated_instruction(io->inst_len);
		return true;
	}

invalid_access:
	panic_printk("FATAL: Invalid PIO %s, port: %x size: %d\n",
		     io->in ? "read" : "write", io->port, io->size);
	panic_printk("PCI address port: %x\n",
		     cpu_data->cell->pci_addr_port_val);
	return false;
}
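The dispatch relies on a simple convention: a handler returns 1 when it has claimed and emulated the access, 0 when the port is none of its business, and a negative value when it has already reported an invalid access itself; anything other than 1 ends up in the FATAL path above. Below is a minimal sketch of a handler following that convention; the port constant DEBUG_UART_BASE and the handler itself are illustrative, not part of Jailhouse.

static int uart_access_handler(struct registers *guest_regs, u16 port,
			       bool dir_in, unsigned int size)
{
	if (port != DEBUG_UART_BASE)
		return 0;		/* not our port, keep dispatching */

	if (size != 1)
		return -1;		/* only byte-wide access makes sense */

	if (dir_in) {
		/* emulate a read: merge the byte into the guest's AL */
		guest_regs->rax &= ~(unsigned long)0xff;
		guest_regs->rax |= inb(port);
	} else {
		/* pass the write through to the physical port */
		outb((u8)guest_regs->rax, port);
	}
	return 1;			/* handled, caller skips the instruction */
}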
Example #2
/*
 * Program Priority Mask to the original Non-secure priority such that
 * Non-secure interrupts may preempt Secure execution, viz. during Yielding SMC
 * calls. The 'preempt_ret_code' parameter indicates the Yielding SMC's return
 * value in case the call was preempted.
 *
 * This API is expected to be invoked before delegating a yielding SMC to Secure
 * EL1. I.e. within the window of secure execution after Non-secure context is
 * saved (after entry into EL3) and Secure context is restored (before entering
 * Secure EL1).
 */
void ehf_allow_ns_preemption(uint64_t preempt_ret_code)
{
	cpu_context_t *ns_ctx;
	unsigned int old_pmr __unused;
	pe_exc_data_t *pe_data = this_cpu_data();

	/*
	 * We should have been notified earlier of entering secure world, and
	 * therefore have stashed the Non-secure priority mask.
	 */
	assert(pe_data->ns_pri_mask != 0);

	/* Make sure no priority levels are active when requesting this */
	if (has_valid_pri_activations(pe_data)) {
		ERROR("PE %lx has priority activations: 0x%x\n",
				read_mpidr_el1(), pe_data->active_pri_bits);
		panic();
	}

	/*
	 * Program preempted return code to x0 right away so that, if the
	 * Yielding SMC was indeed preempted before a dispatcher gets a chance
	 * to populate it, the caller would find the correct return value.
	 */
	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);
	write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code);

	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);

	EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);

	pe_data->ns_pri_mask = 0;
}
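A sketch of how a Secure Payload Dispatcher might invoke this in the window the comment describes, i.e. after the Non-secure context has been saved and just before the Secure EL1 context is restored. The function name is illustrative and this is not the actual TSPD code; TSP_PREEMPTED stands for the dispatcher's "call was preempted" return code.

static uintptr_t spd_delegate_yielding_smc(void *secure_handle)
{
	/* From here on, a Non-secure interrupt may preempt the Secure
	 * Payload; if it does, the NS caller finds TSP_PREEMPTED in x0. */
	ehf_allow_ns_preemption(TSP_PREEMPTED);

	/* Restore Secure EL1 context and enter the Secure Payload. */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	SMC_RET0(secure_handle);
}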
Example #3
/*
 * After leaving Non-secure world, stash current Non-secure Priority Mask, and
 * set Priority Mask to the highest Non-secure priority so that Non-secure
 * interrupts cannot preempt Secure execution.
 *
 * If the current running priority is in the secure range, or if there are
 * outstanding priority activations, this function does nothing.
 *
 * This function subscribes to the 'cm_exited_normal_world' event published by
 * the Context Management Library.
 */
static void *ehf_exited_normal_world(const void *arg)
{
	unsigned int run_pri;
	pe_exc_data_t *pe_data = this_cpu_data();

	/* If the running priority is in the secure range, do nothing */
	run_pri = plat_ic_get_running_priority();
	if (IS_PRI_SECURE(run_pri))
		return 0;

	/* Do nothing if there are explicit activations */
	if (has_valid_pri_activations(pe_data))
		return 0;

	assert(pe_data->ns_pri_mask == 0);

	pe_data->ns_pri_mask =
		plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);

	/* The previous Priority Mask is not expected to be in secure range */
	if (IS_PRI_SECURE(pe_data->ns_pri_mask)) {
		ERROR("Priority Mask (0x%x) already in secure range\n",
				pe_data->ns_pri_mask);
		panic();
	}

	EHF_LOG("Priority Mask: 0x%x => 0x%x\n", pe_data->ns_pri_mask,
			GIC_HIGHEST_NS_PRIORITY);

	return 0;
}
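The handler is wired up through the Context Management publish/subscribe framework; in the EHF source this amounts to a one-line registration of the form:

SUBSCRIBE_TO_EVENT(cm_exited_normal_world, ehf_exited_normal_world);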
Example #4
void vcpu_handle_hypercall(struct registers *guest_regs,
			   struct vcpu_execution_state *x_state)
{
	bool long_mode = !!(x_state->efer & EFER_LMA);
	unsigned long arg_mask = long_mode ? (u64)-1 : (u32)-1;
	struct per_cpu *cpu_data = this_cpu_data();
	unsigned long code = guest_regs->rax;

	vcpu_skip_emulated_instruction(X86_INST_LEN_VMCALL);

	if ((!long_mode && (x_state->rflags & X86_RFLAGS_VM)) ||
	    (x_state->cs & 3) != 0) {
		guest_regs->rax = -EPERM;
		return;
	}

	guest_regs->rax = hypercall(code, guest_regs->rdi & arg_mask,
				    guest_regs->rsi & arg_mask);
	if (guest_regs->rax == -ENOSYS)
		printk("CPU %d: Unknown vmcall %d, RIP: %p\n",
		       cpu_data->cpu_id, code,
		       x_state->rip - X86_INST_LEN_VMCALL);

	if (code == JAILHOUSE_HC_DISABLE && guest_regs->rax == 0)
		vcpu_deactivate_vmm(guest_regs);
}
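For reference, the guest-side half of this ABI puts the hypercall code in RAX and up to two arguments in RDI and RSI, and reads the result back from RAX. Below is a minimal sketch for a 64-bit guest on Intel VT-x (AMD SVM uses VMMCALL instead); the helper name is an assumption and the inline asm is not taken from the Jailhouse driver.

static inline long hypercall2(unsigned long code, unsigned long arg1,
			      unsigned long arg2)
{
	long result;

	/* code in RAX, args in RDI/RSI, result returned in RAX */
	asm volatile("vmcall"
		     : "=a" (result)
		     : "a" (code), "D" (arg1), "S" (arg2)
		     : "memory");
	return result;
}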
Example #5
int i8042_access_handler(u16 port, bool dir_in, unsigned int size)
{
	union registers *guest_regs = &this_cpu_data()->guest_regs;
	const struct jailhouse_cell_desc *config = this_cell()->config;
	const u8 *pio_bitmap = jailhouse_cell_pio_bitmap(config);
	u8 val;

	if (port == I8042_CMD_REG &&
	    config->pio_bitmap_size >= (I8042_CMD_REG + 7) / 8 &&
	    !(pio_bitmap[I8042_CMD_REG / 8] & (1 << (I8042_CMD_REG % 8)))) {
		if (size != 1)
			goto invalid_access;
		if (dir_in) {
			guest_regs->rax &= ~BYTE_MASK(1);
			guest_regs->rax |= inb(I8042_CMD_REG);
		} else {
			val = (u8)guest_regs->rax;
			if (val == I8042_CMD_WRITE_CTRL_PORT ||
			    (val & I8042_CMD_PULSE_CTRL_PORT) ==
			    I8042_CMD_PULSE_CTRL_PORT)
				goto invalid_access;
			outb(val, I8042_CMD_REG);
		}
		return 1;
	}
	return 0;

invalid_access:
	panic_printk("FATAL: Invalid write to i8042 controller port\n");
	return -1;
}
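The bitmap arithmetic is easier to follow with concrete numbers. The worked example below assumes I8042_CMD_REG is the conventional command/status port 0x64:

/*
 *   I8042_CMD_REG / 8       = 0x64 / 8 = 12   -> byte 12 of pio_bitmap
 *   I8042_CMD_REG % 8       = 0x64 % 8 = 4    -> bit 4 within that byte
 *   (I8042_CMD_REG + 7) / 8 = 0x6b / 8 = 13   -> pio_bitmap_size must be >= 13
 *
 * Only when that bit is clear does the moderation path above run; otherwise
 * the handler returns 0 and leaves the access to the caller.
 */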
Example #6
/*
 * Mark priority active by setting the corresponding bit in active_pri_bits and
 * programming the priority mask.
 *
 * This API is to be used as part of delegating to lower ELs other than for
 * interrupts; e.g. while handling synchronous exceptions.
 *
 * This API is expected to be invoked before restoring context (Secure or
 * Non-secure) in preparation for the respective dispatch.
 */
void ehf_activate_priority(unsigned int priority)
{
	int cur_pri_idx;
	unsigned int old_mask, run_pri, idx;
	pe_exc_data_t *pe_data = this_cpu_data();

	/*
	 * Query interrupt controller for the running priority, or idle priority
	 * if no interrupts are being handled. The requested priority must be
	 * less (higher priority) than the active running priority.
	 */
	run_pri = plat_ic_get_running_priority();
	if (priority >= run_pri) {
		ERROR("Running priority higher (0x%x) than requested (0x%x)\n",
				run_pri, priority);
		panic();
	}

	/*
	 * If there were priority activations already, the requested priority
	 * must be less (higher priority) than the current highest priority
	 * activation so far.
	 */
	cur_pri_idx = get_pe_highest_active_idx(pe_data);
	idx = pri_to_idx(priority);
	if ((cur_pri_idx != EHF_INVALID_IDX) &&
			(idx >= ((unsigned int) cur_pri_idx))) {
		ERROR("Activation priority mismatch: req=0x%x current=0x%x\n",
				priority, IDX_TO_PRI(cur_pri_idx));
		panic();
	}

	/* Set the bit corresponding to the requested priority */
	pe_data->active_pri_bits |= PRI_BIT(idx);

	/*
	 * Program priority mask for the activated level. Check that the new
	 * priority mask is setting a higher priority level than the existing
	 * mask.
	 */
	old_mask = plat_ic_set_priority_mask(priority);
	if (priority >= old_mask) {
		ERROR("Requested priority (0x%x) lower than Priority Mask (0x%x)\n",
				priority, old_mask);
		panic();
	}

	/*
	 * If this is the first activation, save the priority mask. This will be
	 * restored after the last deactivation.
	 */
	if (cur_pri_idx == EHF_INVALID_IDX)
		pe_data->init_pri_mask = (uint8_t) old_mask;

	EHF_LOG("activate prio=%d\n", get_pe_highest_active_idx(pe_data));
}
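A sketch of the intended bracketing around a non-interrupt dispatch, with an illustrative dispatcher function that is not EHF code: activation happens before the lower-EL context is restored, deactivation after the handler has returned and its context has been saved again.

static void dispatch_event_at(unsigned int dispatch_pri)
{
	/* Raise to the dispatch priority before entering the lower EL. */
	ehf_activate_priority(dispatch_pri);

	/*
	 * ... restore the target context, ERET to the lower-EL handler,
	 * re-enter EL3 when it completes and save its context again ...
	 */

	/* Drop back to the previous mask once the dispatch has concluded. */
	ehf_deactivate_priority(dispatch_pri);
}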
Example #7
/*
 * Mark priority inactive by clearing the corresponding bit in active_pri_bits,
 * and programming the priority mask.
 *
 * This API is expected to be used as part of delegating to lower ELs other
 * than for interrupts; e.g. while handling synchronous exceptions.
 *
 * This API is expected to be invoked after saving context (Secure or
 * Non-secure), having concluded the respective dispatch.
 */
void ehf_deactivate_priority(unsigned int priority)
{
	int cur_pri_idx;
	pe_exc_data_t *pe_data = this_cpu_data();
	unsigned int old_mask, run_pri, idx;

	/*
	 * Query interrupt controller for the running priority, or idle priority
	 * if no interrupts are being handled. The requested priority must be
	 * less (higher priority) than the active running priority.
	 */
	run_pri = plat_ic_get_running_priority();
	if (priority >= run_pri) {
		ERROR("Running priority higher (0x%x) than requested (0x%x)\n",
				run_pri, priority);
		panic();
	}

	/*
	 * Deactivation is allowed only when there are priority activations, and
	 * the deactivation priority level must match the current activated
	 * priority.
	 */
	cur_pri_idx = get_pe_highest_active_idx(pe_data);
	idx = pri_to_idx(priority);
	if ((cur_pri_idx == EHF_INVALID_IDX) ||
			(idx != ((unsigned int) cur_pri_idx))) {
		ERROR("Deactivation priority mismatch: req=0x%x current=0x%x\n",
				priority, IDX_TO_PRI(cur_pri_idx));
		panic();
	}

	/* Clear bit corresponding to highest priority */
	pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1u);

	/*
	 * Restore priority mask corresponding to the next priority, or the
	 * one stashed earlier if there are no more to deactivate.
	 */
	cur_pri_idx = get_pe_highest_active_idx(pe_data);
	if (cur_pri_idx == EHF_INVALID_IDX)
		old_mask = plat_ic_set_priority_mask(pe_data->init_pri_mask);
	else
		old_mask = plat_ic_set_priority_mask(priority);

	if (old_mask > priority) {
		ERROR("Deactivation priority (0x%x) lower than Priority Mask (0x%x)\n",
				priority, old_mask);
		panic();
	}

	EHF_LOG("deactivate prio=%d\n", get_pe_highest_active_idx(pe_data));
}
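Two details above are worth spelling out: active_pri_bits is a bitmap with one bit per priority index, and lower indices mean higher priority, so the step x &= (x - 1) clears the lowest set bit, i.e. the current highest-priority activation. Under that assumption, get_pe_highest_active_idx reduces to a count-trailing-zeros; the sketch below is illustrative, not the EHF's own helper.

static int highest_active_idx_sketch(unsigned int active_pri_bits)
{
	if (active_pri_bits == 0U)
		return EHF_INVALID_IDX;		/* nothing active */

	/* lowest set bit == numerically smallest index == highest priority */
	return (int)__builtin_ctz(active_pri_bits);
}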
Example #8
static enum mmio_result mmio_handle_subpage(void *arg, struct mmio_access *mmio)
{
	const struct jailhouse_memory *mem = arg;
	u64 perm = mmio->is_write ? JAILHOUSE_MEM_WRITE : JAILHOUSE_MEM_READ;
	unsigned long page_phys =
		((unsigned long)mem->phys_start + mmio->address) & PAGE_MASK;
	unsigned long virt_base;
	int err;

	/* check read/write access permissions */
	if (!(mem->flags & perm))
		goto invalid_access;

	/* width bit according to access size needs to be set */
	if (!((mmio->size << JAILHOUSE_MEM_IO_WIDTH_SHIFT) & mem->flags))
		goto invalid_access;

	/* naturally unaligned access needs to be allowed explicitly */
	if (mmio->address & (mmio->size - 1) &&
	    !(mem->flags & JAILHOUSE_MEM_IO_UNALIGNED))
		goto invalid_access;

	err = paging_create(&this_cpu_data()->pg_structs, page_phys, PAGE_SIZE,
			    TEMPORARY_MAPPING_BASE,
			    PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
			    PAGING_NON_COHERENT);
	if (err)
		goto invalid_access;

	/*
	 * This virt_base gives the following effective virtual address in
	 * mmio_perform_access:
	 *
	 *     TEMPORARY_MAPPING_BASE + (mem->phys_start & ~PAGE_MASK) +
	 *         (mmio->address & ~PAGE_MASK)
	 *
	 * Reason: mmio_perform_access does addr = base + mmio->address.
	 */
	virt_base = TEMPORARY_MAPPING_BASE + (mem->phys_start & ~PAGE_MASK) -
		(mmio->address & PAGE_MASK);
	mmio_perform_access((void *)virt_base, mmio);
	return MMIO_HANDLED;

invalid_access:
	panic_printk("FATAL: Invalid MMIO %s, address: %lx, size: %x\n",
		     mmio->is_write ? "write" : "read",
		     (unsigned long)mem->phys_start + mmio->address,
		     mmio->size);
	return MMIO_ERROR;
}
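The virt_base computation is easiest to verify with concrete numbers. The worked example below uses made-up values and a 4 KiB page (PAGE_MASK == ~0xfffUL), with TMB standing for TEMPORARY_MAPPING_BASE:

/*
 *   mem->phys_start = 0xfed00040, mmio->address = 0x0c
 *
 *   page_phys = (0xfed00040 + 0x0c) & PAGE_MASK                 = 0xfed00000
 *   virt_base = TMB + (0xfed00040 & ~PAGE_MASK) - (0x0c & PAGE_MASK)
 *             = TMB + 0x40 - 0x0                                = TMB + 0x40
 *
 *   mmio_perform_access then touches virt_base + mmio->address  = TMB + 0x4c,
 *   and since TMB maps page_phys, that is physical 0xfed0004c, i.e. exactly
 *   mem->phys_start + mmio->address.
 */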
Example #9
void apic_irq_handler(void)
{
	struct per_cpu *cpu_data = this_cpu_data();

	cpu_data->num_clear_apic_irqs++;
	if (cpu_data->num_clear_apic_irqs > 256)
		/*
		 * Do not try to ack infinitely. Once we should have handled
		 * all possible vectors, raise the task priority to prevent
		 * further interrupts. TPR will be cleared again on exit from
		 * apic_clear(). This way we will leave with some bits in IRR
		 * set - better than spinning endlessly.
		 */
		apic_ops.write(APIC_REG_TPR, 0xff);

	apic_ops.write(APIC_REG_EOI, APIC_EOI_ACK);
}
Example #10
bool vcpu_handle_pt_violation(struct registers *guest_regs,
			      struct vcpu_pf_intercept *pf)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct guest_paging_structures pg_structs;
	struct vcpu_execution_state x_state;
	struct mmio_access access;
	int result = 0;
	u32 val;

	vcpu_vendor_get_execution_state(&x_state);

	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto invalid_access;

	access = mmio_parse(x_state.rip, &pg_structs, pf->is_write);
	if (!access.inst_len || access.size != 4)
		goto invalid_access;

	if (pf->is_write)
		val = ((unsigned long *)guest_regs)[access.reg];

	result = ioapic_access_handler(cpu_data->cell, pf->is_write,
			               pf->phys_addr, &val);
	if (result == 0)
		result = pci_mmio_access_handler(cpu_data->cell, pf->is_write,
						 pf->phys_addr, &val);

	if (result == 0)
		result = iommu_mmio_access_handler(pf->is_write,
				                   pf->phys_addr, &val);

	if (result == 1) {
		if (!pf->is_write)
			((unsigned long *)guest_regs)[access.reg] = val;
		vcpu_skip_emulated_instruction(access.inst_len);
		return true;
	}

invalid_access:
	/* report only unhandled access failures */
	if (result == 0)
		panic_printk("FATAL: Invalid MMIO/RAM %s, addr: %p\n",
			     pf->is_write ? "write" : "read", pf->phys_addr);
	return false;
}
Example #11
void apic_clear(void)
{
	unsigned int maxlvt = (apic_ops.read(APIC_REG_LVR) >> 16) & 0xff;
	unsigned int xlc = (apic_ext_features() >> 16) & 0xff;
	int n;

	/* Enable the APIC - the cell may have turned it off */
	apic_ops.write(APIC_REG_SVR, APIC_SVR_ENABLE_APIC | 0xff);

	/* Mask all available LVTs */
	apic_mask_lvt(APIC_REG_LVTERR);
	if (maxlvt >= 6)
		apic_mask_lvt(APIC_REG_LVTCMCI);
	apic_mask_lvt(APIC_REG_LVTT);
	if (maxlvt >= 5)
		apic_mask_lvt(APIC_REG_LVTTHMR);
	if (maxlvt >= 4)
		apic_mask_lvt(APIC_REG_LVTPC);
	apic_mask_lvt(APIC_REG_LVT0);
	apic_mask_lvt(APIC_REG_LVT1);
	for (n = 0; n < xlc; n++)
		apic_mask_lvt(APIC_REG_XLVT0 + n);

	/* Clear ISR. This is done in reverse direction as EOI
	 * clears highest-priority interrupt ISR bit. */
	for (n = APIC_NUM_INT_REGS-1; n >= 0; n--)
		while (apic_ops.read(APIC_REG_ISR0 + n) != 0)
			apic_ops.write(APIC_REG_EOI, APIC_EOI_ACK);

	/* Consume pending interrupts to clear IRR.
	 * Need to reset TPR to ensure interrupt delivery. */
	apic_ops.write(APIC_REG_TPR, 0);
	this_cpu_data()->num_clear_apic_irqs = 0;
	enable_irq();
	cpu_relax();
	disable_irq();

	/* Finally, reset the TPR again and disable the APIC */
	apic_ops.write(APIC_REG_TPR, 0);
	apic_ops.write(APIC_REG_SVR, 0xff);
}
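A short walk-through of why the reverse ISR scan terminates: each of the APIC_NUM_INT_REGS ISR words (8 words covering vectors 0-255) holds 32 in-service bits, and every EOI write retires the highest-numbered vector currently in service, so draining from the top word downward never re-populates a word that has already been seen as zero. A hypothetical sequence:

/*
 *   ISR7 = 0x00000005  -> vectors 224 and 226 in service; two EOI writes
 *                         retire 226 then 224, ISR7 reads 0 afterwards
 *   ISR6..ISR2 = 0     -> the inner while loop never runs for these
 *   ISR1 = 0x80000000  -> vector 63 in service; one EOI write retires it
 */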
Example #12
/*
 * Conclude Secure execution and prepare for return to Non-secure world. Restore
 * the Non-secure Priority Mask previously stashed upon leaving Non-secure
 * world.
 *
 * If the current running priority is in the secure range, or if there are
 * outstanding priority activations, this function does nothing.
 *
 * This function subscribes to the 'cm_entering_normal_world' event published by
 * the Context Management Library.
 */
static void *ehf_entering_normal_world(const void *arg)
{
	unsigned int old_pmr, run_pri;
	pe_exc_data_t *pe_data = this_cpu_data();

	/* If the running priority is in the secure range, do nothing */
	run_pri = plat_ic_get_running_priority();
	if (IS_PRI_SECURE(run_pri))
		return 0;

	/*
	 * If there are explicit activations, do nothing. The Priority Mask will
	 * be restored upon the last deactivation.
	 */
	if (has_valid_pri_activations(pe_data))
		return 0;

	/* Do nothing if we don't have a valid Priority Mask to restore */
	if (pe_data->ns_pri_mask == 0)
		return 0;

	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);

	/*
	 * When exiting secure world, the current Priority Mask must be
	 * GIC_HIGHEST_NS_PRIORITY (as set during entry), or the Non-secure
	 * priority mask set upon calling ehf_allow_ns_preemption()
	 */
	if ((old_pmr != GIC_HIGHEST_NS_PRIORITY) &&
			(old_pmr != pe_data->ns_pri_mask)) {
		ERROR("Invalid Priority Mask (0x%x) restored\n", old_pmr);
		panic();
	}

	EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);

	pe_data->ns_pri_mask = 0;

	return 0;
}
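As with its counterpart above, this handler is registered through the pubsub framework, presumably with:

SUBSCRIBE_TO_EVENT(cm_entering_normal_world, ehf_entering_normal_world);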
Example #13
void x86_check_events(void)
{
	struct per_cpu *cpu_data = this_cpu_data();
	int sipi_vector = -1;

	spin_lock(&cpu_data->control_lock);

	do {
		if (cpu_data->init_signaled && !cpu_data->suspend_cpu) {
			x86_enter_wait_for_sipi(cpu_data);
			break;
		}

		cpu_data->cpu_suspended = true;

		spin_unlock(&cpu_data->control_lock);

		while (cpu_data->suspend_cpu)
			cpu_relax();

		if (cpu_data->shutdown_cpu) {
			apic_clear();
			vcpu_exit(cpu_data);
			asm volatile("1: hlt; jmp 1b");
		}

		spin_lock(&cpu_data->control_lock);

		cpu_data->cpu_suspended = false;

		if (cpu_data->sipi_vector >= 0) {
			if (!cpu_data->failed) {
				cpu_data->wait_for_sipi = false;
				sipi_vector = cpu_data->sipi_vector;
			}
			cpu_data->sipi_vector = -1;
		}
	} while (cpu_data->init_signaled);

	spin_unlock(&cpu_data->control_lock);

	/*
	 * ... the excerpt is truncated here; the original goes on to act on
	 * sipi_vector (e.g. parking the vCPU or restarting it at the
	 * received vector) before returning.
	 */
}
Example #14
/*
 * Return whether Secure execution has explicitly allowed Non-secure interrupts
 * to preempt itself, viz. during Yielding SMC calls.
 */
unsigned int ehf_is_ns_preemption_allowed(void)
{
	unsigned int run_pri;
	pe_exc_data_t *pe_data = this_cpu_data();

	/* If running priority is in secure range, return false */
	run_pri = plat_ic_get_running_priority();
	if (IS_PRI_SECURE(run_pri))
		return 0;

	/*
	 * If Non-secure preemption was permitted by calling
	 * ehf_allow_ns_preemption() earlier:
	 *
	 * - There wouldn't have been priority activations;
	 * - We would have cleared the stashed Non-secure Priority Mask.
	 */
	if (has_valid_pri_activations(pe_data))
		return 0;
	if (pe_data->ns_pri_mask != 0)
		return 0;

	return 1;
}
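A sketch of how a dispatcher's Non-secure interrupt handler might consult this before tearing down the Secure context; the handler name and structure are illustrative, not the actual TSPD code.

static uint64_t spd_ns_interrupt_handler(uint32_t id, uint32_t flags,
					 void *handle, void *cookie)
{
	if (ehf_is_ns_preemption_allowed() == 0U) {
		/* Secure world has not opted in; let it keep running. */
		return 0U;
	}

	/*
	 * Preempt the Secure Payload: save its context and resume the
	 * Non-secure caller, which finds the preempted return code already
	 * programmed into x0 by ehf_allow_ns_preemption().
	 */
	cm_el1_sysregs_context_save(SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return 0U;
}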