Example #1
void unload_guest_pcore(struct proc *p, int guest_pcoreid)
{
	struct guest_pcore *gpc;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	assert(gpc);
	spin_lock(&p->vmm.lock);
	assert(gpc->cpu != -1);
	vmx_unload_guest_pcore(gpc);
	gpc->cpu = -1;

	/* Save guest's xcr0 and restore Akaros's default. */
	gpc->xcr0 = rxcr0();
	lxcr0(__proc_global_info.x86_default_xcr0);

	/* We manage these MSRs manually. */
	gpc->msr_kern_gs_base = read_kern_gsbase();
	gpc->msr_star = read_msr(MSR_STAR);
	gpc->msr_lstar = read_msr(MSR_LSTAR);
	gpc->msr_sfmask = read_msr(MSR_SFMASK);

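	/* Point the kernel's GS base back at this core's per_cpu_info. */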
	write_kern_gsbase((uint64_t)pcpui);
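	/* Only pay for a WRMSR when the guest actually changed the value. */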
	if (gpc->msr_star != AKAROS_MSR_STAR)
		write_msr(MSR_STAR, AKAROS_MSR_STAR);
	if (gpc->msr_lstar != AKAROS_MSR_LSTAR)
		write_msr(MSR_LSTAR, AKAROS_MSR_LSTAR);
	if (gpc->msr_sfmask != AKAROS_MSR_SFMASK)
		write_msr(MSR_SFMASK, AKAROS_MSR_SFMASK);

	/* As soon as we unlock, this gpc can be started on another core */
	spin_unlock(&p->vmm.lock);
	pcpui->guest_pcoreid = -1;
}
Example #2
struct guest_pcore *load_guest_pcore(struct proc *p, int guest_pcoreid,
                                     bool *should_vmresume)
{
	struct guest_pcore *gpc;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	if (!gpc)
		return 0;
	assert(pcpui->guest_pcoreid == -1);
	spin_lock(&p->vmm.lock);
	if (gpc->cpu != -1) {
		spin_unlock(&p->vmm.lock);
		return 0;
	}
	gpc->cpu = core_id();
	spin_unlock(&p->vmm.lock);
	/* We've got dibs on the gpc; we don't need to hold the lock any longer. */
	pcpui->guest_pcoreid = guest_pcoreid;
	vmx_load_guest_pcore(gpc, should_vmresume);
	/* Load guest's xcr0 */
	lxcr0(gpc->xcr0);

	/* Manual MSR save/restore */
	write_kern_gsbase(gpc->msr_kern_gs_base);
	if (gpc->msr_star != AKAROS_MSR_STAR)
		write_msr(MSR_STAR, gpc->msr_star);
	if (gpc->msr_lstar != AKAROS_MSR_LSTAR)
		write_msr(MSR_LSTAR, gpc->msr_lstar);
	if (gpc->msr_sfmask != AKAROS_MSR_SFMASK)
		write_msr(MSR_SFMASK, gpc->msr_sfmask);

	return gpc;
}
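
Examples #1 and #2 are the claim and release halves of the same handoff. A minimal caller sketch, assuming a hypothetical vmx_enter_guest() as a stand-in for whatever actually performs the VMLAUNCH/VMRESUME (that name and run_guest_pcore_once() are illustrative, not from the source):

static int run_guest_pcore_once(struct proc *p, int guest_pcoreid)
{
	struct guest_pcore *gpc;
	bool should_vmresume;

	gpc = load_guest_pcore(p, guest_pcoreid, &should_vmresume);
	if (!gpc)
		return -1;	/* no such gpc, or it is loaded on another core */
	vmx_enter_guest(gpc, should_vmresume);	/* hypothetical entry point */
	unload_guest_pcore(p, guest_pcoreid);
	return 0;
}
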
Example #3
void unload_guest_pcore(struct proc *p, int guest_pcoreid)
{
	struct guest_pcore *gpc;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	assert(gpc);
	spin_lock(&p->vmm.lock);
	assert(gpc->cpu != -1);
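	/* Invalidate TLB entries tagged with this guest's EPT pointer. */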
	ept_sync_context(gpc_get_eptp(gpc));
	vmx_unload_guest_pcore(gpc);
	gpc->cpu = -1;

	/* Save guest's xcr0 and restore Akaros's default. */
	gpc->xcr0 = rxcr0();
	lxcr0(x86_default_xcr0);

	/* As soon as we unlock, this gpc can be started on another core */
	spin_unlock(&p->vmm.lock);
	pcpui->guest_pcoreid = -1;
}
Example #4
int vmm_poke_guest(struct proc *p, int guest_pcoreid)
{
	struct guest_pcore *gpc;
	int pcoreid;

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	if (!gpc) {
		set_error(ENOENT, "Bad guest_pcoreid %d", guest_pcoreid);
		return -1;
	}
	/* We're doing an unlocked peek; it could change immediately.  This is a
	 * best effort service. */
	pcoreid = ACCESS_ONCE(gpc->cpu);
	if (pcoreid == -1) {
		/* So we know that we'll miss the poke for the posted IRQ.  We could
		 * return an error.  However, error handling for this case isn't
		 * particularly helpful (yet).  The absence of the error does not mean
		 * the IRQ was posted.  We'll still return 0, meaning "the user didn't
		 * mess up; we tried." */
		return 0;
	}
	send_ipi(pcoreid, I_POKE_CORE);
	return 0;
}
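
Note the asymmetry in the returns: -1 after set_error() means the caller passed a bad ID, while 0 only means the kernel tried. A hypothetical caller sketch (poke_and_log() and its message are illustrative, not from the source):

static void poke_and_log(struct proc *p, int guest_pcoreid)
{
	/* Only a negative return is an error; 0 with no loaded gpc means
	 * the poke was silently dropped (best effort). */
	if (vmm_poke_guest(p, guest_pcoreid) < 0)
		printk("VMM: bad guest_pcoreid %d\n", guest_pcoreid);
}
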
Example #5
struct guest_pcore *load_guest_pcore(struct proc *p, int guest_pcoreid)
{
	struct guest_pcore *gpc;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	if (!gpc)
		return 0;
	assert(pcpui->guest_pcoreid == -1);
	spin_lock(&p->vmm.lock);
	if (gpc->cpu != -1) {
		spin_unlock(&p->vmm.lock);
		return 0;
	}
	gpc->cpu = core_id();
	spin_unlock(&p->vmm.lock);
	/* We've got dibs on the gpc; we don't need to hold the lock any longer. */
	pcpui->guest_pcoreid = guest_pcoreid;
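	/* Flush any stale TLB entries tagged with this guest's EPT pointer
	 * before reentering the guest. */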
	ept_sync_context(gpc_get_eptp(gpc));
	vmx_load_guest_pcore(gpc);
	/* Load guest's xcr0 */
	lxcr0(gpc->xcr0);
	return gpc;
}
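
Both load variants claim the gpc the same way: gpc->cpu doubles as an ownership flag, and p->vmm.lock only serializes the flip from -1 to a core id. A hypothetical helper distilling the pattern (try_claim_gpc() is not in the source; TRUE/FALSE are assumed to be the kernel's bool macros):

static bool try_claim_gpc(struct proc *p, struct guest_pcore *gpc)
{
	spin_lock(&p->vmm.lock);
	if (gpc->cpu != -1) {
		/* Already running on another core; caller must back off. */
		spin_unlock(&p->vmm.lock);
		return FALSE;
	}
	gpc->cpu = core_id();
	spin_unlock(&p->vmm.lock);
	/* Ours until the unload path sets gpc->cpu back to -1. */
	return TRUE;
}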