Code example #1
/*
 * Return 0 if the selector 'sel' is within the limits of the GDT/LDT
 * and non-zero otherwise.
 */
static int
desc_table_limit_check(struct vmctx *ctx, int vcpu, uint16_t sel)
{
	uint64_t base;
	uint32_t limit, access;
	int error, reg;

	reg = ISLDT(sel) ? VM_REG_GUEST_LDTR : VM_REG_GUEST_GDTR;
	error = vm_get_desc(ctx, vcpu, reg, &base, &limit, &access);
	assert(error == 0);

	if (reg == VM_REG_GUEST_LDTR) {
		if (SEG_DESC_UNUSABLE(access) || !SEG_DESC_PRESENT(access))
			return (-1);
	}

	if (limit < SEL_LIMIT(sel))
		return (-1);
	else
		return (0);
}
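
Several of these examples rely on the selector helper macros ISLDT, IDXSEL, SEL_START and SEL_LIMIT. As a hedged sketch (the real definitions live in the platform's segments.h and in the bhyve task-switch code, not in this listing), an x86 selector packs the RPL in bits 0-1, the table indicator in bit 2, and the descriptor index in bits 3-15:

/*
 * Sketch only: approximate equivalents of the selector macros used
 * above, not the definitions from the original headers.
 */
#define SKETCH_ISLDT(sel)      ((sel) & 0x4)             /* TI bit: 1 = LDT, 0 = GDT */
#define SKETCH_IDXSEL(sel)     (((sel) >> 3) & 0x1fff)   /* descriptor index */
#define SKETCH_SEL_START(sel)  ((sel) & ~0x7)            /* byte offset of the 8-byte slot */
#define SKETCH_SEL_LIMIT(sel)  ((sel) | 0x7)             /* last byte of that slot */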
Code example #2
File: db_interface.c  Project: 2trill2spill/freebsd
int
db_segsize(struct trapframe *tfp)
{
	struct proc_ldt *plp;
	struct segment_descriptor *sdp;
	int sel;

	if (tfp == NULL)
	    return (32);
	if (tfp->tf_eflags & PSL_VM)
	    return (16);
	sel = tfp->tf_cs & 0xffff;
	if (sel == GSEL(GCODE_SEL, SEL_KPL))
	    return (32);
	/* Rare cases follow.  User mode cases are currently unreachable. */
	if (ISLDT(sel)) {
	    plp = curthread->td_proc->p_md.md_ldt;
	    sdp = (plp != NULL) ? &plp->ldt_sd : &ldt[0].sd;
	} else {
	    sdp = &gdt[PCPU_GET(cpuid) * NGDT].sd;
	}
	return (sdp[IDXSEL(sel)].sd_def32 == 0 ? 16 : 32);
}
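
db_segsize compares tf_cs against GSEL(GCODE_SEL, SEL_KPL), the kernel code selector. As a hedged sketch (names assumed, not taken from this file), GSEL composes a GDT selector from a descriptor index and a requested privilege level:

/* Sketch only: a GDT selector has the TI bit (bit 2) clear. */
#define SKETCH_GSEL(idx, rpl)   (((idx) << 3) | (rpl))   /* e.g. SEL_KPL = 0, SEL_UPL = 3 */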
Code example #3
/*
 * Read the TSS descriptor referenced by 'sel' into 'desc'.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
static int
read_tss_descriptor(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
    uint16_t sel, struct user_segment_descriptor *desc)
{
	struct vm_guest_paging sup_paging;
	int error;

	assert(!ISLDT(sel));
	assert(IDXSEL(sel) != 0);

	/* Fetch the new TSS descriptor */
	if (desc_table_limit_check(ctx, vcpu, sel)) {
		if (ts->reason == TSR_IRET)
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		else
			sel_exception(ctx, vcpu, IDT_GP, sel, ts->ext);
		return (1);
	}

	sup_paging = ts->paging;
	sup_paging.cpl = 0;		/* implicit supervisor mode */
	error = desc_table_read(ctx, vcpu, &sup_paging, sel, desc);
	return (error);
}
Code example #4
/*
 * Read/write the segment descriptor 'desc' into the GDT/LDT slot referenced
 * by the selector 'sel'.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
static int
desc_table_rw(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint16_t sel, struct user_segment_descriptor *desc, bool doread)
{
	struct iovec iov[2];
	uint64_t base;
	uint32_t limit, access;
	int error, reg;

	reg = ISLDT(sel) ? VM_REG_GUEST_LDTR : VM_REG_GUEST_GDTR;
	error = vm_get_desc(ctx, vcpu, reg, &base, &limit, &access);
	assert(error == 0);
	assert(limit >= SEL_LIMIT(sel));

	error = vm_copy_setup(ctx, vcpu, paging, base + SEL_START(sel),
	    sizeof(*desc), doread ? PROT_READ : PROT_WRITE, iov, nitems(iov));
	if (error == 0) {
		if (doread)
			vm_copyin(ctx, vcpu, iov, desc, sizeof(*desc));
		else
			vm_copyout(ctx, vcpu, desc, iov, sizeof(*desc));
	}
	return (error);
}
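
The desc_table_read() and desc_table_write() helpers called by the other examples are presumably thin wrappers around desc_table_rw(); a sketch consistent with those call sites:

/* Sketch only: likely wrappers, inferred from the call sites below. */
static int
desc_table_read(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint16_t sel, struct user_segment_descriptor *desc)
{
	return (desc_table_rw(ctx, vcpu, paging, sel, desc, true));
}

static int
desc_table_write(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint16_t sel, struct user_segment_descriptor *desc)
{
	return (desc_table_rw(ctx, vcpu, paging, sel, desc, false));
}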
Code example #5
File: sys_machdep.c  Project: avsm/openbsd-xen-sys
int
amd64_set_ldt(struct proc *p, void *args, register_t *retval)
{
	int error, i, n;
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	struct amd64_set_ldt_args ua;
	union descriptor desc;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

#ifdef	LDT_DEBUG
	printf("amd64_set_ldt: start=%d num=%d descs=%p\n", ua.start,
	    ua.num, ua.desc);
#endif

	if (ua.start < 0 || ua.num < 0)
		return (EINVAL);
	if (ua.start > 8192 || (ua.start + ua.num) > 8192)
		return (EINVAL);

	/*
	 * XXX LOCKING
	 */

	/* allocate user ldt */
	if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
		size_t old_len, new_len;
		union descriptor *old_ldt, *new_ldt;

		if (pmap->pm_flags & PMF_USER_LDT) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
			old_ldt = pmap->pm_ldt;
		} else {
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
			pmap->pm_ldt_len = 512;
		}
		while ((ua.start + ua.num) > pmap->pm_ldt_len)
			pmap->pm_ldt_len *= 2;
		new_len = pmap->pm_ldt_len * sizeof(union descriptor);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, new_len);
		memcpy(new_ldt, old_ldt, old_len);
		memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);
		pmap->pm_ldt = new_ldt;

		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_free(pmap);
		else
			pmap->pm_flags |= PMF_USER_LDT;
		ldt_alloc(pmap, new_ldt, new_len);
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);

		/*
		 * XXX Need to notify other processors which may be
		 * XXX currently using this pmap that they need to
		 * XXX re-load the LDT.
		 */

		if (old_ldt != ldt)
			uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
#ifdef LDT_DEBUG
		printf("amd64_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt);
#endif
	}

	if (pcb == curpcb)
		savectx(curpcb);
	error = 0;

	/* Check descriptors for access violations. */
	for (i = 0, n = ua.start; i < ua.num; i++, n++) {
		if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0)
			return (error);

		switch (desc.sd.sd_type) {
		case SDT_SYSNULL:
			desc.sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc.gd.gd_p != 0 && !ISLDT(desc.gd.gd_selector) &&
			    ((IDXSEL(desc.gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc.gd.gd_selector)].sd.sd_dpl !=
				 SEL_UPL)))
				return (EACCES);
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc.sd.sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/* Only care if it's present. */
			if (desc.sd.sd_p != 0)
				return (EACCES);
			break;
		}

		if (desc.sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc.sd.sd_dpl != SEL_UPL)
				return (EACCES);
		}
	}

	/* Now actually replace the descriptors. */
	for (i = 0, n = ua.start; i < ua.num; i++, n++) {
		if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0)
			goto out;

		pmap->pm_ldt[n] = desc;
	}

	*retval = ua.start;

out:
	return (error);
}
Code example #6
static int
fasttrap_do_seg(fasttrap_tracepoint_t *tp, struct reg *rp, uintptr_t *addr)
{
	proc_t *p = curproc;
#ifdef __i386__
	struct segment_descriptor *desc;
#else
	struct user_segment_descriptor *desc;
#endif
	uint16_t sel = 0, ndx, type;
	uintptr_t limit;

	switch (tp->ftt_segment) {
	case FASTTRAP_SEG_CS:
		sel = rp->r_cs;
		break;
	case FASTTRAP_SEG_DS:
		sel = rp->r_ds;
		break;
	case FASTTRAP_SEG_ES:
		sel = rp->r_es;
		break;
	case FASTTRAP_SEG_FS:
		sel = rp->r_fs;
		break;
	case FASTTRAP_SEG_GS:
		sel = rp->r_gs;
		break;
	case FASTTRAP_SEG_SS:
		sel = rp->r_ss;
		break;
	}

	/*
	 * Make sure the given segment register specifies a user-privilege
	 * selector rather than a kernel selector.
	 */
	if (ISPL(sel) != SEL_UPL)
		return (-1);

	ndx = IDXSEL(sel);

	/*
	 * Check the bounds and grab the descriptor out of the specified
	 * descriptor table.
	 */
	if (ISLDT(sel)) {
#ifdef __i386__
		if (ndx > p->p_md.md_ldt->ldt_len)
			return (-1);

		desc = (struct segment_descriptor *)
		    p->p_md.md_ldt[ndx].ldt_base;
#else
		if (ndx > max_ldt_segment)
			return (-1);

		desc = (struct user_segment_descriptor *)
		    p->p_md.md_ldt[ndx].ldt_base;
#endif

	} else {
		if (ndx >= NGDT)
			return (-1);

#ifdef __i386__
		desc = &gdt[ndx].sd;
#else
		desc = &gdt[ndx];
#endif
	}

	/*
	 * The descriptor must have user privilege level and it must be
	 * present in memory.
	 */
	if (desc->sd_dpl != SEL_UPL || desc->sd_p != 1)
		return (-1);

	type = desc->sd_type;

	/*
	 * If the S bit in the type field is not set, this descriptor can
	 * only be used in system context.
	 */
	if ((type & 0x10) != 0x10)
		return (-1);

	limit = USD_GETLIMIT(desc) * (desc->sd_gran ? PAGESIZE : 1);

	if (tp->ftt_segment == FASTTRAP_SEG_CS) {
		/*
		 * The code/data bit and readable bit must both be set.
		 */
		if ((type & 0xa) != 0xa)
			return (-1);

		if (*addr > limit)
			return (-1);
	} else {
		/*
		 * The code/data bit must be clear.
		 */
		if ((type & 0x8) != 0)
			return (-1);

		/*
		 * If the expand-down bit is clear, we just check the limit as
		 * it would naturally be applied. Otherwise, we need to check
	 * that the address is in the range [limit + 1 .. 0xffff] or
	 * [limit + 1 .. 0xffffffff] depending on whether the default
	 * operand size bit is set.
		 */
		if ((type & 0x4) == 0) {
			if (*addr > limit)
				return (-1);
		} else if (desc->sd_def32) {
			if (*addr < limit + 1 || 0xffff < *addr)
				return (-1);
		} else {
			if (*addr < limit + 1 || 0xffffffff < *addr)
				return (-1);
		}
	}

	*addr += USD_GETBASE(desc);

	return (0);
}
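
The magic constants tested in fasttrap_do_seg map onto the x86 descriptor type field (sd_type is five bits, with the S bit at the top). A hedged reference sketch, using names of my own rather than the source's:

/* Sketch only: the bits behind the 0x10 / 0x8 / 0x4 / 0x2 tests above. */
#define SKETCH_TYPE_S         0x10   /* S bit: set = code/data, clear = system */
#define SKETCH_TYPE_CODE      0x08   /* set = code segment, clear = data segment */
#define SKETCH_TYPE_EC        0x04   /* expand-down (data) / conforming (code) */
#define SKETCH_TYPE_RW        0x02   /* readable (code) / writable (data) */
#define SKETCH_TYPE_ACCESSED  0x01   /* accessed */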
Code example #7
int
vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	struct seg_desc nt;
	struct tss32 oldtss, newtss;
	struct vm_task_switch *task_switch;
	struct vm_guest_paging *paging, sup_paging;
	struct user_segment_descriptor nt_desc, ot_desc;
	struct iovec nt_iov[2], ot_iov[2];
	uint64_t cr0, ot_base;
	uint32_t eip, ot_lim, access;
	int error, ext, minlimit, nt_type, ot_type, vcpu;
	enum task_switch_reason reason;
	uint16_t nt_sel, ot_sel;

	task_switch = &vmexit->u.task_switch;
	nt_sel = task_switch->tsssel;
	ext = vmexit->u.task_switch.ext;
	reason = vmexit->u.task_switch.reason;
	paging = &vmexit->u.task_switch.paging;
	vcpu = *pvcpu;

	assert(paging->cpu_mode == CPU_MODE_PROTECTED);

	/*
	 * Section 4.6, "Access Rights" in Intel SDM Vol 3.
	 * The following page table accesses are implicitly supervisor mode:
	 * - accesses to GDT or LDT to load segment descriptors
	 * - accesses to the task state segment during task switch
	 */
	sup_paging = *paging;
	sup_paging.cpl = 0;	/* implicit supervisor mode */

	/* Fetch the new TSS descriptor */
	error = read_tss_descriptor(ctx, vcpu, task_switch, nt_sel, &nt_desc);
	CHKERR(error);

	nt = usd_to_seg_desc(&nt_desc);

	/* Verify the type of the new TSS */
	nt_type = SEG_DESC_TYPE(nt.access);
	if (nt_type != SDT_SYS386BSY && nt_type != SDT_SYS386TSS &&
	    nt_type != SDT_SYS286BSY && nt_type != SDT_SYS286TSS) {
		sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
		goto done;
	}

	/* TSS descriptor must have present bit set */
	if (!SEG_DESC_PRESENT(nt.access)) {
		sel_exception(ctx, vcpu, IDT_NP, nt_sel, ext);
		goto done;
	}

	/*
	 * TSS must have a minimum length of 104 bytes for a 32-bit TSS and
	 * 44 bytes for a 16-bit TSS.
	 */
	if (nt_type == SDT_SYS386BSY || nt_type == SDT_SYS386TSS)
		minlimit = 104 - 1;
	else if (nt_type == SDT_SYS286BSY || nt_type == SDT_SYS286TSS)
		minlimit = 44 - 1;
	else
		minlimit = 0;

	assert(minlimit > 0);
	if (nt.limit < minlimit) {
		sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
		goto done;
	}

	/* TSS must be busy if task switch is due to IRET */
	if (reason == TSR_IRET && !TSS_BUSY(nt_type)) {
		sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
		goto done;
	}

	/*
	 * TSS must be available (not busy) if task switch reason is
	 * CALL, JMP, exception or interrupt.
	 */
	if (reason != TSR_IRET && TSS_BUSY(nt_type)) {
		sel_exception(ctx, vcpu, IDT_GP, nt_sel, ext);
		goto done;
	}

	/* Fetch the new TSS */
	error = vm_copy_setup(ctx, vcpu, &sup_paging, nt.base, minlimit + 1,
	    PROT_READ | PROT_WRITE, nt_iov, nitems(nt_iov));
	CHKERR(error);
	vm_copyin(ctx, vcpu, nt_iov, &newtss, minlimit + 1);

	/* Get the old TSS selector from the guest's task register */
	ot_sel = GETREG(ctx, vcpu, VM_REG_GUEST_TR);
	if (ISLDT(ot_sel) || IDXSEL(ot_sel) == 0) {
		/*
		 * This might happen if a task switch was attempted without
		 * ever loading the task register with LTR. In this case the
		 * TR would contain the values from power-on:
		 * (sel = 0, base = 0, limit = 0xffff).
		 */
		sel_exception(ctx, vcpu, IDT_TS, ot_sel, task_switch->ext);
		goto done;
	}

	/* Get the old TSS base and limit from the guest's task register */
	error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_TR, &ot_base, &ot_lim,
	    &access);
	assert(error == 0);
	assert(!SEG_DESC_UNUSABLE(access) && SEG_DESC_PRESENT(access));
	ot_type = SEG_DESC_TYPE(access);
	assert(ot_type == SDT_SYS386BSY || ot_type == SDT_SYS286BSY);

	/* Fetch the old TSS descriptor */
	error = read_tss_descriptor(ctx, vcpu, task_switch, ot_sel, &ot_desc);
	CHKERR(error);

	/* Get the old TSS */
	error = vm_copy_setup(ctx, vcpu, &sup_paging, ot_base, minlimit + 1,
	    PROT_READ | PROT_WRITE, ot_iov, nitems(ot_iov));
	CHKERR(error);
	vm_copyin(ctx, vcpu, ot_iov, &oldtss, minlimit + 1);

	/*
	 * Clear the busy bit in the old TSS descriptor if the task switch is
	 * due to an IRET or JMP instruction.
	 */
	if (reason == TSR_IRET || reason == TSR_JMP) {
		ot_desc.sd_type &= ~0x2;
		error = desc_table_write(ctx, vcpu, &sup_paging, ot_sel,
		    &ot_desc);
		CHKERR(error);
	}

	if (nt_type == SDT_SYS286BSY || nt_type == SDT_SYS286TSS) {
		fprintf(stderr, "Task switch to 16-bit TSS not supported\n");
		return (VMEXIT_ABORT);
	}

	/* Save processor state in old TSS */
	eip = vmexit->rip + vmexit->inst_length;
	tss32_save(ctx, vcpu, task_switch, eip, &oldtss, ot_iov);

	/*
	 * If the task switch was triggered for any reason other than IRET
	 * then set the busy bit in the new TSS descriptor.
	 */
	if (reason != TSR_IRET) {
		nt_desc.sd_type |= 0x2;
		error = desc_table_write(ctx, vcpu, &sup_paging, nt_sel,
		    &nt_desc);
		CHKERR(error);
	}

	/* Update task register to point at the new TSS */
	SETREG(ctx, vcpu, VM_REG_GUEST_TR, nt_sel);

	/* Update the hidden descriptor state of the task register */
	nt = usd_to_seg_desc(&nt_desc);
	update_seg_desc(ctx, vcpu, VM_REG_GUEST_TR, &nt);

	/* Set CR0.TS */
	cr0 = GETREG(ctx, vcpu, VM_REG_GUEST_CR0);
	SETREG(ctx, vcpu, VM_REG_GUEST_CR0, cr0 | CR0_TS);

	/*
	 * We are now committed to the task switch. Any exceptions encountered
	 * after this point will be handled in the context of the new task and
	 * the saved instruction pointer will belong to the new task.
	 */
	vmexit->rip = newtss.tss_eip;
	vmexit->inst_length = 0;

	/* Load processor state from new TSS */
	error = tss32_restore(ctx, vcpu, task_switch, ot_sel, &newtss, nt_iov);
	CHKERR(error);

	/*
	 * Section "Interrupt Tasks" in Intel SDM, Vol 3: if an exception
	 * caused an error code to be generated, this error code is copied
	 * to the stack of the new task.
	 */
	if (task_switch->errcode_valid) {
		assert(task_switch->ext);
		assert(task_switch->reason == TSR_IDT_GATE);
		error = push_errcode(ctx, vcpu, &task_switch->paging, nt_type,
		    task_switch->errcode);
		CHKERR(error);
	}

	/*
	 * Treatment of virtual-NMI blocking if NMI is delivered through
	 * a task gate.
	 *
	 * Section "Architectural State Before A VM Exit", Intel SDM, Vol3:
	 * If the virtual NMIs VM-execution control is 1, VM entry injects
	 * an NMI, and delivery of the NMI causes a task switch that causes
	 * a VM exit, virtual-NMI blocking is in effect before the VM exit
	 * commences.
	 *
	 * Thus, virtual-NMI blocking is in effect at the time of the task
	 * switch VM exit.
	 */

	/*
	 * Treatment of virtual-NMI unblocking on IRET from NMI handler task.
	 *
	 * Section "Changes to Instruction Behavior in VMX Non-Root Operation"
	 * If the "virtual NMIs" control is 1, IRET removes any virtual-NMI blocking.
	 * This unblocking of virtual-NMI occurs even if IRET causes a fault.
	 *
	 * Thus, virtual-NMI blocking is cleared at the time of the task switch
	 * VM exit.
	 */

	/*
	 * If the task switch was triggered by an event delivered through
	 * the IDT then extinguish the pending event from the vcpu's
	 * exitintinfo.
	 */
	if (task_switch->reason == TSR_IDT_GATE) {
		error = vm_set_intinfo(ctx, vcpu, 0);
		assert(error == 0);
	}

	/*
	 * XXX should inject debug exception if 'T' bit is 1
	 */
done:
	return (VMEXIT_CONTINUE);
}
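
The busy-bit updates above (ot_desc.sd_type &= ~0x2, nt_desc.sd_type |= 0x2) work because the available and busy 32-bit TSS descriptor types differ only in bit 1 of the type field. A hedged sketch of the TSS_BUSY test (assumed, not taken from the source):

/*
 * Sketch only: SDT_SYS386TSS is type 9 (1001b) and SDT_SYS386BSY is
 * type 11 (1011b), so toggling 0x2 flips a TSS between available and busy.
 */
static int
sketch_tss_busy(int type)
{
	return ((type & 0x2) != 0);
}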
Code example #8
/*
 * Validate the descriptor 'seg_desc' associated with 'segment'.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
static int
validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
    int segment, struct seg_desc *seg_desc)
{
	struct vm_guest_paging sup_paging;
	struct user_segment_descriptor usd;
	int error, idtvec;
	int cpl, dpl, rpl;
	uint16_t sel, cs;
	bool ldtseg, codeseg, stackseg, dataseg, conforming;

	ldtseg = codeseg = stackseg = dataseg = false;
	switch (segment) {
	case VM_REG_GUEST_LDTR:
		ldtseg = true;
		break;
	case VM_REG_GUEST_CS:
		codeseg = true;
		break;
	case VM_REG_GUEST_SS:
		stackseg = true;
		break;
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
		dataseg = true;
		break;
	default:
		assert(0);
	}

	/* Get the segment selector */
	sel = GETREG(ctx, vcpu, segment);

	/* LDT selector must point into the GDT */
	if (ldtseg && ISLDT(sel)) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	/* Descriptor table limit check */
	if (desc_table_limit_check(ctx, vcpu, sel)) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	/* NULL selector */
	if (IDXSEL(sel) == 0) {
		/* Code and stack segment selectors cannot be NULL */
		if (codeseg || stackseg) {
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
			return (1);
		}
		seg_desc->base = 0;
		seg_desc->limit = 0;
		seg_desc->access = 0x10000;	/* unusable */
		return (0);
	}

	/* Read the descriptor from the GDT/LDT */
	sup_paging = ts->paging;
	sup_paging.cpl = 0;	/* implicit supervisor mode */
	error = desc_table_read(ctx, vcpu, &sup_paging, sel, &usd);
	if (error)
		return (error);

	/* Verify that the descriptor type is compatible with the segment */
	if ((ldtseg && !ldt_desc(usd.sd_type)) ||
	    (codeseg && !code_desc(usd.sd_type)) ||
	    (dataseg && !data_desc(usd.sd_type)) ||
	    (stackseg && !stack_desc(usd.sd_type))) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	/* Segment must be marked present */
	if (!usd.sd_p) {
		if (ldtseg)
			idtvec = IDT_TS;
		else if (stackseg)
			idtvec = IDT_SS;
		else
			idtvec = IDT_NP;
		sel_exception(ctx, vcpu, idtvec, sel, ts->ext);
		return (1);
	}

	cs = GETREG(ctx, vcpu, VM_REG_GUEST_CS);
	cpl = cs & SEL_RPL_MASK;
	rpl = sel & SEL_RPL_MASK;
	dpl = usd.sd_dpl;

	if (stackseg && (rpl != cpl || dpl != cpl)) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	if (codeseg) {
		conforming = (usd.sd_type & 0x4) ? true : false;
		if ((conforming && (cpl < dpl)) ||
		    (!conforming && (cpl != dpl))) {
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
			return (1);
		}
	}

	if (dataseg) {
		/*
		 * A data segment is always non-conforming except when its
		 * descriptor is a readable, conforming code segment.
		 */
		if (code_desc(usd.sd_type) && (usd.sd_type & 0x4) != 0)
			conforming = true;
		else
			conforming = false;

		if (!conforming && (rpl > dpl || cpl > dpl)) {
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
			return (1);
		}
	}
	*seg_desc = usd_to_seg_desc(&usd);
	return (0);
}
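
The stack- and data-segment branches in validate_seg_desc implement the usual protected-mode privilege rules. A hedged restatement (helper names are mine) of what those comparisons allow:

/*
 * Sketch only: a stack segment needs RPL == CPL == DPL; a non-conforming
 * data segment needs DPL >= CPL and DPL >= RPL.
 */
static int
sketch_stackseg_ok(int cpl, int rpl, int dpl)
{
	return (rpl == cpl && dpl == cpl);
}

static int
sketch_dataseg_ok(int cpl, int rpl, int dpl)
{
	return (dpl >= cpl && dpl >= rpl);
}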
Code example #9
int
i386_set_ldt(struct proc *p, void *args, register_t *retval)
{
    int error, i, n;
    struct pcb *pcb = &p->p_addr->u_pcb;
    pmap_t pmap = p->p_vmspace->vm_map.pmap;
    struct i386_set_ldt_args ua;
    union descriptor *descv;
    size_t old_len, new_len, ldt_len;
    union descriptor *old_ldt, *new_ldt;

    if (user_ldt_enable == 0)
        return (ENOSYS);

    if ((error = copyin(args, &ua, sizeof(ua))) != 0)
        return (error);

    if (ua.start < 0 || ua.num < 0 || ua.start > 8192 || ua.num > 8192 ||
            ua.start + ua.num > 8192)
        return (EINVAL);

    descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
    if (descv == NULL)
        return (ENOMEM);

    if ((error = copyin(ua.desc, descv, sizeof (*descv) * ua.num)) != 0)
        goto out;

    /* Check descriptors for access violations. */
    for (i = 0; i < ua.num; i++) {
        union descriptor *desc = &descv[i];

        switch (desc->sd.sd_type) {
        case SDT_SYSNULL:
            desc->sd.sd_p = 0;
            break;
        case SDT_SYS286CGT:
        case SDT_SYS386CGT:
            /*
             * Only allow call gates targeting a segment
             * in the LDT or a user segment in the fixed
             * part of the gdt.  Segments in the LDT are
             * constrained (below) to be user segments.
             */
            if (desc->gd.gd_p != 0 &&
                    !ISLDT(desc->gd.gd_selector) &&
                    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
                     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
                      SEL_UPL))) {
                error = EACCES;
                goto out;
            }
            break;
        case SDT_MEMEC:
        case SDT_MEMEAC:
        case SDT_MEMERC:
        case SDT_MEMERAC:
            /* Must be "present" if executable and conforming. */
            if (desc->sd.sd_p == 0) {
                error = EACCES;
                goto out;
            }
            break;
        case SDT_MEMRO:
        case SDT_MEMROA:
        case SDT_MEMRW:
        case SDT_MEMRWA:
        case SDT_MEMROD:
        case SDT_MEMRODA:
        case SDT_MEMRWD:
        case SDT_MEMRWDA:
        case SDT_MEME:
        case SDT_MEMEA:
        case SDT_MEMER:
        case SDT_MEMERA:
            break;
        default:
            /*
             * Make sure that unknown descriptor types are
             * not marked present.
             */
            if (desc->sd.sd_p != 0) {
                error = EACCES;
                goto out;
            }
            break;
        }

        if (desc->sd.sd_p != 0) {
            /* Only user (ring-3) descriptors may be present. */
            if (desc->sd.sd_dpl != SEL_UPL) {
                error = EACCES;
                goto out;
            }
        }
    }

    /* allocate user ldt */
    simple_lock(&pmap->pm_lock);
    if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
        if (pmap->pm_flags & PMF_USER_LDT)
            ldt_len = pmap->pm_ldt_len;
        else
            ldt_len = 512;
        while ((ua.start + ua.num) > ldt_len)
            ldt_len *= 2;
        new_len = ldt_len * sizeof(union descriptor);

        simple_unlock(&pmap->pm_lock);
        new_ldt = km_alloc(round_page(new_len), &kv_any,
                           &kp_dirty, &kd_nowait);
        if (new_ldt == NULL) {
            error = ENOMEM;
            goto out;
        }
        simple_lock(&pmap->pm_lock);

        if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
            /*
             * Another thread (re)allocated the LDT to
             * sufficient size while we were blocked in
             * km_alloc. Oh well. The new entries
             * will quite probably not be right, but
             * hey.. not our problem if user applications
             * have race conditions like that.
             */
            km_free(new_ldt, round_page(new_len), &kv_any,
                    &kp_dirty);
            goto copy;
        }

        old_ldt = pmap->pm_ldt;

        if (old_ldt != NULL) {
            old_len = pmap->pm_ldt_len * sizeof(union descriptor);
        } else {
            old_len = NLDT * sizeof(union descriptor);
            old_ldt = ldt;
        }

        memcpy(new_ldt, old_ldt, old_len);
        memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);

        if (old_ldt != ldt)
            km_free(old_ldt, round_page(old_len),
                    &kv_any, &kp_dirty);

        pmap->pm_ldt = new_ldt;
        pmap->pm_ldt_len = ldt_len;

        if (pmap->pm_flags & PMF_USER_LDT)
            ldt_free(pmap);
        else
            pmap->pm_flags |= PMF_USER_LDT;
        ldt_alloc(pmap, new_ldt, new_len);
        pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
        if (pcb == curpcb)
            lldt(pcb->pcb_ldt_sel);

    }
copy:
    /* Now actually replace the descriptors. */
    for (i = 0, n = ua.start; i < ua.num; i++, n++)
        pmap->pm_ldt[n] = descv[i];

    simple_unlock(&pmap->pm_lock);

    *retval = ua.start;

out:
    free(descv, M_TEMP);
    return (error);
}
Code example #10
/*
 * sysi86
 */
int
svr4_sys_sysarch(struct proc *p, void *v, register_t *retval)
{
	struct svr4_sys_sysarch_args *uap = v;
	int error;
#ifdef USER_LDT
	caddr_t sg = stackgap_init(p->p_emul);
#endif
	*retval = 0;	/* XXX: What to do */

	switch (SCARG(uap, op)) {
	case SVR4_SYSARCH_FPHW:
		return 0;

	case SVR4_SYSARCH_DSCR:
#ifdef USER_LDT
		if (user_ldt_enable == 0)
			return (ENOSYS);
		else {
			struct i386_set_ldt_args sa, *sap;
			struct sys_sysarch_args ua;

			struct svr4_ssd ssd;
			union descriptor bsd;

			if ((error = copyin(SCARG(uap, a1), &ssd,
					    sizeof(ssd))) != 0) {
				printf("Cannot copy arg1\n");
				return error;
			}

			printf("s=%x, b=%x, l=%x, a1=%x a2=%x\n",
			       ssd.selector, ssd.base, ssd.limit,
			       ssd.access1, ssd.access2);

			/* We can only set ldt's for now. */
			if (!ISLDT(ssd.selector)) {
				printf("Not an ldt\n");
				return EPERM;
			}

			/* Oh, well we don't cleanup either */
			if (ssd.access1 == 0)
				return 0;

			bsd.sd.sd_lobase = ssd.base & 0xffffff;
			bsd.sd.sd_hibase = (ssd.base >> 24) & 0xff;

			bsd.sd.sd_lolimit = ssd.limit & 0xffff;
			bsd.sd.sd_hilimit = (ssd.limit >> 16) & 0xf;

			bsd.sd.sd_type = ssd.access1 & 0x1f;
			bsd.sd.sd_dpl =  (ssd.access1 >> 5) & 0x3;
			bsd.sd.sd_p = (ssd.access1 >> 7) & 0x1;

			bsd.sd.sd_xx = ssd.access2 & 0x3;
			bsd.sd.sd_def32 = (ssd.access2 >> 2) & 0x1;
			bsd.sd.sd_gran = (ssd.access2 >> 3)& 0x1;

			sa.start = IDXSEL(ssd.selector);
			sa.desc = stackgap_alloc(&sg, sizeof(union descriptor));
			sa.num = 1;
			sap = stackgap_alloc(&sg,
					     sizeof(struct i386_set_ldt_args));

			if ((error = copyout(&sa, sap, sizeof(sa))) != 0) {
				printf("Cannot copyout args\n");
				return error;
			}

			SCARG(&ua, op) = I386_SET_LDT;
			SCARG(&ua, parms) = (char *) sap;

			if ((error = copyout(&bsd, sa.desc, sizeof(bsd))) != 0) {
				printf("Cannot copyout desc\n");
				return error;
			}

			return sys_sysarch(p, &ua, retval);
		}
#endif
	case SVR4_SYSARCH_GOSF:
		{
				/* just as SCO Openserver 5.0 says */
			char features[] = {1,1,1,1,1,1,1,1,2,1,1,1};

			if ((error = copyout(features, SCARG(uap, a1),
					     sizeof(features))) != 0) {
				printf("Cannot copyout vector\n");
				return error;
			}

			return 0;
		}

	default:
		printf("svr4_sysarch(%d), a1 %p\n", SCARG(uap, op),
		       SCARG(uap, a1));
		return 0;
	}
}