Code example #1
File: trap.c Project: AustenConrad/plan-9
void
dumpregs(Ureg* ureg)
{
	vlong mca, mct;

	dumpregs2(ureg);

	/*
	 * Processor control registers.
	 * If machine check exception, time stamp counter, page size extensions
	 * or enhanced virtual 8086 mode extensions are supported, there is a
	 * CR4. If there is a CR4 and machine check extensions, read the machine
	 * check address and machine check type registers if RDMSR supported.
	 */
	iprint("  CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	if(m->cpuiddx & 0x9A){
		iprint(" CR4 %8.8lux", getcr4());
		if((m->cpuiddx & 0xA0) == 0xA0){
			rdmsr(0x00, &mca);
			rdmsr(0x01, &mct);
			iprint("\n  MCA %8.8llux MCT %8.8llux", mca, mct);
		}
	}
	iprint("\n  ur %#p up %#p\n", ureg, up);
}
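The accessors used above are thin wrappers around privileged instructions: getcr0()/getcr2()/getcr3()/getcr4() execute MOV from a control register, and rdmsr() executes RDMSR (MSRs 0x00 and 0x01 are the Pentium-era machine-check registers P5_MC_ADDR and P5_MC_TYPE, matching the comment). Plan 9 implements these in assembler; the sketch below shows the same idea in GCC-style inline assembly, for illustration only.

/*
 * Illustrative only: Plan 9 implements these accessors in assembler.
 * Both instructions are privileged and fault at CPL > 0.
 */
static unsigned long
getcr4_sketch(void)
{
	unsigned long cr4;

	/* MOV from CR4 reads the control register into a GPR. */
	__asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
	return cr4;
}

static void
rdmsr_sketch(unsigned int msr, unsigned long long *val)
{
	unsigned int lo, hi;

	/* RDMSR: ECX selects the MSR, the value arrives in EDX:EAX. */
	__asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	*val = (unsigned long long)hi<<32 | lo;
}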
Code example #2
static void
opt_pcbe_allstop(void)
{
	int		i;

	for (i = 0; i < 4; i++)
		wrmsr(PES_BASE_ADDR + i, 0ULL);

	/*
	 * Disable non-privileged access to the counter registers.
	 */
	setcr4(getcr4() & ~CR4_PCE);
}
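CR4_PCE is bit 8 of CR4; with it clear, RDPMC executed at CPL > 0 raises #GP(0), so clearing it here shuts off user-mode access to the counters the driver just stopped (PES_BASE_ADDR is the AMD PerfEvtSel0 MSR, 0xC0010000 on K8). The user-side read that CR4_PCE gates looks roughly like this hypothetical helper:

#include <stdint.h>

/*
 * Hypothetical user-mode counter read. This only works while the
 * kernel has CR4.PCE set; otherwise RDPMC raises #GP(0) and the
 * process typically dies with SIGSEGV.
 */
static inline uint64_t
rdpmc_user(uint32_t counter)
{
	uint32_t lo, hi;

	/* RDPMC: ECX selects the counter, value arrives in EDX:EAX. */
	__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
	return (uint64_t)hi << 32 | lo;
}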
Code example #3
static void
opt_pcbe_program(void *token)
{
	opt_pcbe_config_t	*cfgs[4] = { &nullcfgs[0], &nullcfgs[1],
						&nullcfgs[2], &nullcfgs[3] };
	opt_pcbe_config_t	*pcfg = NULL;
	int			i;
	ulong_t			curcr4 = getcr4();

	/*
	 * Allow nonprivileged code to read the performance counters if desired.
	 */
	if (kcpc_allow_nonpriv(token))
		setcr4(curcr4 | CR4_PCE);
	else
		setcr4(curcr4 & ~CR4_PCE);

	/*
	 * Query kernel for all configs which will be co-programmed.
	 */
	do {
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, NULL);

		if (pcfg != NULL) {
			ASSERT(pcfg->opt_picno < 4);
			cfgs[pcfg->opt_picno] = pcfg;
		}
	} while (pcfg != NULL);

	/*
	 * Program in two loops. The first configures and presets the counter,
	 * and the second loop enables the counters. This ensures that the
	 * counters are all enabled as closely together in time as possible.
	 */

	for (i = 0; i < 4; i++) {
		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel);
		wrmsr(PIC_BASE_ADDR + i, cfgs[i]->opt_rawpic);
	}

	for (i = 0; i < 4; i++) {
		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel |
		    (uint64_t)(uintptr_t)OPT_PES_ENABLE);
	}
}
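The two-loop split is deliberate: the first pass programs each PerfEvtSel with the enable bit clear and presets each PerfCtr, so the short second pass can flip only the enable bits and start all four counters nearly simultaneously. A hedged sketch of how such an event-select word is composed (bit positions per the AMD64 manuals; the macro names are invented for illustration):

#include <stdint.h>

#define EVSEL_EVENT(e)	((uint64_t)((e) & 0xff))	/* event select, bits 7:0 */
#define EVSEL_UNIT(u)	((uint64_t)((u) & 0xff) << 8)	/* unit mask, bits 15:8 */
#define EVSEL_USR	(1ULL << 16)			/* count at CPL > 0 */
#define EVSEL_OS	(1ULL << 17)			/* count at CPL 0 */
#define EVSEL_EN	(1ULL << 22)			/* enable (cf. OPT_PES_ENABLE) */

/* e.g. K8 data cache accesses (event 0x40), user + kernel, not yet enabled */
static const uint64_t dc_access_evsel =
    EVSEL_EVENT(0x40) | EVSEL_UNIT(0x00) | EVSEL_USR | EVSEL_OS;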
Code example #4
File: trap.c Project: grobe0ba/plan9front
void
dumpregs(Ureg* ureg)
{
	dumpregs2(ureg);

	/*
	 * Processor control registers.
	 * If machine check exception, time stamp counter, page size extensions
	 * or enhanced virtual 8086 mode extensions are supported, there is a
	 * CR4. If there is a CR4 and machine check extensions, read the machine
	 * check address and machine check type registers if RDMSR supported.
	 */
	iprint("  CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	if(m->cpuiddx & (Mce|Tsc|Pse|Vmex)){
		iprint(" CR4 %8.8lux\n", getcr4());
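		/* trap 18 is #MC, the machine check exception */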
		if(ureg->trap == 18)
			dumpmcregs();
	}
	iprint("\n  ur %#p up %#p\n", ureg, up);
}
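dumpmcregs() is not shown here, but under the MCA architecture such a dumper typically reads IA32_MCG_CAP for the bank count and then walks the per-bank status MSRs. A minimal sketch in the same style (MSR numbers per the Intel SDM; this is not the plan9front implementation):

enum {
	McgCap		= 0x179,	/* IA32_MCG_CAP: bank count in bits 7:0 */
	McgStatus	= 0x17A,	/* IA32_MCG_STATUS */
	Mc0Status	= 0x401,	/* IA32_MC0_STATUS; bank i is at 0x401 + 4*i */
};

static void
dumpmcregs_sketch(void)
{
	vlong cap, status;
	int i, nbank;

	rdmsr(McgCap, &cap);
	nbank = cap & 0xFF;
	rdmsr(McgStatus, &status);
	iprint("  MCG_STATUS %8.8llux, %d banks\n", status, nbank);
	for(i = 0; i < nbank; i++){
		rdmsr(Mc0Status + 4*i, &status);
		if(status & ((uvlong)1<<63))	/* VAL: bank logged an error */
			iprint("  MC%d_STATUS %8.8llux\n", i, status);
	}
}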
Code example #5
/*
 * Fill in the remaining CPU context and initialize it.
 */
static int
mp_set_cpu_context(vcpu_guest_context_t *vgc, cpu_t *cp)
{
	uint_t vec, iopl;

	vgc->flags = VGCF_IN_KERNEL;

	/*
	 * fpu_ctx we leave as zero; on first fault we'll store
	 * sse_initial into it anyway.
	 */

#if defined(__amd64)
	vgc->user_regs.cs = KCS_SEL | SEL_KPL;	/* force to ring 3 */
#else
	vgc->user_regs.cs = KCS_SEL;
#endif
	vgc->user_regs.ds = KDS_SEL;
	vgc->user_regs.es = KDS_SEL;
	vgc->user_regs.ss = KDS_SEL;
	vgc->kernel_ss = KDS_SEL;

	/*
	 * Allow I/O privilege level for Dom0 kernel.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info))
		iopl = (PS_IOPL & 0x1000); /* ring 1 */
	else
		iopl = 0;

#if defined(__amd64)
	vgc->user_regs.fs = 0;
	vgc->user_regs.gs = 0;
	vgc->user_regs.rflags = F_OFF | iopl;
#elif defined(__i386)
	vgc->user_regs.fs = KFS_SEL;
	vgc->user_regs.gs = KGS_SEL;
	vgc->user_regs.eflags = F_OFF | iopl;
	vgc->event_callback_cs = vgc->user_regs.cs;
	vgc->failsafe_callback_cs = vgc->user_regs.cs;
#endif

	/*
	 * Initialize the trap_info_t from the IDT
	 */
#if !defined(__lint)
	ASSERT(NIDT == sizeof (vgc->trap_ctxt) / sizeof (vgc->trap_ctxt[0]));
#endif
	for (vec = 0; vec < NIDT; vec++) {
		trap_info_t *ti = &vgc->trap_ctxt[vec];

		if (xen_idt_to_trap_info(vec,
		    &cp->cpu_m.mcpu_idt[vec], ti) == 0) {
			ti->cs = KCS_SEL;
			ti->vector = vec;
		}
	}

	/*
	 * No LDT
	 */

	/*
	 * (We assert in various places that the GDT is (a) aligned on a
	 * page boundary and (b) one page long, so this really should fit..)
	 */
#ifdef CRASH_XEN
	vgc->gdt_frames[0] = pa_to_ma(mmu_btop(cp->cpu_m.mcpu_gdtpa));
#else
	vgc->gdt_frames[0] = pfn_to_mfn(mmu_btop(cp->cpu_m.mcpu_gdtpa));
#endif
	vgc->gdt_ents = NGDT;

	vgc->ctrlreg[0] = CR0_ENABLE_FPU_FLAGS(getcr0());

#if defined(__i386)
	if (mmu.pae_hat)
		vgc->ctrlreg[3] =
		    xen_pfn_to_cr3(pfn_to_mfn(kas.a_hat->hat_htable->ht_pfn));
	else
#endif
		vgc->ctrlreg[3] =
		    pa_to_ma(mmu_ptob(kas.a_hat->hat_htable->ht_pfn));

	vgc->ctrlreg[4] = getcr4();

	vgc->event_callback_eip = (uintptr_t)xen_callback;
	vgc->failsafe_callback_eip = (uintptr_t)xen_failsafe_callback;
	vgc->flags |= VGCF_failsafe_disables_events;

#if defined(__amd64)
	/*
	 * XXPV should this be moved to init_cpu_syscall?
	 */
	vgc->syscall_callback_eip = (uintptr_t)sys_syscall;
	vgc->flags |= VGCF_syscall_disables_events;

	ASSERT(vgc->user_regs.gs == 0);
	vgc->gs_base_kernel = (uintptr_t)cp;
#endif

	return (xen_vcpu_initialize(cp->cpu_id, vgc));
}
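xen_vcpu_initialize() hands the completed context to the hypervisor. Based on Xen's public vcpu interface, the registration step is roughly the sketch below (illustrative only; the real illumos code adds error handling and bookkeeping):

/*
 * Illustrative only: register the new vCPU's initial state with Xen.
 * VCPUOP_initialise copies *vgc into the hypervisor.
 */
static int
vcpu_init_sketch(processorid_t id, vcpu_guest_context_t *vgc)
{
	return (HYPERVISOR_vcpu_op(VCPUOP_initialise, id, vgc));
}

Actually making the vCPU runnable is a separate VCPUOP_up hypercall, issued later when the CPU is started.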
Code example #6
File: memory.c Project: 99years/plan9
ulong
mmukmap(ulong pa, ulong va, int size)
{
	ulong pae, *table, *pdb, pgsz, *pte, x;
	int pse, sync;
	extern int cpuidax, cpuiddx;

	pdb = KADDR(getcr3());
	if((cpuiddx & 0x08) && (getcr4() & 0x10))
		pse = 1;
	else
		pse = 0;
	sync = 0;

	pa = PPN(pa);
	if(va == 0)
		va = (ulong)KADDR(pa);
	else
		va = PPN(va);

	pae = pa + size;
	lock(&mmukmaplock);
	while(pa < pae){
		table = &pdb[PDX(va)];
		/*
		 * Possibly already mapped.
		 */
		if(*table & PTEVALID){
			if(*table & PTESIZE){
				/*
				 * Big page. Does it fit within?
				 * If it does, adjust pgsz so the correct end can be
				 * returned and get out.
				 * If not, adjust pgsz up to the next 4MiB boundary
				 * and continue.
				 */
				x = PPN(*table);
				if(x != pa)
					panic("mmukmap1: pa 0x%ux  entry 0x%ux\n",
						pa, *table);
				x += 4*MiB;
				if(pae <= x){
					pa = pae;
					break;
				}
				pgsz = x - pa;
				pa += pgsz;
				va += pgsz;

				continue;
			}
			else{
				/*
				 * Little page. Walk to the entry.
				 * If the entry is valid, set pgsz and continue.
				 * If not, make it so, set pgsz, sync and continue.
				 */
				pte = mmuwalk(pdb, va, 2, 0);
				if(pte && *pte & PTEVALID){
					x = PPN(*pte);
					if(x != pa)
						panic("mmukmap2: pa 0x%ux entry 0x%ux\n",
							pa, *pte);
					pgsz = BY2PG;
					pa += pgsz;
					va += pgsz;
					sync++;

					continue;
				}
			}
		}

		/*
		 * Not mapped. Check if it can be mapped using a big page -
		 * starts on a 4MiB boundary, size >= 4MiB and processor can do it.
		 * If not a big page, walk the walk, talk the talk.
		 * Sync is set.
		 */
		if(pse && (pa % (4*MiB)) == 0 && (pae >= pa+4*MiB)){
			*table = pa|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = 4*MiB;
		}
		else{
			pte = mmuwalk(pdb, va, 2, 1);
			*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = BY2PG;
		}
		pa += pgsz;
		va += pgsz;
		sync++;
	}
	unlock(&mmukmaplock);

	/*
	 * If something was added
	 * then need to sync up.
	 */
	if(sync)
		putcr3(PADDR(pdb));

	return pa;
}
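The PSE test packs two facts into magic numbers: CPUID.01H:EDX bit 3 (0x08) advertises page size extensions, and CR4 bit 4 (0x10) is CR4.PSE, which enables 4MiB pages for non-PAE paging. The same test with named constants (names invented for clarity; values per the Intel SDM):

enum {
	Cpuidpse	= 1<<3,		/* CPUID.01H:EDX[3]: 4MiB pages supported */
	Cr4pse		= 1<<4,		/* CR4.PSE: 4MiB pages enabled */
};

static int
havepse(void)
{
	extern int cpuiddx;

	return (cpuiddx & Cpuidpse) && (getcr4() & Cr4pse);
}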
Code example #7
void *
mach_cpucontext_alloc(struct cpu *cp)
{
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	struct cpu_tables *ct;
	struct tss *ntss;

	/*
	 * Allocate space for stack, tss, gdt and idt. We round the size
	 * allotted for cpu_tables up, so that the TSS is on a unique page.
	 * This is more efficient when running in virtual machines.
	 */
	ct = kmem_zalloc(P2ROUNDUP(sizeof (*ct), PAGESIZE), KM_SLEEP);
	if ((uintptr_t)ct & PAGEOFFSET)
		panic("mp_startup_init: cpu%d misaligned tables", cp->cpu_id);

	ntss = cp->cpu_tss = &ct->ct_tss;

#if defined(__amd64)

	/*
	 * #DF (double fault).
	 */
	ntss->tss_ist1 = (uint64_t)&ct->ct_stack[sizeof (ct->ct_stack)];

#elif defined(__i386)

	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
	    (uint32_t)&ct->ct_stack[sizeof (ct->ct_stack)];

	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;

	ntss->tss_eip = (uint32_t)cp->cpu_thread->t_pc;

	ntss->tss_cs = KCS_SEL;
	ntss->tss_ds = ntss->tss_es = KDS_SEL;
	ntss->tss_fs = KFS_SEL;
	ntss->tss_gs = KGS_SEL;

#endif	/* __i386 */

	/*
	 * Set I/O bit map offset equal to size of TSS segment limit
	 * for no I/O permission map. This will cause all user I/O
	 * instructions to generate #gp fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);

	/*
	 * Setup kernel tss.
	 */
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) - 1, SDT_SYSTSS, SEL_KPL);

	/*
	 * Now copy all that we've set up onto the real mode platter
	 * for the real mode code to digest as part of starting the cpu.
	 */

	rm->rm_idt_base = cp->cpu_idt;
	rm->rm_idt_lim = sizeof (*cp->cpu_idt) * NIDT - 1;
	rm->rm_gdt_base = cp->cpu_gdt;
	rm->rm_gdt_lim = sizeof (*cp->cpu_gdt) * NGDT - 1;

	rm->rm_pdbr = getcr3();
	rm->rm_cpu = cp->cpu_id;
	rm->rm_x86feature = x86_feature;
	rm->rm_cr4 = getcr4();

	rmp_gdt_init(rm);

	return (ct);
}
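The page alignment that the panic() guards comes from P2ROUNDUP(), which rounds the allocation size up to a whole number of pages so kmem_zalloc() satisfies it from a page-aligned path. For reference, the illumos definition from sys/sysmacros.h:

/* Round x up to the next multiple of align; align must be a power of 2. */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))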