Example #1
File: init.c Project: jjhlzn/6.828
// Setup code for APs
void
mp_main(void)
{
	// We are in high EIP now, safe to switch to kern_pgdir 
	uint32_t cr4 = rcr4();
	cr4 |= (1<<4); // enable PSE (4 MB pages)
	lcr4(cr4);
	lcr3(PADDR(kern_pgdir));
	cprintf("SMP: CPU %d starting\n", cpunum());

	lapic_init();
	env_init_percpu();
	trap_init_percpu();
	xchg(&thiscpu->cpu_status, CPU_STARTED); // tell boot_aps() we're up

	// Now that we have finished some basic setup, call sched_yield()
	// to start running processes on this CPU.  But make sure that
	// only one CPU can enter the scheduler at a time!
	//
	// Your code here:
	lock_kernel();
	sched_yield();

	// Remove this after you finish Exercise 4
	//for (;;);
}
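The xchg() above is one half of a handshake: the bootstrap processor spins on each cpu_status until the AP reports CPU_STARTED. A minimal sketch of that counterpart, assuming the conventional JOS names (boot_aps, mpentry_start, MPENTRY_PADDR, lapic_startap, percpu_kstacks) rather than quoting the jjhlzn/6.828 tree:

// Hypothetical BSP-side sketch; names follow JOS conventions, not verbatim code.
static void
boot_aps(void)
{
	extern unsigned char mpentry_start[], mpentry_end[];
	void *code;
	struct CpuInfo *c;

	// Copy the AP entry code to a fixed low physical address.
	code = KADDR(MPENTRY_PADDR);
	memmove(code, mpentry_start, mpentry_end - mpentry_start);

	// Boot each AP in turn and wait for it to signal CPU_STARTED.
	for (c = cpus; c < cpus + ncpu; c++) {
		if (c == cpus + cpunum())	// skip the BSP itself
			continue;
		mpentry_kstack = percpu_kstacks[c - cpus] + KSTKSIZE;
		lapic_startap(c->cpu_id, PADDR(code));
		while (c->cpu_status != CPU_STARTED)
			;	// spin until mp_main() runs xchg(&cpu_status, CPU_STARTED)
	}
}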
Example #2
/*
 * Initialize the floating point unit.
 */
void
fpuinit(void)
{
	register_t saveintr;
	u_int mxcsr;
	u_short control;

	if (IS_BSP())
		fpuinit_bsp1();

	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		xsetbv(XCR0, xsave_mask);
	}

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (IS_BSP())
		fpuinit_bsp2();

	/*
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	stop_emulating();
	fninit();
	control = __INITIAL_FPUCW__;
	fldcw(control);
	mxcsr = __INITIAL_MXCSR__;
	ldmxcsr(mxcsr);
	start_emulating();
	intr_restore(saveintr);
}
Example #3
File: pmap.c Project: 7perl/akaros
/* Flushes a TLB, including global pages.  We should always have the CR4_PGE
 * flag set, but just in case, we'll check.  Toggling this bit flushes the TLB.
 */
void tlb_flush_global(void)
{
	uint32_t cr4 = rcr4();
	if (cr4 & CR4_PGE) {
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else 
		lcr3(rcr3());
}
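By contrast, a flush that is allowed to keep global entries only needs a CR3 reload; a one-line sketch (the name tlb_flush is assumed for illustration, not quoted from the akaros tree):

void tlb_flush(void)
{
	lcr3(rcr3());	// reloading CR3 drops all non-global TLB entries
}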
Example #4
/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#ifndef CPU_DISABLE_SSE
	if ((cpu_feature & CPUID_SSE) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
Example #5
/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
Example #6
void
cpu_init(struct cpu_info *ci)
{
	u_int cr4 = 0;

	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	/*
	 * We do this here after identifycpu() because errata may affect
	 * what we do.
	 */
	patinit(ci);
 
	/*
	 * Enable ring 0 write protection (486 or above, but 386
	 * no longer supported).
	 */
	lcr0(rcr0() | CR0_WP);

	if (cpu_feature & CPUID_PGE)
		cr4 |= CR4_PGE;	/* enable global TLB caching */

	if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMEP)
		cr4 |= CR4_SMEP;
#ifndef SMALL_KERNEL
	if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)
		cr4 |= CR4_SMAP;
	if (ci->ci_feature_sefflags_ecx & SEFF0ECX_UMIP)
		cr4 |= CR4_UMIP;
#endif

	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		cr4 |= CR4_OSFXSR;

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2))
			cr4 |= CR4_OSXMMEXCPT;
	}
	/* no cr4 on most 486s */
	if (cr4 != 0)
		lcr4(rcr4()|cr4);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	tlbflushg();
#endif
}
Example #7
File: pmap.c Project: 7perl/akaros
bool enable_pse(void)
{
	uint32_t edx, cr4;
	cpuid(0x1, 0x0, 0, 0, 0, &edx);
	if (edx & CPUID_PSE_SUPPORT) {
		cr4 = rcr4();
		cr4 |= CR4_PSE;
		lcr4(cr4);
		return 1;
	} else
		return 0;
}
Example #8
void
cpu_init(struct cpu_info *ci)
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	lcr0(rcr0() | CR0_WP);
	lcr4(rcr4() | CR4_DEFAULT);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
#endif
}
Example #9
/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;

	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR)
		init_via();
}
Example #10
static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}
Example #11
void
vmm_host_state_init(void)
{

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	vmm_host_cr4 = rcr4();
}
Example #12
void
cpu_init(struct cpu_info *ci)
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);
	/*
	 * We do this here after identifycpu() because errata may affect
	 * what we do.
	 */
	patinit(ci);

	lcr0(rcr0() | CR0_WP);
	lcr4(rcr4() | CR4_DEFAULT);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	tlbflushg();
#endif
}
Example #13
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}
Example #14
void
vmm_host_state_init(void)
{
	int regs[4];

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	vmm_host_cr4 = rcr4();

	/*
	 * Only permit a guest to use XSAVE if the host is using
	 * XSAVE.  Only permit a guest to use XSAVE features supported
	 * by the host.  This ensures that the FPU state used by the
	 * guest is always a subset of the saved guest FPU state.
	 *
	 * In addition, only permit known XSAVE features where the
	 * rules for which features depend on other features is known
	 * to properly emulate xsetbv.
	 */
	if (vmm_host_cr4 & CR4_XSAVE) {
		vmm_xsave_limits.xsave_enabled = 1;
		vmm_host_xcr0 = rxcr(0);
		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);

		cpuid_count(0xd, 0x0, regs);
		vmm_xsave_limits.xsave_max_size = regs[1];
	}
}
Example #15
void perfmon_pcpu_init(void)
{
	int i;

	if (!perfmon_supported())
		return;
	/* Enable user level access to the performance counters */
	lcr4(rcr4() | CR4_PCE);

	/* Reset all the counters and selectors to zero.
	 */
	write_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) {
		write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0);
		write_msr(MSR_IA32_PERFCTR0 + i, 0);
	}
	write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
	for (i = 0; i < (int) cpu_caps.fix_counters_x_proc; i++)
		write_msr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);

	perfmon_arm_irq();
}
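Setting CR4_PCE above is what makes the rdpmc instruction legal at user level; a minimal read helper (illustrative only, not part of the listing) could look like this:

/* Hypothetical helper: read performance counter `counter` with rdpmc.
 * The instruction faults at CPL 3 unless CR4.PCE is set, as done above. */
static inline uint64_t read_pmc(uint32_t counter)
{
	uint32_t lo, hi;
	asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
	return ((uint64_t)hi << 32) | lo;
}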
Example #16
//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE, and
// va and pa are both page-aligned.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
	// Fill this function in
	int count, i;
	uintptr_t pd_index = 0;
	pte_t *entry = NULL;
	int cr4_pse_enabled = rcr4() & CR4_PSE;

	if (!(perm & PTE_PS)) {
		// Map one 4 KB page at a time.
		count = size / PGSIZE;
		for (i = 0; i < count; i++) {
			entry = pgdir_walk(pgdir, (void *)(va + i * PGSIZE), 1);
			if (entry != NULL)
				*entry = (pa + i * PGSIZE) | perm | PTE_P;
			else
				break;
		}
	} else if ((perm & PTE_PS) && cr4_pse_enabled) {
		// Set up 4 MB pages: each page-directory entry maps
		// NPTENTRIES * PGSIZE (4 MB) of physical memory.
		count = size / (NPTENTRIES * PGSIZE);
		for (i = 0; i < count; i++) {
			pd_index = PDX(va);
			*(pgdir + pd_index) = pa | perm | PTE_P;
			va += NPTENTRIES * PGSIZE;
			pa += NPTENTRIES * PGSIZE;
		}
	}

}
Example #17
/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

	/*
	 * Postpone enabling the SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables.  The boot loader enables the U bit in
	 * its tables.
	 */
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
		cr4 |= CR4_SMEP;
	load_cr4(cr4);
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}
}
Example #18
int
ELFNAME2(linux,copyargs)(struct lwp *l, struct exec_package *pack,
	struct ps_strings *arginfo, char **stackp, void *argp)
{
	struct linux_extra_stack_data64 *esdp, esd;
	struct elf_args *ap;
	struct vattr *vap;
	Elf_Ehdr *eh;
	Elf_Phdr *ph;
	u_long phsize;
	Elf_Addr phdr = 0;
	int error;
	int i;

	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries and static binaries as well.
	 */
	memset(&esd, 0, sizeof(esd));
	esdp = (struct linux_extra_stack_data64 *)(*stackp);
	ap = (struct elf_args *)pack->ep_emul_arg;
	vap = pack->ep_vap;
	eh = (Elf_Ehdr *)pack->ep_hdr;

	/*
	 * We forgot this, so we need to reload it now. XXX keep track of it?
	 */
	if (ap == NULL) {
		phsize = eh->e_phnum * sizeof(Elf_Phdr);
		ph = (Elf_Phdr *)kmem_alloc(phsize, KM_SLEEP);
		error = exec_read_from(l, pack->ep_vp, eh->e_phoff, ph, phsize);
		if (error == 0) {	/* scan the headers only if the read succeeded */
			for (i = 0; i < eh->e_phnum; i++) {
				if (ph[i].p_type == PT_PHDR) {
					phdr = ph[i].p_vaddr;
					break;
				}
			}
		}
		kmem_free(ph, phsize);
	}


	/*
	 * The exec_package doesn't have a proc pointer and it's not
	 * exactly trivial to add one since the credentials are
	 * changing. XXX Linux uses curlwp's credentials.
	 * Why can't we use them too?
	 */

	i = 0;
	esd.ai[i].a_type = LINUX_AT_HWCAP;
	esd.ai[i++].a_v = rcr4();

	esd.ai[i].a_type = AT_PAGESZ;
	esd.ai[i++].a_v = PAGE_SIZE;

	esd.ai[i].a_type = LINUX_AT_CLKTCK;
	esd.ai[i++].a_v = hz;

	esd.ai[i].a_type = AT_PHDR;
	esd.ai[i++].a_v = (ap ? ap->arg_phaddr: phdr);

	esd.ai[i].a_type = AT_PHENT;
	esd.ai[i++].a_v = (ap ? ap->arg_phentsize : eh->e_phentsize);

	esd.ai[i].a_type = AT_PHNUM;
	esd.ai[i++].a_v = (ap ? ap->arg_phnum : eh->e_phnum);

	esd.ai[i].a_type = AT_BASE;
	esd.ai[i++].a_v = (ap ? ap->arg_interp : 0);

	esd.ai[i].a_type = AT_FLAGS;
	esd.ai[i++].a_v = 0;

	esd.ai[i].a_type = AT_ENTRY;
	esd.ai[i++].a_v = (ap ? ap->arg_entry : eh->e_entry);

	esd.ai[i].a_type = LINUX_AT_EGID;
	esd.ai[i++].a_v = ((vap->va_mode & S_ISGID) ?
	    vap->va_gid : kauth_cred_getegid(l->l_cred));

	esd.ai[i].a_type = LINUX_AT_GID;
	esd.ai[i++].a_v = kauth_cred_getgid(l->l_cred);

	esd.ai[i].a_type = LINUX_AT_EUID;
	esd.ai[i++].a_v = ((vap->va_mode & S_ISUID) ? 
	    vap->va_uid : kauth_cred_geteuid(l->l_cred));

	esd.ai[i].a_type = LINUX_AT_UID;
	esd.ai[i++].a_v = kauth_cred_getuid(l->l_cred);

	esd.ai[i].a_type = LINUX_AT_SECURE;
	esd.ai[i++].a_v = 0;

	esd.ai[i].a_type = LINUX_AT_PLATFORM;
	esd.ai[i++].a_v = (Elf_Addr)&esdp->hw_platform[0];

	esd.ai[i].a_type = AT_NULL;
	esd.ai[i++].a_v = 0;

#ifdef DEBUG_LINUX
	if (i != LINUX_ELF_AUX_ENTRIES) {
		printf("linux_elf64_copyargs: %d Aux entries\n", i);
		return EINVAL;
	}
#endif
		
	strcpy(esd.hw_platform, LINUX_PLATFORM); 

	exec_free_emul_arg(pack);

	/*
	 * Copy out the ELF auxiliary table and hw platform name
	 */
	if ((error = copyout(&esd, esdp, sizeof(esd))) != 0)
		return error;
	*stackp += sizeof(esd);

	return 0;
}
Example #19
/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
	uint64_t msr;

	/*Check for FXSR and SSE support and enable if available.*/
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

	if (cpu == 0) {
		/* Check if we are running in a hypervisor. */
		vmm_guest = detect_virtual();
		if (vmm_guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
			vmm_guest = VMM_GUEST_UNKNOWN;
	}

#if !defined(CPU_DISABLE_AVX)
	/*Check for XSAVE and AVX support and enable if available.*/
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE)
	     && (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

		/* Adjust size of savefpu in npx.h before adding to mask.*/
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon).  It is a bug where a sequence
			 * of 5 or more popq's + a retq, under involved
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 *
			 * Do not install the workaround when we are running
			 * in a virtual machine.
			 */
			if (vmm_guest)
				break;

			msr = rdmsr(MSR_AMD_DE_CFG);
			if ((msr & 1) == 0) {
				if (cpu == 0)
					kprintf("Errata 721 workaround "
						"installed\n");
				msr |= 1;
				wrmsr(MSR_AMD_DE_CFG, msr);
			}
			break;
		}
	}

	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if 0 /* JG */
		pg_nx = PG_NX;
#endif
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}
Example #20
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
			if (((cpu_id & 0x0f0) > 0) &&
			    ((cpu_id & 0x0f0) < 0x60) &&
			    ((cpu_id & 0x00f) > 3))
				enable_K5_wt_alloc();
			else if (((cpu_id & 0x0f0) > 0x80) ||
			    (((cpu_id & 0x0f0) == 0x80) &&
				(cpu_id & 0x00f) > 0x07))
				enable_K6_2_wt_alloc();
			else if ((cpu_id & 0x0f0) > 0x50)
				enable_K6_wt_alloc();
#endif
			if ((cpu_id & 0xf0) == 0xa0)
				/*
				 * Make sure the TSC runs through
				 * suspension, otherwise we can't use
				 * it as timecounter
				 */
				wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
			break;
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
#if defined(PAE) || defined(PAE_TABLES)
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
			elf32_nxstack = 1;
		}
#endif
		break;
#endif
	default:
		break;
	}
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
Example #21
void screen_pintar_info_debug(uint eax,
                              uint ebx,
                              uint ecx,
                              uint edx,
                              uint esi,
                              uint edi,
                              uint ebp,
                              uint cs,
                              uint ds,
                              uint fs,
                              uint gs,
                              uint ss,
                              uint eflags,
                              uint esp,
                              uint eip,
                              uint stack0,
                              uint stack1,
                              uint stack2,
                              uint stack3,
                              uint stack4){
    screen_guardar_atras_debug();

    screen_pintar_rect(' ', C_BG_BLACK, POSICION_CUADRO_DEBUG_Y, POSICION_CUADRO_DEBUG_X, ALTO_CUADRO_DEBUG, ANCHO_CUADRO_DEBUG);
    screen_pintar_rect(' ', C_BG_RED, POSICION_CUADRO_DEBUG_Y + 1, POSICION_CUADRO_DEBUG_X + 1, ALTO_BANDA_DEBUG, ANCHO_CUADRO_DEBUG - 2);
    screen_pintar_rect(' ', C_BG_LIGHT_GREY, POSICION_CUADRO_DEBUG_Y + 1 + ALTO_BANDA_DEBUG, POSICION_CUADRO_DEBUG_X + 1, ALTO_CUADRO_DEBUG - ALTO_BANDA_DEBUG - 2, ANCHO_CUADRO_DEBUG - 2);

    print("eax", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 3, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(eax, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 3, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("ebx", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 5, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(ebx, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 5, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("ecx", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 7, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(ecx, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 7, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("edx", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 9, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(edx, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 9, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("esi", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 11, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(esi, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 11, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("edi", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 13, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(edi, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 13, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("ebp", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 15, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(ebp, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 15, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("esp", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 17, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(esp, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 17, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("eip", POSICION_CUADRO_DEBUG_X + 2, POSICION_CUADRO_DEBUG_Y + 19, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(eip, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 19, C_BG_LIGHT_GREY | C_FG_WHITE);


    print("cs", POSICION_CUADRO_DEBUG_X + 3, POSICION_CUADRO_DEBUG_Y + 21, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(cs, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 21, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("ds", POSICION_CUADRO_DEBUG_X + 3, POSICION_CUADRO_DEBUG_Y + 23, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(ds, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 23, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("fs", POSICION_CUADRO_DEBUG_X + 3, POSICION_CUADRO_DEBUG_Y + 25, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(fs, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 25, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("gs", POSICION_CUADRO_DEBUG_X + 3, POSICION_CUADRO_DEBUG_Y + 27, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(gs, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 27, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("ss", POSICION_CUADRO_DEBUG_X + 3, POSICION_CUADRO_DEBUG_Y + 29, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(ss, 8, POSICION_CUADRO_DEBUG_X + 6, POSICION_CUADRO_DEBUG_Y + 29, C_BG_LIGHT_GREY | C_FG_WHITE);


    print("eflags", POSICION_CUADRO_DEBUG_X + 3, POSICION_CUADRO_DEBUG_Y + 31, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(eflags, 8, POSICION_CUADRO_DEBUG_X + 10, POSICION_CUADRO_DEBUG_Y + 31, C_BG_LIGHT_GREY | C_FG_WHITE);


    print("cr0", POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 3, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(rcr0(), 8, POSICION_CUADRO_DEBUG_X + 20, POSICION_CUADRO_DEBUG_Y + 3, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("cr2", POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 5, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(rcr2(), 8, POSICION_CUADRO_DEBUG_X + 20, POSICION_CUADRO_DEBUG_Y + 5, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("cr3", POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 7, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(rcr3(), 8, POSICION_CUADRO_DEBUG_X + 20, POSICION_CUADRO_DEBUG_Y + 7, C_BG_LIGHT_GREY | C_FG_WHITE);

    print("cr4", POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 9, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(rcr4(), 8, POSICION_CUADRO_DEBUG_X + 20, POSICION_CUADRO_DEBUG_Y + 9, C_BG_LIGHT_GREY | C_FG_WHITE);


    print("stack", POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 20, C_BG_LIGHT_GREY | C_FG_BLACK);
    print_hex(stack0, 8, POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 23, C_BG_LIGHT_GREY | C_FG_WHITE);
    print_hex(stack1, 8, POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 24, C_BG_LIGHT_GREY | C_FG_WHITE);
    print_hex(stack2, 8, POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 25, C_BG_LIGHT_GREY | C_FG_WHITE);
    print_hex(stack3, 8, POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 26, C_BG_LIGHT_GREY | C_FG_WHITE);
    print_hex(stack4, 8, POSICION_CUADRO_DEBUG_X + 16, POSICION_CUADRO_DEBUG_Y + 27, C_BG_LIGHT_GREY | C_FG_WHITE);
}