static void adjust_pge(void *on)
{
	if (on)
		write_cr4(read_cr4() | X86_CR4_PGE);
	else
		write_cr4(read_cr4() & ~X86_CR4_PGE);
}
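Note: the void *on signature suggests this is an IPI callback. A hedged usage sketch (not from the original source), assuming Linux's on_each_cpu(); set_pge_everywhere is a hypothetical helper:

/* Hypothetical caller: toggle global pages on every CPU. The info
 * pointer itself carries the on/off flag, matching adjust_pge(). */
static void set_pge_everywhere(int enable)
{
	on_each_cpu(adjust_pge, enable ? (void *)1 : NULL, 1);
}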
Example #2
void __init cpu_tsc_init(void)
{
	if (cpu_capable(X86_FEATURE_TSC)) {
		write_cr4(read_cr4() & ~CPU_CR4_TSD);

		if (read_cr4() & CPU_CR4_TSD)
			printk(MOD_NAME "TSC cannot be enabled\n");
	}
}
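The read-modify-write pattern above recurs throughout these examples; a minimal sketch of generic helpers in the same style (names are illustrative, built on the read_cr4()/write_cr4() primitives used here):

/* Sketch: generic CR4 bit set/clear helpers. */
static inline void cr4_set_bits(unsigned long mask)
{
	write_cr4(read_cr4() | mask);
}

static inline void cr4_clear_bits(unsigned long mask)
{
	write_cr4(read_cr4() & ~mask);
}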
Example #3
/* C entry point of secondary cpus */
asmlinkage void secondary_cpu_init(unsigned int index)
{
	atomic_inc(&active_cpus);

	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		spin_lock(&start_cpu_lock);

#ifdef __SSE3__
	/*
	 * CR4 appears to be cleared when an AP starts via lapic_start_cpu().
	 * Turn on CR4.OSFXSR and CR4.OSXMMEXCPT when SSE support is enabled.
	 */
	u32 cr4_val;
	cr4_val = read_cr4();
	cr4_val |= (CR4_OSFXSR | CR4_OSXMMEXCPT);
	write_cr4(cr4_val);
#endif
	cpu_initialize(index);

	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		spin_unlock(&start_cpu_lock);

	atomic_dec(&active_cpus);

	stop_this_cpu();
}
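The CR4 writes above are gated on the compile-time __SSE3__ flag; a runtime CPUID check is a common alternative. A hedged sketch (not from the original source; CPUID leaf 1, EDX bit 24 = FXSR):

/* Sketch: test the FXSR feature bit before touching CR4.OSFXSR. */
static int cpu_has_fxsr(void)
{
	unsigned int eax = 1, ebx, ecx, edx;

	__asm__ __volatile__("cpuid"
			     : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
	return (edx >> 24) & 1;	/* CPUID.01H:EDX.FXSR[bit 24] */
}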
Example #4
/*  Put the processor into a state where MTRRs can be safely set  */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
{
    unsigned int cr0;

    /*  Disable interrupts locally  */
    local_irq_save(ctxt->flags);

    if (use_intel() || is_cpu(CYRIX)) {

        /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
        if ( cpu_has_pge ) {
            ctxt->cr4val = read_cr4();
            write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
        }

        /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
            a side-effect  */
        cr0 = read_cr0() | 0x40000000;
        wbinvd();
        write_cr0(cr0);
        wbinvd();

        if (use_intel())
            /*  Save MTRR state */
            rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        else
            /* Cyrix ARRs - everything else was excluded at the top */
            ctxt->ccr3 = getCx86(CX86_CCR3);
    }
}
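The matching teardown routine is not shown here; a sketch of what it would look like, mirroring the steps above in reverse (field names follow the ctxt structure used above; the kernel's real set_mtrr_done() also restores MTRRdefType):

void set_mtrr_done(struct set_mtrr_context *ctxt)
{
    if (use_intel() || is_cpu(CYRIX)) {
        /*  Re-enable caches: clear CR0.CD (bit 30), flushing first  */
        wbinvd();
        write_cr0(read_cr0() & ~0x40000000);

        /*  Restore CR4 (re-enables PGE if it was previously set)  */
        if ( cpu_has_pge )
            write_cr4(ctxt->cr4val);
    }

    /*  Re-enable interrupts locally  */
    local_irq_restore(ctxt->flags);
}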
Example #5
void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
 	store_gdt(&ctxt->gdt);
 	store_idt(&ctxt->idt);
 	store_tr(ctxt->tr);

	/*
	 * segment registers
	 */
 	savesegment(es, ctxt->es);
 	savesegment(fs, ctxt->fs);
 	savesegment(gs, ctxt->gs);
 	savesegment(ss, ctxt->ss);

	/*
	 * control registers 
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
}
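A sketch of the restore direction for the control registers only (the full __restore_processor_state() also reloads descriptor tables and segments). CR4 is written first so mode bits such as PAE are in place before CR3 is loaded:

static void restore_control_regs(struct saved_context *ctxt)
{
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);
}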
Example #6
void efi_call_phys_epilog(void)
{
	unsigned long cr4;
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	cr4 = read_cr4();

	if (cr4 & X86_CR4_PAE) {
		swapper_pg_dir[pgd_index(0)].pgd =
		    efi_bak_pg_dir_pointer[0].pgd;
	} else {
		swapper_pg_dir[pgd_index(0)].pgd =
		    efi_bak_pg_dir_pointer[0].pgd;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    efi_bak_pg_dir_pointer[1].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
Example #7
static int __init x86_create_initial_map(void)
{
	unsigned long phy = 0;
	unsigned long *p = (unsigned long *)FAK_ARCH_X86_INIT_PGTABLE;	/* the kernel's top-level page table */
	int i;

	for (i = 0; i < 1024; i++)
		p[i] = 0;

	/*
	 * Map the kernel with 4MB pages.
	 */
	for (i = 0; i < 1024; i++) {
		/* At the kernel's load address, wrap back to physical 0:
		 * the kernel is actually loaded at the start of physical memory. */
		if (i == get_loaded_base() / 0x400000)
			phy = 0;
		p[i] = phy | 0x83;	/* present | writable | 4MB page (0x183 would also set the global bit) */
		phy += 0x400000;
	}
	write_cr3((unsigned long)p);

	phy = read_cr4();
	phy |= X86_CR4_PSE | X86_CR4_PGE;	/* allow 4MB pages */
	write_cr4(phy);

	phy = read_cr0();
	phy |= X86_CR0_PG;			/* enable paging */
	phy &= ~(X86_CR0_CD | X86_CR0_NW);	/* allow caching */
	write_cr0(phy);

	return 0;
}
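Once CR4.PGE is set, reloading CR3 no longer evicts global TLB entries; a common idiom for a full flush is to toggle PGE (sketch, using the same X86_CR4_PGE constant):

/* Sketch: toggling CR4.PGE flushes all TLB entries, global ones included. */
static inline void tlb_flush_all(void)
{
	unsigned long cr4 = read_cr4();

	write_cr4(cr4 & ~X86_CR4_PGE);	/* clearing PGE flushes everything */
	write_cr4(cr4);			/* restore the previous value */
}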
Example #8
static __init void xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
		  (1 << X86_FEATURE_MCA)  |  /* disable MCA */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */

	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	/* cpuid claims we support xsave; try enabling it to see what happens */
	if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
		unsigned long cr4;

		set_in_cr4(X86_CR4_OSXSAVE);
		
		cr4 = read_cr4();

		if ((cr4 & X86_CR4_OSXSAVE) == 0)
			cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));

		clear_in_cr4(X86_CR4_OSXSAVE);
	}
}
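The probe pattern above (set a CR4 bit, then read back to see whether the hypervisor let it stick) can be factored out; a hedged sketch, not from the original source:

/* Sketch: attempt to set a CR4 bit and report whether it stuck.
 * Note this leaves the bit set on success. */
static inline int cr4_bit_sticks(unsigned long mask)
{
	write_cr4(read_cr4() | mask);
	return (read_cr4() & mask) != 0;
}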
Example #9
/*
 * Initialise the FPU for this machine.
 */
BOOT_CODE void
Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR);
}
Example #10
// VMX-specific platform reboot
void xmhf_baseplatform_arch_x86vmx_reboot(VCPU *vcpu){
	(void)vcpu;

	//shut VMX off, else CPU ignores INIT signal!
	__asm__ __volatile__("vmxoff \r\n");
	write_cr4(read_cr4() & ~(CR4_VMXE));
	
	//fall back on generic x86 reboot
	xmhf_baseplatform_arch_x86_reboot();
}
Example #11
PRIVATE void fpu_init(void)
{
    unsigned short cw, sw;

    fninit();
    sw = fnstsw();
    fnstcw(&cw);

    if((sw & 0xff) == 0 &&
            (cw & 0x103f) == 0x3f) {
        /* We have some sort of FPU, but don't check exact model.
         * Set CR0_NE and CR0_MP to handle fpu exceptions
         * in native mode. */
        write_cr0(read_cr0() | CR0_MP_NE);
        fpu_presence = 1;
        if(_cpufeature(_CPUF_I386_FXSR)) {
            register struct proc *rp;
            phys_bytes aligned_fp_area;
            u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

            /* OSXMMEXCPT if supported
             * FXSR feature can be available without SSE
             */
            if(_cpufeature(_CPUF_I386_SSE))
                cr4 |= CR4_OSXMMEXCPT;

            write_cr4(cr4);
            osfxsr_feature = 1;

            for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; ++rp) {
                /* FXSR requires 16-byte alignment of memory
                 * image, but unfortunately some old tools
                 * (probably linker) ignores ".balign 16"
                 * applied to our memory image.
                 * Thus we have to do manual alignment.
                 */
                aligned_fp_area =
                    (phys_bytes) &rp->p_fpu_state.fpu_image;
                if(aligned_fp_area % FPUALIGN) {
                    aligned_fp_area += FPUALIGN -
                                       (aligned_fp_area % FPUALIGN);
                }
                rp->p_fpu_state.fpu_save_area_p =
                    (void *) aligned_fp_area;
            }
        } else {
            osfxsr_feature = 0;
        }
    } else {
        /* No FPU present. */
        fpu_presence = 0;
        osfxsr_feature = 0;
        return;
    }
}
Example #12
File: fpu.c, Project: KGG814/AOS
/*
 * Initialise the FPU for this machine.
 */
BOOT_CODE void
Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general, but leave it in a state where it will
     * generate a fault if anyone tries to use it, since we implement
     * lazy switching. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR | CR0_TASK_SWITCH);
}
Example #13
/*
 * This function selects if the context switch from prev to next
 * has to tweak the TSC disable bit in the cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
			       struct task_struct *next_p)
{
	struct thread_info *prev, *next;

	/*
	 * gcc should eliminate the ->thread_info dereference if
	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
	 */
	prev = task_thread_info(prev_p);
	next = task_thread_info(next_p);

	if (has_secure_computing(prev) || has_secure_computing(next)) {
		/* slow path here */
		if (has_secure_computing(prev) &&
		    !has_secure_computing(next)) {
			write_cr4(read_cr4() & ~X86_CR4_TSD);
		} else if (!has_secure_computing(prev) &&
			   has_secure_computing(next))
			write_cr4(read_cr4() | X86_CR4_TSD);
	}
}
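For context: with CR4.TSD set, RDTSC at CPL 3 raises #GP, which is what lets seccomp deny the timestamp counter to untrusted tasks. A sketch of the user-side read being gated:

/* Sketch: the RDTSC read that CR4.TSD turns into a #GP for user mode. */
static inline unsigned long long rdtsc_sketch(void)
{
	unsigned int lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((unsigned long long)hi << 32) | lo;
}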
Example #14
void dump_regs(struct irq_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;

	printf("EIP: %04x:[<%08lx>] EFLAGS: %08lx\n",
			(u16)regs->xcs, regs->eip, regs->eflags);

	printf("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printf("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->esi, regs->edi, regs->ebp, regs->esp);
	printf(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->xds, (u16)regs->xes, (u16)regs->xfs, (u16)regs->xgs, (u16)regs->xss);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printf("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	d0 = get_debugreg(0);
	d1 = get_debugreg(1);
	d2 = get_debugreg(2);
	d3 = get_debugreg(3);

	printf("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	d6 = get_debugreg(6);
	d7 = get_debugreg(7);
	printf("DR6: %08lx DR7: %08lx\n",
			d6, d7);

	printf("Stack:\n");
	sp = regs->esp;

	sp += 64;

	while (sp > (regs->esp - 16)) {
		if (sp == regs->esp)
			printf("--->");
		else
			printf("    ");
		printf("0x%8.8lx : 0x%8.8lx\n", sp, (ulong)readl(sp));
		sp -= 4;
	}
}
Example #15
void vm_enable_paging(void)
{
        u32_t cr0, cr4;
        int pgeok;

        pgeok = _cpufeature(_CPUF_I386_PGE);

        cr0 = read_cr0();
        cr4 = read_cr4();

        /* The boot loader should have put us in protected mode. */
        assert(cr0 & I386_CR0_PE);

        /* First clear PG and PGE flag, as PGE must be enabled after PG. */
        write_cr0(cr0 & ~I386_CR0_PG);
        write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

        cr0 = read_cr0();
        cr4 = read_cr4();

        /* Our page table contains 4MB entries. */
        cr4 |= I386_CR4_PSE;

        write_cr4(cr4);

        /* First enable paging, then enable global page flag. */
        cr0 |= I386_CR0_PG;
        write_cr0(cr0);
        cr0 |= I386_CR0_WP;
        write_cr0(cr0);

        /* May we enable these features? */
        if(pgeok)
                cr4 |= I386_CR4_PGE;

        write_cr4(cr4);
}
Example #16
static void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok, psok;

	psok = _cpufeature(_CPUF_I386_PSE);
	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* First clear PG and PGE flag, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our first page table contains 4MB entries. */
	if(psok)
		cr4 |= I386_CR4_PSE;

	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);
	cr0 |= I386_CR0_WP;
	write_cr0(cr0);

	/* May we enable these features? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;

	write_cr4(cr4);
}
Example #17
void baytrail_init_pre_device(void)
{
	struct soc_gpio_config *config;

	fill_in_pattrs();

	/* Allow for SSE instructions to be executed. */
	write_cr4(read_cr4() | CR4_OSFXSR | CR4_OSXMMEXCPT);

	/* Indicate S3 resume to rest of ramstage. */
	s3_resume_prepare();

	/* Get GPIO initial states from mainboard */
	config = mainboard_get_gpios();
	setup_soc_gpios(config);
}
Example #18
void vmx_cpu_down(void)
{
    struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
    unsigned long flags;

    local_irq_save(flags);

    while ( !list_empty(active_vmcs_list) )
        __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
                                    struct vcpu, arch.hvm_vmx.active_list));

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));
    __vmxoff();

    local_irq_restore(flags);
}
Example #19
void efi_call_phys_prelog(void)
{
	unsigned long cr4;
	unsigned long temp;
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/*
	 * Without PAE, two entries in the page directory have to be
	 * duplicated; with PAE, only one.
	 */
	cr4 = read_cr4();

	if (cr4 & X86_CR4_PAE) {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		swapper_pg_dir[0].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
	} else {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		efi_bak_pg_dir_pointer[1].pgd =
		    swapper_pg_dir[pgd_index(0x400000)].pgd;
		swapper_pg_dir[pgd_index(0)].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
		temp = PAGE_OFFSET + 0x400000;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    swapper_pg_dir[pgd_index(temp)].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #20
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the 
   physical memory. To access them they are temporarily mapped. */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{ 
	unsigned long next; 

	Dprintk("init_memory_mapping\n");

	/* 
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped.  Unfortunately this is done currently before the nodes are 
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys; 
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end) 
			next = end; 
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	} 

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();
}
Example #21
void baytrail_init_pre_device(struct soc_intel_baytrail_config *config)
{
	struct soc_gpio_config *gpio_config;

	fill_in_pattrs();

	if (!config->disable_ddr_2x_refresh_rate)
		baytrail_enable_2x_refresh_rate();

	/* Allow for SSE instructions to be executed. */
	write_cr4(read_cr4() | CR4_OSFXSR | CR4_OSXMMEXCPT);

	/* Indicate S3 resume to rest of ramstage. */
	s3_resume_prepare();

	/* Run reference code. */
	baytrail_run_reference_code();

	/* Get GPIO initial states from mainboard */
	gpio_config = mainboard_get_gpios();
	setup_soc_gpios(gpio_config, config->enable_xdp_tap);

	baytrail_init_scc();
}
Example #22
PUBLIC void fpu_init(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check exact model.
		 * Set CR0_NE and CR0_MP to handle fpu exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

			/* OSXMMEXCPT if supported
			 * FXSR feature can be available without SSE
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT; 

			write_cr4(cr4);
			osfxsr_feature = 1;
		} else {
			osfxsr_feature = 0;
		}
	} else {
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
		osfxsr_feature = 0;
		return;
	}
}
Example #23
void dump_regs(struct irq_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;

	printf("EIP: %04x:[<%08lx>] EFLAGS: %08lx\n",
			(u16)regs->xcs, regs->eip, regs->eflags);

	printf("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printf("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->esi, regs->edi, regs->ebp, regs->esp);
	printf(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->xds, (u16)regs->xes, (u16)regs->xfs, (u16)regs->xgs, (u16)regs->xss);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printf("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	d0 = get_debugreg(0);
	d1 = get_debugreg(1);
	d2 = get_debugreg(2);
	d3 = get_debugreg(3);

	printf("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	d6 = get_debugreg(6);
	d7 = get_debugreg(7);
	printf("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}
Example #24
void kmain( void* mbd, unsigned int magic )
{
   if ( magic != 0x2BADB002 )
   {
      /* Something went not according to spec: print an error message and
       * halt, but do *not* rely on the multiboot data structure. */
      print("Invalid multiboot magic\n");
      return;
   }
 
   /* You could either use multiboot.h */
   /* (http://www.gnu.org/software/grub/manual/multiboot/multiboot.html#multiboot_002eh) */
   /* or do your offsets yourself. The following is merely an example. */ 
   char *boot_loader_name = (char *)((long *)mbd)[16];
   char *hello_str = "Hello, World!\n";
   
   clear_screen();
   print("CR0: ");
   print_hex(read_cr0());
   print("\n");
   print("CR4: ");
   print_hex(read_cr4());
   print("\n");
   
   print("GDTR Base: ");
   gdtr_t gdtr;
   read_gdtr(&gdtr.v);
   print_hex(gdtr.s.base);
   print("\n");
   print("GDTR Limit: ");
   print_hex(gdtr.s.limit);
   
   //print_hex(0x12345678);
   //print(boot_loader_name, 16);
 
   /* Write your kernel here. */
}
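In a freestanding kernel like this, read_cr0()/read_cr4() are typically one-line inline-asm wrappers; a minimal 32-bit sketch (GCC syntax, assuming no such helpers exist yet):

/* Sketch: minimal control-register accessors for a hobby kernel. */
static inline unsigned long read_cr0(void)
{
	unsigned long val;

	__asm__ __volatile__("mov %%cr0, %0" : "=r"(val));
	return val;
}

static inline unsigned long read_cr4(void)
{
	unsigned long val;

	__asm__ __volatile__("mov %%cr4, %0" : "=r"(val));
	return val;
}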
Example #25
/*
 * Initialise the FPU for this machine.
 */
BOOT_CODE bool_t
Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR);
    enableFpu();

    /* Initialize the fpu state */
    finit();

    if (config_set(CONFIG_XSAVE)) {
        uint64_t xsave_features;
        uint32_t xsave_instruction;
        uint64_t desired_features = config_ternary(CONFIG_XSAVE, CONFIG_XSAVE_FEATURE_SET, 1);
        xsave_state_t *nullFpuState = (xsave_state_t *) &x86KSnullFpuState;

        /* create NULL state for FPU to be used by XSAVE variants */
        memzero(&x86KSnullFpuState, sizeof(x86KSnullFpuState));

        /* check for XSAVE support */
        if (!(x86_cpuid_ecx(1, 0) & BIT(26))) {
            printf("XSAVE not supported\n");
            return false;
        }
        /* enable XSAVE support */
        write_cr4(read_cr4() | CR4_OSXSAVE);
        /* check feature mask */
        xsave_features = ((uint64_t)x86_cpuid_edx(0x0d, 0x0) << 32) | x86_cpuid_eax(0x0d, 0x0);
        if ((xsave_features & desired_features) != desired_features) {
            printf("Requested feature mask is 0x%llx, but only 0x%llx supported\n", desired_features, (long long)xsave_features);
            return false;
        }
        /* enable feature mask */
        write_xcr0(desired_features);
        /* validate the xsave buffer size and instruction */
        if (x86_cpuid_ebx(0x0d, 0x0) > CONFIG_XSAVE_SIZE) {
            printf("XSAVE buffer set set to %d, but needs to be at least %d\n", CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
            return false;
        }
        if (x86_cpuid_ebx(0x0d, 0x0) < CONFIG_XSAVE_SIZE) {
            printf("XSAVE buffer set set to %d, but only needs to be %d.\n"
                   "Warning: Memory may be wasted with larger than needed TCBs.\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
        }
        /* check if a specialized XSAVE instruction was requested */
        xsave_instruction = x86_cpuid_eax(0x0d, 0x1);
        if (config_set(CONFIG_XSAVE_XSAVEOPT)) {
            if (!(xsave_instruction & BIT(0))) {
                printf("XSAVEOPT requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVEC)) {
            if (!(xsave_instruction & BIT(1))) {
                printf("XSAVEC requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVES)) {
            if (!(xsave_instruction & BIT(3))) {
                printf("XSAVES requested, but not supported\n");
                return false;
            }

            /* AVX state from extended region should be in compacted format */
            nullFpuState->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;

            /* initialize the XSS MSR */
            x86_wrmsr(IA32_XSS_MSR, desired_features);
        }

        /* copy i387 FPU initial state from FPU */
        saveFpuState(&x86KSnullFpuState);
        nullFpuState->i387.mxcsr = MXCSR_INIT_VALUE;
    } else {
        /* Store the null fpu state */
        saveFpuState(&x86KSnullFpuState);
    }
    /* Set the FPU to lazy switch mode */
    disableFpu();
    return true;
}
Example #26
/* Main interface to do xen specific suspend/resume */
static int enter_state(u32 state)
{
    unsigned long flags;
    int error;
    unsigned long cr4;

    if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) )
        return -EINVAL;

    if ( !spin_trylock(&pm_lock) )
        return -EBUSY;

    BUG_ON(system_state != SYS_STATE_active);
    system_state = SYS_STATE_suspend;

    printk(XENLOG_INFO "Preparing system for ACPI S%d state.\n", state);

    freeze_domains();

    acpi_dmar_reinstate();

    if ( (error = disable_nonboot_cpus()) )
    {
        system_state = SYS_STATE_resume;
        goto enable_cpu;
    }

    cpufreq_del_cpu(0);

    hvm_cpu_down();

    acpi_sleep_prepare(state);

    console_start_sync();
    printk("Entering ACPI S%d state.\n", state);

    local_irq_save(flags);
    spin_debug_disable();

    if ( (error = device_power_down()) )
    {
        printk(XENLOG_ERR "Some devices failed to power down.");
        system_state = SYS_STATE_resume;
        goto done;
    }

    ACPI_FLUSH_CPU_CACHE();

    switch ( state )
    {
    case ACPI_STATE_S3:
        do_suspend_lowlevel();
        system_reset_counter++;
        error = tboot_s3_resume();
        break;
    case ACPI_STATE_S5:
        acpi_enter_sleep_state(ACPI_STATE_S5);
        break;
    default:
        error = -EINVAL;
        break;
    }

    system_state = SYS_STATE_resume;

    /* Restore CR4 and EFER from cached values. */
    cr4 = read_cr4();
    write_cr4(cr4 & ~X86_CR4_MCE);
    write_efer(read_efer());

    device_power_up();

    mcheck_init(&boot_cpu_data, 0);
    write_cr4(cr4);

    printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state.\n", state);

    if ( (state == ACPI_STATE_S3) && error )
        tboot_s3_error(error);

 done:
    spin_debug_enable();
    local_irq_restore(flags);
    console_end_sync();
    acpi_sleep_post(state);
    if ( hvm_cpu_up() )
        BUG();

 enable_cpu:
    cpufreq_add_cpu(0);
    microcode_resume_cpu(0);
    rcu_barrier();
    mtrr_aps_sync_begin();
    enable_nonboot_cpus();
    mtrr_aps_sync_end();
    adjust_vtd_irq_affinities();
    acpi_dmar_zap();
    thaw_domains();
    system_state = SYS_STATE_active;
    spin_unlock(&pm_lock);
    return error;
}
Example #27
int vmx_cpu_up(void)
{
    u32 eax, edx;
    int bios_locked, cpu = smp_processor_id();
    u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));

    /*
     * Ensure the current processor operating mode meets
     * the required CR0 fixed bits for VMX operation.
     */
    cr0 = read_cr0();
    rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
    rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
    if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
    {
        printk("CPU%d: some settings of host CR0 are " 
               "not allowed in VMX operation.\n", cpu);
        return 0;
    }

    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);

    bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
    if ( bios_locked )
    {
        if ( !(eax & (IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX |
                      IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMX disabled by BIOS.\n", cpu);
            return 0;
        }
    }
    else
    {
        eax  = IA32_FEATURE_CONTROL_MSR_LOCK;
        eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX;
        if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) )
            eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX;
        wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
    }

    vmx_init_vmcs_config();

    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));

    if ( this_cpu(host_vmcs) == NULL )
    {
        this_cpu(host_vmcs) = vmx_alloc_vmcs();
        if ( this_cpu(host_vmcs) == NULL )
        {
            printk("CPU%d: Could not allocate host VMCS\n", cpu);
            return 0;
        }
    }

    switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
    {
    case -2: /* #UD or #GP */
        if ( bios_locked &&
             test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) &&
             (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ||
              !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMXON failed: perhaps because of TXT settings "
                   "in your BIOS configuration?\n", cpu);
            printk(" --> Disable TXT in your BIOS unless using a secure "
                   "bootloader.\n");
            return 0;
        }
        /* fall through */
    case -1: /* CF==1 or ZF==1 */
        printk("CPU%d: unexpected VMXON failure\n", cpu);
        return 0;
    case 0: /* success */
        break;
    default:
        BUG();
    }

    return 1;
}
Example #28
/* Main interface to do xen specific suspend/resume */
static int enter_state(u32 state)
{
    unsigned long flags;
    int error;

    if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) )
        return -EINVAL;

    if ( !spin_trylock(&pm_lock) )
        return -EBUSY;

    printk(XENLOG_INFO "Preparing system for ACPI S%d state.", state);

    freeze_domains();

    disable_nonboot_cpus();
    if ( num_online_cpus() != 1 )
    {
        error = -EBUSY;
        goto enable_cpu;
    }

    cpufreq_del_cpu(0);

    hvm_cpu_down();

    acpi_sleep_prepare(state);

    console_start_sync();
    printk("Entering ACPI S%d state.\n", state);

    local_irq_save(flags);
    spin_debug_disable();

    if ( (error = device_power_down()) )
    {
        printk(XENLOG_ERR "Some devices failed to power down.");
        goto done;
    }

    ACPI_FLUSH_CPU_CACHE();

    switch ( state )
    {
    case ACPI_STATE_S3:
        do_suspend_lowlevel();
        system_reset_counter++;
        error = tboot_s3_resume();
        break;
    case ACPI_STATE_S5:
        acpi_enter_sleep_state(ACPI_STATE_S5);
        break;
    default:
        error = -EINVAL;
        break;
    }

    /* Restore CR4 and EFER from cached values. */
    write_cr4(read_cr4());
    if ( cpu_has_efer )
        write_efer(read_efer());

    device_power_up();

    printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state.\n", state);

    if ( (state == ACPI_STATE_S3) && error )
        panic("Memory integrity was lost on resume (%d)\n", error);

 done:
    spin_debug_enable();
    local_irq_restore(flags);
    console_end_sync();
    acpi_sleep_post(state);
    if ( !hvm_cpu_up() )
        BUG();

 enable_cpu:
    cpufreq_add_cpu(0);
    microcode_resume_cpu(0);
    enable_nonboot_cpus();
    thaw_domains();
    spin_unlock(&pm_lock);
    return error;
}
Example #29
/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int x86_acpi_suspend_lowlevel(void)
{
    struct wakeup_header *header =
        (struct wakeup_header *) __va(real_mode_header->wakeup_header);

    if (header->signature != WAKEUP_HEADER_SIGNATURE) {
        printk(KERN_ERR "wakeup header does not match\n");
        return -EINVAL;
    }

    header->video_mode = saved_video_mode;

    header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
    native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

    /*
     * We have to check that we can write back the value, and not
     * just read it.  At least on 90 nm Pentium M (Family 6, Model
     * 13), reading an invalid MSR is not guaranteed to trap, see
     * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
     * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
     * nm process with 512-KB L2 Cache Specification Update".
     */
    if (!rdmsr_safe(MSR_EFER,
                    &header->pmode_efer_low,
                    &header->pmode_efer_high) &&
            !wrmsr_safe(MSR_EFER,
                        header->pmode_efer_low,
                        header->pmode_efer_high))
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

    header->pmode_cr0 = read_cr0();
    if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
        header->pmode_cr4 = read_cr4();
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
    }
    if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
                    &header->pmode_misc_en_low,
                    &header->pmode_misc_en_high) &&
            !wrmsr_safe(MSR_IA32_MISC_ENABLE,
                        header->pmode_misc_en_low,
                        header->pmode_misc_en_high))
        header->pmode_behavior |=
            (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
    header->realmode_flags = acpi_realmode_flags;
    header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
    header->pmode_entry = (u32)&wakeup_pmode_return;
    header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
    saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
    stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
    early_gdt_descr.address =
        (unsigned long)get_cpu_gdt_table(smp_processor_id());
    initial_gs = per_cpu_offset(smp_processor_id());
#endif
    initial_code = (unsigned long)wakeup_long64;
    saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

    do_suspend_lowlevel();
    return 0;
}
Example #30
void switch_page_directory(page_directory_t *dir)
{
	write_cr3((uint32_t)&dir->tables_physical);
	write_cr4(read_cr4() | 0x00000010); // set CR4.PSE (4MB pages)
	write_cr0(read_cr0() | 0x80000000); // set CR0.PG (enable paging)
}
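The magic numbers above are architectural bits; the same routine with named constants (values per the Intel SDM, macro names illustrative):

#define CR4_PSE 0x00000010u	// CR4 bit 4: 4MB (page-size-extension) pages
#define CR0_PG	0x80000000u	// CR0 bit 31: enable paging

void switch_page_directory_named(page_directory_t *dir)
{
	write_cr3((uint32_t)&dir->tables_physical);
	write_cr4(read_cr4() | CR4_PSE);
	write_cr0(read_cr0() | CR0_PG);
}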