Example #1
/*
 * Task entrypoint. A double fault switches into this code. All double
 * faults are fatal, so just print out something useful and panic. It
 * would be nice at some point to hook into the pthreads code so that
 * the current thread information could be dumped too.
 */
void
double_fault_handler(void)
{
	oskit_addr_t addr;
	struct x86_desc *sd;
	extern void tss_dump(struct x86_tss *);

	while (1) {
		addr = get_cr2();

		/* This seems to keep netboot from going nuts */
		clear_ts();

		printf("\nDOUBLE FAULT: possible stack overflow at %#x\n",
		       addr);

		/*
		 * Read the back_link field of the current TSS and use that
		 * to index into the GDT and obtain the previous TSS segment
		 * descriptor.  Use the base fields of that descriptor to get
		 * the linear address of the x86_tss struct.  Finally, pass
		 * lintokv of that address to tss_dump to get useful info!
		 */
		sd = &base_gdt[task_tss.back_link / 8];
		addr = ((sd->base_high << 24) | (sd->base_med << 16) |
			sd->base_low);
		tss_dump((struct x86_tss *)lintokv(addr));
		tss_dump_stack_trace((struct x86_tss *)lintokv(addr), 64);

		panic("DOUBLE FAULT panic");
	}
}
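
The shift-and-or on sd above follows from how a 32-bit GDT descriptor splits a segment base across three fields. Below is a minimal standalone sketch of the same reconstruction; the struct is a hypothetical stand-in for OSKit's struct x86_desc, keeping only the fields used here.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the base fields in OSKit's struct x86_desc. */
struct fake_desc {
	uint16_t limit_low;
	uint16_t base_low;	/* bits  0..15 of the segment base */
	uint8_t  base_med;	/* bits 16..23 */
	uint8_t  access;
	uint8_t  granularity;	/* limit bits 16..19 plus flags */
	uint8_t  base_high;	/* bits 24..31 */
};

/* Reassemble the 32-bit linear base, exactly as the handler does. */
static uint32_t
desc_base(const struct fake_desc *sd)
{
	return ((uint32_t)sd->base_high << 24) |
	       ((uint32_t)sd->base_med  << 16) |
		(uint32_t)sd->base_low;
}

int
main(void)
{
	struct fake_desc d = { 0, 0x5678, 0x34, 0, 0, 0x12 };
	printf("base = %#x\n", desc_base(&d));	/* prints base = 0x12345678 */
	return 0;
}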
Example #2
File: fpu.c  Project: ctos/bpi
/*
 * Look for FPU and initialize it.
 * Called on each CPU.
 */
void
init_fpu()
{
	unsigned short	status, control;

#ifdef	MACH_HYP
	clear_ts();
#else	/* MACH_HYP */
	unsigned int native = 0;

	if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
		native = CR0_NE;

	/*
	 * Check for FPU by initializing it,
	 * then trying to read the correct bit patterns from
	 * the control and status registers.
	 */
	set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native);	/* allow use of FPU */
#endif	/* MACH_HYP */

	fninit();
	status = fnstsw();
	fnstcw(&control);

	if ((status & 0xff) == 0 &&
	    (control & 0x103f) == 0x3f)
	{
	    /*
	     * We have a FPU of some sort.
	     * Compare -infinity against +infinity
	     * to check whether we have a 287 or a 387.
	     */
	    volatile double fp_infinity, fp_one, fp_zero;
	    fp_one = 1.0;
	    fp_zero = 0.0;
	    fp_infinity = fp_one / fp_zero;
	    if (fp_infinity == -fp_infinity) {
		/*
		 * We have an 80287.
		 */
		fp_kind = FP_287;
		asm volatile(".byte 0xdb; .byte 0xe4");	/* fnsetpm */
	    }
	    else {
		/*
		 * We have a 387.
		 */
		if (CPU_HAS_FEATURE(CPU_FEATURE_FXSR)) {
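
The bit test in init_fpu() relies on fninit leaving a real x87 in a known state: the low byte of the status word is 0 and the control word resets to 0x037f, so masking with 0x103f yields 0x3f. Here is a user-space sketch of the same probe using GCC inline assembly; it is illustrative only, since with an OS already managing the FPU it just demonstrates the expected bit patterns rather than bare-metal detection.

#include <stdio.h>

int
main(void)
{
	unsigned short status, control;

	/* Reset the x87, then read back its status and control words. */
	asm volatile("fninit");
	asm volatile("fnstsw %0" : "=a" (status));
	asm volatile("fnstcw %0" : "=m" (control));

	/* Same test as init_fpu(): status low byte clear, and the masked
	   control word equal to 0x3f after reset (0x037f & 0x103f). */
	if ((status & 0xff) == 0 && (control & 0x103f) == 0x3f)
		printf("x87 responds as expected: status=%#x control=%#x\n",
		       status, control);
	else
		printf("unexpected x87 state: status=%#x control=%#x\n",
		       status, control);
	return 0;
}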
Example #3
kern_return_t
pal_efi_call_in_32bit_mode(uint32_t func,
                           struct pal_efi_registers *efi_reg,
                           void *stack_contents,
                           size_t stack_contents_size, /* 16-byte multiple */
                           uint32_t *efi_status)
{
    DBG("pal_efi_call_in_32bit_mode(0x%08x, %p, %p, %lu, %p)\n",
	func, efi_reg, stack_contents, stack_contents_size, efi_status);

    if (func == 0) {
        return KERN_INVALID_ADDRESS;
    }

    if ((efi_reg == NULL)
        || (stack_contents == NULL)
        || (stack_contents_size % 16 != 0)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!gPEEFISystemTable || !gPEEFIRuntimeServices) {
        return KERN_NOT_SUPPORTED;
    }

    DBG("pal_efi_call_in_32bit_mode() efi_reg:\n");
    DBG("  rcx: 0x%016llx\n", efi_reg->rcx);
    DBG("  rdx: 0x%016llx\n", efi_reg->rdx);
    DBG("   r8: 0x%016llx\n", efi_reg->r8);
    DBG("   r9: 0x%016llx\n", efi_reg->r9);
    DBG("  rax: 0x%016llx\n", efi_reg->rax);

    DBG("pal_efi_call_in_32bit_mode() stack:\n");
#if PAL_DEBUG
    size_t i;
    for (i = 0; i < stack_contents_size; i += sizeof(uint32_t)) {
	uint32_t *p = (uint32_t *) ((uintptr_t)stack_contents + i);
	DBG("  %p: 0x%08x\n", p, *p);
    } 
#endif

#ifdef __x86_64__
    /*
     * Ensure no interruptions.
     * Taking a spinlock for serialization is technically unnecessary
     * because the EFIRuntime kext should serialize.
     */
    boolean_t istate = ml_set_interrupts_enabled(FALSE);
    simple_lock(&pal_efi_lock);

    /*
     * Switch to special page tables with the entire high kernel space
     * double-mapped into the bottom 4GB.
     *
     * NB: We assume that all data exchanged with RuntimeServices is
     * located in the 4GB of KVA based at VM_MIN_ADDRESS. In particular, kexts
     * loaded in the basement (below VM_MIN_ADDRESS) cannot pass static data.
     * Kernel stack and heap space is OK.
     */
    MARK_CPU_IDLE(cpu_number());
    pal_efi_saved_cr3 = get_cr3_raw();
    pal_efi_saved_cr0 = get_cr0();
    IDPML4[KERNEL_PML4_INDEX] = IdlePML4[KERNEL_PML4_INDEX];
    IDPML4[0]		      = IdlePML4[KERNEL_PML4_INDEX];
    clear_ts();
    set_cr3_raw((uint64_t) ID_MAP_VTOP(IDPML4));
    
    swapgs();			/* Save kernel's GS base */

    /* Set segment state ready for compatibility mode */
    set_gs(NULL_SEG);
    set_fs(NULL_SEG);
    set_es(KERNEL_DS);
    set_ds(KERNEL_DS);
    set_ss(KERNEL_DS);

    _pal_efi_call_in_32bit_mode_asm(func,
                                    efi_reg,
                                    stack_contents,
                                    stack_contents_size);
    
    /* Restore NULL segment state */
    set_ss(NULL_SEG);
    set_es(NULL_SEG);
    set_ds(NULL_SEG);

    swapgs();			/* Restore kernel's GS base */

    /* Restore the 64-bit user GS base we just destroyed */
    wrmsr64(MSR_IA32_KERNEL_GS_BASE,
	    current_cpu_datap()->cpu_uber.cu_user_gs_base);

    /* End of mapping games */
    set_cr3_raw(pal_efi_saved_cr3);
    set_cr0(pal_efi_saved_cr0);
    MARK_CPU_ACTIVE(cpu_number());
    
    simple_unlock(&pal_efi_lock);
    ml_set_interrupts_enabled(istate);
#else
    _pal_efi_call_in_32bit_mode_asm(func,
                                    efi_reg,
                                    stack_contents,
                                    stack_contents_size);
#endif

    *efi_status = (uint32_t)efi_reg->rax;
    DBG("pal_efi_call_in_32bit_mode() efi_status: 0x%x\n", *efi_status);

    return KERN_SUCCESS;
}
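
For context, here is a hedged sketch of how a caller might drive this routine. The helper name, entry point, and stack argument layout are illustrative only; real callers live in xnu's EFI runtime support, and struct pal_efi_registers is assumed to expose the rcx/rdx/r8/r9/rax fields printed by the DBG lines above.

/* Hypothetical wrapper: invoke a 32-bit EFI runtime service at a given
 * entry point, passing two arguments via the stack-contents buffer. */
static kern_return_t
call_efi_service_32(uint32_t entry_point, uint32_t arg0, uint32_t arg1)
{
    struct pal_efi_registers regs = { 0 };  /* GPR args unused for a 32-bit call */
    uint32_t stack[4];                      /* 16 bytes: satisfies the 16-byte-multiple rule */
    uint32_t efi_status = ~0u;
    kern_return_t kr;

    /* The argument layout is the service's own calling convention;
     * these slots are placeholders. */
    stack[0] = arg0;
    stack[1] = arg1;
    stack[2] = 0;
    stack[3] = 0;

    kr = pal_efi_call_in_32bit_mode(entry_point, &regs,
                                    stack, sizeof(stack), &efi_status);
    if (kr != KERN_SUCCESS)
        return kr;

    /* efi_status holds the EFI_STATUS the service returned in %eax. */
    return (efi_status == 0 /* EFI_SUCCESS */) ? KERN_SUCCESS : KERN_FAILURE;
}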