Example #1
/*
 * Create an environment for the EFI runtime code call.  The most
 * important part is creating the required 1:1 physical->virtual
 * mappings for the runtime segments.  To do that, we manually create
 * a page table which unmaps userspace but provides the correct
 * kernel mappings.  The 1:1 mappings for the runtime segments
 * usually occupy the low 4G of the physical address map.
 *
 * The 1:1 mappings were chosen over the SetVirtualAddressMap() EFI RT
 * service because some BIOSes fail to correctly relocate themselves
 * on the call, requiring both the 1:1 and the virtual mapping.  As a
 * result, we must provide the 1:1 mapping anyway, so there is no
 * reason to bother with the virtual map and no need to add
 * complexity to the loader.
 *
 * The fpu_kern_enter() call allows the firmware to use the FPU, as
 * mandated by the specification.  In particular, the CR0.TS bit is
 * cleared.  It also enters a critical section, giving us the
 * necessary protection against context switches.
 *
 * There is no need to disable interrupts around the change of %cr3:
 * the kernel mappings stay correct, since we only replaced the
 * userspace portion of the VA, and interrupt handlers must not
 * access userspace.  Keeping interrupts enabled avoids the problem
 * of long firmware/SMM operations negatively affecting IPIs, esp.
 * TLB shootdown requests.
 */
int
efi_arch_enter(void)
{
	pmap_t curpmap;

	curpmap = PCPU_GET(curpmap);
	PMAP_LOCK_ASSERT(curpmap, MA_OWNED);

	/*
	 * The IPI TLB shootdown handler invltlb_pcid_handler() reloads
	 * %cr3 from curpmap->pm_cr3, which would disable the runtime
	 * segment mappings.  Block the handler's action by setting
	 * curpmap to an impossible value.  See also the comment in
	 * pmap.c:pmap_activate_sw().
	 */
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, NULL);

	load_cr3(VM_PAGE_TO_PHYS(efi_pml4_page) | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	/*
	 * If PCID is enabled, the cleared CR3_PCID_SAVE bit in the
	 * loaded %cr3 causes a full TLB invalidation.
	 */
	if (!pmap_pcid_enabled)
		invltlb();
	return (0);
}
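
For context, a minimal sketch of how a caller might bracket an EFI runtime-service call with this enter/leave pair.  The pmap lock satisfies the PMAP_LOCK_ASSERT() in efi_arch_enter(), and fpu_kern_enter() grants the firmware FPU access as described in the comment above.  The rt_gettime pointer and efi_status_to_errno() are assumptions for illustration, not taken from the code above.

static int
efi_call_gettime(struct efi_tm *tm)
{
	pmap_t curpmap;
	int error;

	curpmap = &curproc->p_vmspace->vm_pmap;
	PMAP_LOCK(curpmap);		/* required by efi_arch_enter() */
	fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
	error = efi_arch_enter();
	if (error == 0) {
		/* rt_gettime is a hypothetical EFI RT service pointer. */
		error = efi_status_to_errno(rt_gettime(tm, NULL));
		efi_arch_leave();
	}
	fpu_kern_leave(curthread, NULL);
	PMAP_UNLOCK(curpmap);
	return (error);
}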
Example #2
static vm_offset_t
dpt_physmap(u_int32_t req_paddr, vm_size_t req_size)
{
	vm_offset_t     va;
	int             ndx;
	vm_size_t       size;
	u_int32_t       paddr;
	u_int32_t       offset;

	size = (req_size / PAGE_SIZE + 1) * PAGE_SIZE;
	paddr = req_paddr & 0xfffff000;
	offset = req_paddr - paddr;

	va = kmem_alloc_pageable(kernel_map, size);
	if (va == (vm_offset_t) 0)
		return (va);

	for (ndx = 0; ndx < size; ndx += PAGE_SIZE) {
		pmap_kenter(va + ndx, paddr + ndx);
		invltlb();
	}

	return (va + offset);
}
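
A hypothetical usage sketch of dpt_physmap(); the 0xd0000000 window address and the status-register offset are invented for illustration.  Note that the returned VA already includes the sub-page offset of the requested physical address, so it can be dereferenced directly.

static u_int32_t
dpt_read_status_reg(void)
{
	vm_offset_t regs;

	/* Map 4 bytes at an (invented) controller register address. */
	regs = dpt_physmap(0xd0000000 + 0x10, sizeof(u_int32_t));
	if (regs == (vm_offset_t)0)
		return (0xffffffff);	/* mapping failed */
	return (*(volatile u_int32_t *)regs);
}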
Example #3
void
efi_arch_leave(void)
{
	pmap_t curpmap;

	curpmap = &curproc->p_vmspace->vm_pmap;
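	/*
	 * Re-enable the TLB shootdown handler's use of curpmap by
	 * restoring the real value, then switch back to the process
	 * page table; this undoes the work of efi_arch_enter().
	 */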
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, curpmap);
	load_cr3(curpmap->pm_cr3 | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	if (!pmap_pcid_enabled)
		invltlb();
}
Example #4
static int smp_process_pending_ici(int curr_cpu)
{
    struct smp_msg *msg;
    bool do_halt = false;
    int source_mailbox = 0;
    int retval = INT_NO_RESCHEDULE;

    msg = smp_check_for_message(curr_cpu, &source_mailbox);
    if(msg == 0)
        return retval;

    //	dprintf("  message = %d\n", msg->message);
    switch(msg->message) {
    case SMP_MSG_INVL_PAGE_RANGE:
        arch_cpu_invalidate_TLB_range((addr_t)msg->data, (addr_t)msg->data2);
        break;
    case SMP_MSG_INVL_PAGE_LIST:
        arch_cpu_invalidate_TLB_list((addr_t *)msg->data, (int)msg->data2);
        break;
    case SMP_MSG_GLOBAL_INVL_PAGE:
        //arch_cpu_global_TLB_invalidate();
        invltlb();
        break;
    case SMP_MSG_RESCHEDULE:
        retval = INT_RESCHEDULE;
        break;
    case SMP_MSG_CPU_HALT:
        do_halt = true;
        SHOW_INFO( 0, "cpu %d halted!", curr_cpu);
        break;
    case SMP_MSG_1:
    default:
        SHOW_ERROR( 0, "smp_intercpu_int_handler: got unknown message %d", msg->message);
    }

    // finish dealing with this message, possibly removing it from the list
    smp_finish_message_processing(curr_cpu, msg, source_mailbox);

    // special case for the halt message
    // we otherwise wouldn't have gotten the opportunity to clean up
    if(do_halt) {
        //int_disable_interrupts();
        hal_cli();
        halt();
        for(;;);
    }

    return retval;
}
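
The SMP_MSG_GLOBAL_INVL_PAGE case above falls back to invltlb().  As a point of reference, on x86 a full (non-global) TLB flush is conventionally done by reloading %cr3; a minimal sketch, assuming GCC-style inline assembly:

static inline unsigned long
read_cr3(void)
{
    unsigned long val;
    __asm__ __volatile__("mov %%cr3, %0" : "=r" (val));
    return val;
}

/* Rewriting %cr3 with its own value flushes all non-global TLB entries. */
static inline void
invltlb(void)
{
    __asm__ __volatile__("mov %0, %%cr3" : : "r" (read_cr3()) : "memory");
}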
Example #5
/*
 * Write bytes to kernel address space for debugger.
 */
int
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	jmp_buf jb;
	void *prev_jb;
	char *dst;
	pt_entry_t	*ptep0 = NULL;
	pt_entry_t	oldmap0 = 0;
	vm_offset_t	addr1;
	pt_entry_t	*ptep1 = NULL;
	pt_entry_t	oldmap1 = 0;
	int ret;

	prev_jb = kdb_jmpbuf(jb);
	ret = setjmp(jb);
	if (ret == 0) {
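		/*
		 * If the destination overlaps the read-only kernel text,
		 * temporarily grant write access to the page(s) involved.
		 */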
		if (addr > trunc_page((vm_offset_t)btext) - size &&
		    addr < round_page((vm_offset_t)etext)) {

			ptep0 = vtopte(addr);
			oldmap0 = *ptep0;
			*ptep0 |= PG_RW;

			/*
			 * Map another page if the data crosses a page
			 * boundary.
			 */
			if ((*ptep0 & PG_PS) == 0) {
				addr1 = trunc_page(addr + size - 1);
				if (trunc_page(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			} else {
				addr1 = trunc_2mpage(addr + size - 1);
				if (trunc_2mpage(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			}

			invltlb();
		}

		dst = (char *)addr;

		while (size-- > 0)
			*dst++ = *data++;
	}

	(void)kdb_jmpbuf(prev_jb);

	if (ptep0) {
		*ptep0 = oldmap0;

		if (ptep1)
			*ptep1 = oldmap1;

		invltlb();
	}

	return (ret);
}
Example #6
/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	u_int	cpuid;
	int	gsel_tss;

	/* bootAP is set in start_ap() to our ID. */
	PCPU_SET(currentldt, _default_ldt);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
#if 0
	gdt[bootAP * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
#endif
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
#if 0
	PCPU_SET(tss_gdt, &gdt[bootAP * NGDT + GPROC0_SEL].sd);

	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
#endif
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	/*
	 * signal our startup to the BSP.
	 */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

	/* set up FPU state on the AP */
	npxinit();
#if 0
	/* set up SSE registers */
	enable_sse();
#endif
#if 0 && defined(PAE)
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif
#if 0
	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}
#endif

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);
#if 0
	/* Init local apic for irq's */
	lapic_setup(1);
#endif
	smp_cpus++;

	cpuid = PCPU_GET(cpuid);
	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		CPU_SET(cpuid, &logical_cpus_mask);
	
	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		CPU_SET(cpuid, &hyperthreading_cpus_mask);
#if 0
	if (bootverbose)
		lapic_dump("AP");
#endif
	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the APs are up. */
	while (smp_started == 0)
		ia32_pause();

	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
Example #7
File: pc98_machdep.c  Project: kame/kame
static void
init_cpu_accel_mem(void)
{
    u_int target_page;
    /*
     * Certain 'CPU accelerator' hardware supports more than 16MB of
     * memory on machines whose BIOS doesn't report the true size.
     * To support this, we don't trust the BIOS value if Maxmem < 4096.
     */
    if (Maxmem < 4096) {
        for (target_page = ptoa(4096);		/* 16MB */
                target_page < ptoa(32768);		/* 128MB */
                target_page += 256 * PAGE_SIZE	/* 1MB step */) {
            u_int tmp, page_bad = FALSE;

            *(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
            invltlb();

            tmp = *(u_int *)CADDR1;
            /*
             * Test for alternating 1's and 0's
             */
            *(volatile u_int *)CADDR1 = 0xaaaaaaaa;
            if (*(volatile u_int *)CADDR1 != 0xaaaaaaaa) {
                page_bad = TRUE;
            }
            /*
             * Test for alternating 0's and 1's
             */
            *(volatile u_int *)CADDR1 = 0x55555555;
            if (*(volatile u_int *)CADDR1 != 0x55555555) {
                page_bad = TRUE;
            }
            /*
             * Test for all 1's
             */
            *(volatile u_int *)CADDR1 = 0xffffffff;
            if (*(volatile u_int *)CADDR1 != 0xffffffff) {
                page_bad = TRUE;
            }
            /*
             * Test for all 0's
             */
            *(volatile u_int *)CADDR1 = 0x0;
            if (*(volatile u_int *)CADDR1 != 0x0) {
                /*
                 * test of page failed
                 */
                page_bad = TRUE;
            }
            /*
             * Restore original value.
             */
            *(u_int *)CADDR1 = tmp;
            if (page_bad == TRUE) {
                Maxmem = atop(target_page) + 256;
            } else
                break;
        }
        *(int *)CMAP1 = 0;
        invltlb();
    }
}
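
The CMAP1/CADDR1 pair above is the classic i386 "scratch PTE" pattern: CMAP1 points at one reserved page-table entry and CADDR1 is the virtual address that entry maps, so any physical page can be inspected by rewriting the PTE and flushing the TLB.  A minimal sketch of the pattern; the cmap/caddr names are assumptions standing in for a PTE and VA reserved at boot:

static pt_entry_t *cmap;	/* assumed: one reserved PTE */
static caddr_t caddr;		/* assumed: the VA mapped by *cmap */

static u_int
peek_phys_word(vm_paddr_t pa)
{
	u_int val;

	*cmap = PG_V | PG_RW | (pa & ~PAGE_MASK);	/* map the frame */
	invltlb();			/* discard any stale translation */
	val = *(volatile u_int *)(caddr + (pa & PAGE_MASK));
	*cmap = 0;			/* tear the mapping down again */
	invltlb();
	return (val);
}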
Example #8
static int
dcons_drv_init(int stage)
{
#if defined(__i386__) || defined(__amd64__)
	quad_t addr, size;
#endif

	if (drv_init)
		return(drv_init);

	drv_init = -1;

	bzero(&dg, sizeof(dg));
	dcons_conf = &dg;
	dg.cdev = &dcons_consdev;
	dg.buf = NULL;
	dg.size = DCONS_BUF_SIZE;

#if defined(__i386__) || defined(__amd64__)
	if (getenv_quad("dcons.addr", &addr) > 0 &&
	    getenv_quad("dcons.size", &size) > 0) {
#ifdef __i386__
		vm_paddr_t pa;
		/*
		 * Allow read/write access to dcons buffer.
		 */
		for (pa = trunc_page(addr); pa < addr + size; pa += PAGE_SIZE)
			*vtopte(KERNBASE + pa) |= PG_RW;
		invltlb();
#endif
		/* XXX P to V */
		dg.buf = (struct dcons_buf *)(vm_offset_t)(KERNBASE + addr);
		dg.size = size;
		if (dcons_load_buffer(dg.buf, dg.size, sc) < 0)
			dg.buf = NULL;
	}
#endif
	if (dg.buf != NULL)
		goto ok;

#ifndef KLD_MODULE
	if (stage == 0) { /* XXX or cold */
		/*
		 * DCONS_FORCE_CONSOLE == 1 and statically linked:
		 * called from cninit(), so contigmalloc cannot be used yet.
		 */
		dg.buf = (struct dcons_buf *) bssbuf;
		dcons_init(dg.buf, dg.size, sc);
	} else
#endif
	{
		/*
		 * DCONS_FORCE_CONSOLE == 0 or the kernel module case.
		 * If the module is loaded after boot, bssbuf could be
		 * non-contiguous.
		 */
		dg.buf = (struct dcons_buf *) contigmalloc(dg.size,
			M_DEVBUF, 0, 0x10000, 0xffffffff, PAGE_SIZE, 0ul);
		if (dg.buf == NULL)
			return (-1);
		dcons_init(dg.buf, dg.size, sc);
	}

ok:
	dcons_buf = dg.buf;

	drv_init = 1;

	return 0;
}