Example #1
/*
 * AP CPUs call this to sync up protected mode.
 *
 * WARNING!  We must ensure that the CPU is sufficiently initialized to
 * be able to use the FPU for our optimized bzero/bcopy code before
 * we enter more mainstream C code.
 *
 * WARNING! %fs is not set up on entry.  This routine sets up %fs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int	cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

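	/* Point the GDT region descriptor at this CPU's private slice of gdt[]. */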
	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;

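	/*
	 * Mark the TSS descriptor as an available 32-bit TSS before the
	 * ltr below: ltr sets the descriptor's busy bit, and loading an
	 * already-busy TSS descriptor would fault.
	 */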
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;

	md = mdcpu;	/* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace) */

	md->gd_common_tss.tss_esp0 = 0;	/* not used until after switch */
	md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
	md->gd_common_tssd = *md->gd_tss_gdt;
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	pmap_set_opt();		/* PSE/4MB pages, etc */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* set up SSE registers */
	enable_sse();
}
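
The "md = mdcpu" line above works because %fs was just pointed at this CPU's
private space: the globaldata self-pointer lives at offset 0 of that segment,
so a single segment-override load recovers the per-CPU structure. A minimal
sketch of the access pattern (the struct layout and names are illustrative,
not the real mdglobaldata definition; i386 GCC/Clang inline assembly):

struct globaldata_sk {
	struct globaldata_sk *gd_prvspace;	/* self-pointer at %fs:0 */
	int	gd_cpuid;
};

static inline struct globaldata_sk *
mycpu_sk(void)
{
	struct globaldata_sk *gd;

	/* Read the self-pointer stored at offset 0 of the %fs segment. */
	__asm__ __volatile__("movl %%fs:0, %0" : "=r" (gd));
	return (gd);
}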
Example #2
/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	u_int	cpuid;
	int	gsel_tss;

	/* bootAP is set in start_ap() to our ID. */
	PCPU_SET(currentldt, _default_ldt);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
#if 0
	gdt[bootAP * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
#endif
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
#if 0
	PCPU_SET(tss_gdt, &gdt[bootAP * NGDT + GPROC0_SEL].sd);

	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
#endif
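	/* Cache a pointer to the user %fs/%gs descriptor pair for cpu_switch(). */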
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	/* Signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

	/* set up FPU state on the AP */
	npxinit();
#if 0
	/* set up SSE registers */
	enable_sse();
#endif
#if 0 && defined(PAE)
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif
#if 0
	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}
#endif

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);
#if 0
	/* Init the local APIC for IRQs. */
	lapic_setup(1);
#endif
	smp_cpus++;

	cpuid = PCPU_GET(cpuid);
	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
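	/* (A non-zero remainder marks a secondary logical CPU in its package.) */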
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		CPU_SET(cpuid, &logical_cpus_mask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		CPU_SET(cpuid, &hyperthreading_cpus_mask);
#if 0
	if (bootverbose)
		lapic_dump("AP");
#endif
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the APs are up. */
	while (smp_started == 0)
		ia32_pause();

	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
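
The startup handshake in this version follows a classic barrier pattern: each
AP announces itself by bumping mp_naps, spins on aps_ready until the BSP opens
the gate, and the last CPU to check in under ap_boot_mtx sets smp_started for
everyone. A self-contained sketch of that rendezvous using C11 atomics (the
*_sk names are illustrative, not kernel symbols):

#include <stdatomic.h>

static atomic_int mp_naps_sk;		/* APs that have announced themselves */
static atomic_int aps_ready_sk;		/* set by the BSP to release the APs */
static atomic_int smp_cpus_sk = 1;	/* the BSP counts as the first CPU */
static atomic_int smp_started_sk;	/* set once every CPU has checked in */

/* Run by each AP; mp_ncpus counts the BSP plus all APs. */
static void
ap_rendezvous_sk(int mp_ncpus)
{
	/* Signal our startup to the BSP. */
	atomic_fetch_add_explicit(&mp_naps_sk, 1, memory_order_release);

	/* Spin until the BSP releases the APs. */
	while (!atomic_load_explicit(&aps_ready_sk, memory_order_acquire))
		;	/* a real kernel executes a pause instruction here */

	/* The last CPU to arrive opens the gate for everyone. */
	if (atomic_fetch_add(&smp_cpus_sk, 1) + 1 == mp_ncpus)
		atomic_store_explicit(&smp_started_sk, 1, memory_order_release);

	/* Wait until all the APs are up. */
	while (!atomic_load_explicit(&smp_started_sk, memory_order_acquire))
		;
}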
Example #3
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X,
			 * 0x67X, or 0x68X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
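				/*
				 * 0xC0010015 is the AMD HWCR MSR; clearing
				 * bit 15 (the SSE-disable bit from AMD doc
				 * 20734) lets the CPUID re-probe below see
				 * CPUID_XMM.
				 */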
				wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
#endif
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
#ifdef PAE
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
		}
#endif
		break;
#endif
	default:
		break;
	}
	enable_sse();

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
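	/* Example: bits 15-8 == 0x08 yields 8 * 8 = 64-byte cache lines. */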
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated when
	 * CLFLUSHing the APIC register window.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (cpu_vendor_id == CPU_VENDOR_INTEL && !(cpu_feature & CPUID_SS) &&
	    hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.  This may help Xen guests on some
	 * AMD CPUs.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS must flush the L1 cache itself because no PC-98 machine
	 * maintains DMA cache coherency for non-Intel CPUs.  Use the wbinvd
	 * instruction before a DMA transfer when need_pre_dma_flush = 1, and
	 * the invd instruction after a DMA transfer when
	 * need_post_dma_flush = 1.  If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xff0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
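
Both initializecpu() variants key their dispatch off cpu_id, the EAX value
from CPUID leaf 1: bits 3:0 hold the stepping, bits 7:4 the model, and bits
11:8 the family, so cpu_id & 0xff0 selects a family/model pair (0x660 is
family 6, model 6) and cpu_id & 0xf isolates the stepping, as in the Centaur
0x690 check. A small runnable sketch of that decoding (helper names are
illustrative):

#include <stdio.h>

/* Decode the base family/model/stepping fields of a CPUID leaf-1 EAX. */
static void
decode_cpu_id_sk(unsigned int cpu_id)
{
	unsigned int stepping = cpu_id & 0xf;
	unsigned int model = (cpu_id >> 4) & 0xf;
	unsigned int family = (cpu_id >> 8) & 0xf;

	printf("id 0x%03x: family %u, model %u, stepping %u\n",
	    cpu_id & 0xfff, family, model, stepping);
}

int
main(void)
{
	decode_cpu_id_sk(0x660);	/* matches "case 0x660" above */
	decode_cpu_id_sk(0x693);	/* Centaur 0x690 with stepping 3 */
	return (0);
}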
Example #4
void
initializecpu(void)
{
	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
			init_686_amd();
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();
}
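
Example #4 is a leaner cut of the same dispatcher: the inline AMD SSE
workaround is replaced by a call to init_686_amd(), and the NX, CLFLUSH, and
PC-98 handling is gone. Both versions finish with enable_sse(), which on
i386 amounts to setting the CR4 bits that declare OS support for
FXSAVE/FXRSTOR and SSE exceptions once CPUID advertises the features. A
hedged sketch of that idea, not the verbatim kernel routine:

#define	CR4_OSFXSR	0x00000200	/* OS uses FXSAVE/FXRSTOR */
#define	CR4_OSXMMEXCPT	0x00000400	/* OS handles SIMD FP exceptions */
#define	CPUID_FXSR	0x01000000	/* CPUID leaf 1, EDX bit 24 */
#define	CPUID_XMM	0x02000000	/* CPUID leaf 1, EDX bit 25 */

static void
enable_sse_sk(unsigned int cpu_feature_edx)
{
	unsigned int cr4;

	/* Nothing to do if the CPU lacks FXSR or SSE. */
	if ((cpu_feature_edx & (CPUID_FXSR | CPUID_XMM)) !=
	    (CPUID_FXSR | CPUID_XMM))
		return;

	/* Read-modify-write CR4 (ring 0 only). */
	__asm__ __volatile__("movl %%cr4, %0" : "=r" (cr4));
	cr4 |= CR4_OSFXSR | CR4_OSXMMEXCPT;
	__asm__ __volatile__("movl %0, %%cr4" : : "r" (cr4));
}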