Example #1
static void generate_P_state_entries(int core, int cores_per_package)
{
	int ratio_min, ratio_max, ratio_turbo, ratio_step, ratio_range_2;
	int coord_type, power_max, power_unit, num_entries;
	int ratio, power, clock, clock_max;
	int vid, vid_turbo, vid_min, vid_max, vid_range_2;
	u32 control_status;
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr;

	/* Inputs from CPU attributes */
	ratio_max = pattrs->iacore_ratios[IACORE_MAX];
	ratio_min = pattrs->iacore_ratios[IACORE_LFM];
	vid_max = pattrs->iacore_vids[IACORE_MAX];
	vid_min = pattrs->iacore_vids[IACORE_LFM];

	/* Hardware coordination of P-states */
	coord_type = HW_ALL;

	/* Max Non-Turbo Frequency */
	clock_max = (ratio_max * pattrs->bclk_khz) / 1000;

	/* Calculate CPU TDP in mW */
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 1 << (msr.lo & 0xf);
	msr = rdmsr(MSR_PKG_POWER_LIMIT);
	power_max = ((msr.lo & 0x7fff) / power_unit) * 1000;

	/* Write _PCT indicating use of FFixedHW */
	acpigen_write_empty_PCT();

	/* Write _PPC with NVS specified limit on supported P-state */
	acpigen_write_PPC_NVS();

	/* Write PSD indicating configured coordination type */
	acpigen_write_PSD_package(core, 1, coord_type);

	/* Add P-state entries in _PSS table */
	acpigen_write_name("_PSS");

	/* Determine ratio points */
	ratio_step = 1;
	num_entries = (ratio_max - ratio_min) / ratio_step;
	while (num_entries > 15) { /* ACPI max is 15 ratios */
		ratio_step <<= 1;
		num_entries >>= 1;
	}

	/* P[T] is Turbo state if enabled */
	if (get_turbo_state() == TURBO_ENABLED) {
		/* _PSS package count including Turbo */
		acpigen_write_package(num_entries + 2);

		ratio_turbo = pattrs->iacore_ratios[IACORE_TURBO];
		vid_turbo = pattrs->iacore_vids[IACORE_TURBO];
		control_status = (ratio_turbo << 8) | vid_turbo;

		/* Add entry for Turbo ratio */
		acpigen_write_PSS_package(
			clock_max + 1,		/*MHz*/
			power_max,		/*mW*/
			10,			/*lat1*/
			10,			/*lat2*/
			control_status,		/*control*/
			control_status);	/*status*/
	} else {
		/* _PSS package count without Turbo */
		acpigen_write_package(num_entries + 1);
		ratio_turbo = ratio_max;
		vid_turbo = vid_max;
	}

	/* First regular entry is max non-turbo ratio */
	control_status = (ratio_max << 8) | vid_max;
	acpigen_write_PSS_package(
		clock_max,		/*MHz*/
		power_max,		/*mW*/
		10,			/*lat1*/
		10,			/*lat2*/
		control_status,		/*control*/
		control_status);	/*status*/

	/* Set up ratio and vid ranges for VID calculation */
	ratio_range_2 = (ratio_turbo - ratio_min) * 2;
	vid_range_2 = (vid_turbo - vid_min) * 2;

	/* Generate the remaining entries */
	for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
	     ratio >= ratio_min; ratio -= ratio_step) {

		/* Calculate VID for this ratio */
		vid = ((ratio - ratio_min) * vid_range_2) /
			ratio_range_2 + vid_min;
		/* Round up if remainder */
		if (((ratio - ratio_min) * vid_range_2) % ratio_range_2)
			vid++;

		/* Calculate power at this ratio */
		power = calculate_power(power_max, ratio_max, ratio);
		clock = (ratio * pattrs->bclk_khz) / 1000;
		control_status = (ratio << 8) | (vid & 0xff);

		acpigen_write_PSS_package(
			clock,			/*MHz*/
			power,			/*mW*/
			10,			/*lat1*/
			10,			/*lat2*/
			control_status,		/*control*/
			control_status);	/*status*/
	}

	/* Fix package length */
	acpigen_pop_len();
}
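A standalone sketch of the two calculations above (the _PSS entry clamp and the linear VID interpolation) run over made-up ratio/VID endpoints; the real values come from pattrs, so this is illustrative arithmetic, not coreboot API:

#include <stdio.h>

int main(void)
{
	int ratio_min = 6, ratio_max = 40;	/* assumed LFM/max ratios */
	int vid_min = 0x20, vid_max = 0x50;	/* assumed VID codes */
	int ratio_step = 1;
	int num_entries = (ratio_max - ratio_min) / ratio_step;

	/* Same clamp as above: double the step (halving the count)
	 * until the table fits ACPI's entry limit. */
	while (num_entries > 15) {
		ratio_step <<= 1;
		num_entries >>= 1;
	}

	int ratio_range_2 = (ratio_max - ratio_min) * 2;
	int vid_range_2 = (vid_max - vid_min) * 2;

	for (int ratio = ratio_min + (num_entries - 1) * ratio_step;
	     ratio >= ratio_min; ratio -= ratio_step) {
		/* Linear VID interpolation, rounding up on a remainder. */
		int vid = ((ratio - ratio_min) * vid_range_2) /
			  ratio_range_2 + vid_min;
		if (((ratio - ratio_min) * vid_range_2) % ratio_range_2)
			vid++;
		printf("ratio %2d -> control 0x%04x\n",
		       ratio, (ratio << 8) | (vid & 0xff));
	}
	return 0;
}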
Example #2
/*
 * AP CPUs call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base =
		(long) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
#if 0 /* JG XXX */
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	md->gd_common_tss.tss_ist1 =
		(long)&md->mi.gd_prvspace->idlestack[
			sizeof(md->mi.gd_prvspace->idlestack)];

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* disable the APIC, just to be SURE */
	lapic->svr &= ~APIC_SVR_ENABLE;
}
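The routine above leans on rdmsr()/wrmsr() primitives. A minimal sketch of how such accessors are commonly built on x86-64; the _sketch suffix marks these as illustrative, not DragonFly's actual implementation:

#include <stdint.h>

/* ecx selects the MSR; edx:eax carries the 64-bit value. */
static inline void wrmsr_sketch(uint32_t msr, uint64_t v)
{
	__asm__ __volatile__("wrmsr" : : "c" (msr),
	    "a" ((uint32_t)v), "d" ((uint32_t)(v >> 32)));
}

static inline uint64_t rdmsr_sketch(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((uint64_t)hi << 32) | lo;
}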
Example #3
/**
 * This is Black Magic DRAM timing juju[1].
 *
 * DRAM delay depends on CPU clock, memory bus clock, memory bus loading,
 * memory bus termination, your middle initial (ha! caught you!), GeodeLink
 * clock rate, and DRAM timing specifications.
 *
 * From this the code computes a number which is "known to work". No,
 * hardware is not an exact science. And, finally, if an FS2 (JTAG debugger)
 * is hooked up, then just don't do anything. This code was written by a master
 * of the Dark Arts at AMD and should not be modified in any way.
 *
 * [1] (http://www.thefreedictionary.com/juju)
 *
 * @param dimm0 The SMBus address of DIMM 0 (mainboard dependent).
 * @param dimm1 The SMBus address of DIMM 1 (mainboard dependent).
 * @param terminated The bus is terminated. (mainboard dependent).
 */
static void SetDelayControl(u8 dimm0, u8 dimm1, int terminated)
{
	u32 glspeed;
	u8 spdbyte0, spdbyte1, dimms, i;
	msr_t msr;

	glspeed = GeodeLinkSpeed();

	/* Fix delay controls for DM and IM arrays. */
	for (i = 0; i < ARRAY_SIZE(delay_msr_table); i++)
		wrmsr(delay_msr_table[i].index, delay_msr_table[i].msr);

	msr = rdmsr(GLCP_FIFOCTL);
	msr.hi = 0x00000005;
	wrmsr(GLCP_FIFOCTL, msr);

	/* Enable setting. */
	msr.hi = 0;
	msr.lo = 0x00000001;
	wrmsr(CPU_BC_MSS_ARRAY_CTL_ENA, msr);

	/* Debug Delay Control setup check.
	 * Leave it alone if it has been setup. FS2 or something is here.
	 */
	msr = rdmsr(GLCP_DELAY_CONTROLS);
	if (msr.lo & ~(DELAY_LOWER_STATUS_MASK))
		return;

	/* Delay Controls based on DIMM loading. UGH!
	 * Number of devices = module width (SPD 6) / device width (SPD 13)
	 *                     * physical banks (SPD 5)
	 *
	 * Note: We only support a module width of 64.
	 */
	dimms = 0;
	spdbyte0 = spd_read_byte(dimm0, SPD_PRIMARY_SDRAM_WIDTH);
	if (spdbyte0 != 0xFF) {
		dimms++;
		spdbyte0 = (u8)64 / spdbyte0 *
			   (u8)(spd_read_byte(dimm0, SPD_NUM_DIMM_BANKS));
	} else {
		spdbyte0 = 0;
	}

	spdbyte1 = spd_read_byte(dimm1, SPD_PRIMARY_SDRAM_WIDTH);
	if (spdbyte1 != 0xFF) {
		dimms++;
		spdbyte1 = (u8)64 / spdbyte1 *
			   (u8)(spd_read_byte(dimm1, SPD_NUM_DIMM_BANKS));
	} else {
		spdbyte1 = 0;
	}

	/* Zero GLCP_DELAY_CONTROLS MSR */
	msr.hi = msr.lo = 0;

	/* Save some power, disable clock to second DIMM if it is empty. */
	if (spdbyte1 == 0)
		msr.hi |= DELAY_UPPER_DISABLE_CLK135;

	spdbyte0 += spdbyte1;

	if ((dimms == 1) && (terminated == DRAM_TERMINATED)) {
		msr.hi = 0xF2F100FF;
		msr.lo = 0x56960004;
	} else for (i = 0; i < ARRAY_SIZE(delay_control_table); i++) {
		if ((dimms == delay_control_table[i].dimms) &&
		    (spdbyte0 <= delay_control_table[i].devices)) {
			if (glspeed < 334) {
				msr.hi |= delay_control_table[i].slow_hi;
				msr.lo |= delay_control_table[i].slow_low;
			} else {
				msr.hi |= delay_control_table[i].fast_hi;
				msr.lo |= delay_control_table[i].fast_low;
			}
			break;
		}
	}
	wrmsr(GLCP_DELAY_CONTROLS, msr);
}
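To make the device-count formula in the comment concrete, a worked example under assumed SPD values (an 8-bit-wide module with 2 physical banks):

#include <stdio.h>

int main(void)
{
	unsigned char width = 8;	/* assumed SPD byte 13: device width */
	unsigned char banks = 2;	/* assumed SPD byte 5: physical banks */
	/* devices = module width (64) / device width * physical banks */
	unsigned char devices = (unsigned char)(64 / width * banks);

	printf("devices on module: %u\n", devices);	/* prints 16 */
	return 0;
}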
Example #4
void
via_nano_setup(struct cpu_info *ci)
{
	u_int32_t regs[4], val;
	u_int64_t msreg;
	int model = (ci->ci_signature >> 4) & 15;

	if (model >= 9) {
		CPUID(0xC0000000, regs[0], regs[1], regs[2], regs[3]);
		val = regs[0];
		if (val >= 0xC0000001) {
			CPUID(0xC0000001, regs[0], regs[1], regs[2], regs[3]);
			val = regs[3];
		} else
			val = 0;

		if (val & (C3_CPUID_HAS_RNG | C3_CPUID_HAS_ACE))
			printf("%s:", ci->ci_dev->dv_xname);

		/* Enable RNG if present and disabled */
		if (val & C3_CPUID_HAS_RNG) {
			extern int viac3_rnd_present;

			if (!(val & C3_CPUID_DO_RNG)) {
				msreg = rdmsr(0x110B);
				msreg |= 0x40;
				wrmsr(0x110B, msreg);
			}
			viac3_rnd_present = 1;
			printf(" RNG");
		}

		/* Enable AES engine if present and disabled */
		if (val & C3_CPUID_HAS_ACE) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_ACE)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_AES;
#endif /* CRYPTO */
			printf(" AES");
		}

		/* Enable ACE2 engine if present and disabled */
		if (val & C3_CPUID_HAS_ACE2) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_ACE2)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_AESCTR;
#endif /* CRYPTO */
			printf(" AES-CTR");
		}

		/* Enable SHA engine if present and disabled */
		if (val & C3_CPUID_HAS_PHE) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_PHE)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_SHA;
#endif /* CRYPTO */
			printf(" SHA1 SHA256");
		}

		/* Enable MM engine if present and disabled */
		if (val & C3_CPUID_HAS_PMM) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_PMM)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_MM;
#endif /* CRYPTO */
			printf(" RSA");
		}

		printf("\n");
	}
}
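Each engine above repeats the same enable-if-present-and-disabled shape. A sketch that factors it into a helper; the helper name is ours, and rdmsr()/wrmsr() are only declared here:

#include <stdint.h>

extern uint64_t rdmsr(uint32_t msr);
extern void wrmsr(uint32_t msr, uint64_t v);

static void
enable_engine(uint32_t val, uint32_t has_bit, uint32_t do_bit,
    uint32_t msr_no, uint64_t enable_bit)
{
	if (!(val & has_bit))
		return;			/* engine not present */
	if (!(val & do_bit)) {		/* present but disabled */
		uint64_t msreg = rdmsr(msr_no);
		msreg |= enable_bit;
		wrmsr(msr_no, msreg);
	}
}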
Example #5
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}
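The unmask-CPUID-levels step isolated: bit 22 of IA32_MISC_ENABLE is the SDM-documented "Limit CPUID Maxval" control. The accessors below are declared stand-ins for the kernel's rdmsrl/wrmsrl:

#include <stdint.h>

#define MISC_ENABLE_LIMIT_CPUID	(1ULL << 22)	/* SDM bit 22 */

extern uint64_t rdmsrl_sketch(uint32_t msr);
extern void wrmsrl_sketch(uint32_t msr, uint64_t val);

static void unmask_cpuid_levels(uint32_t msr_no)
{
	uint64_t misc = rdmsrl_sketch(msr_no);

	if (misc & MISC_ENABLE_LIMIT_CPUID)
		wrmsrl_sketch(msr_no, misc & ~MISC_ENABLE_LIMIT_CPUID);
}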
Example #6
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	struct sys_info *sysinfo = &sysinfo_car;
	static const u8 spd_addr[] = {RC00, DIMM0, DIMM2, 0, 0, DIMM1, DIMM3, 0, 0, };
	u32 bsp_apicid = 0, val;
	msr_t msr;

	timestamp_init(timestamp_get());
	timestamp_add_now(TS_START_ROMSTAGE);

	if (!cpu_init_detectedx && boot_cpu()) {
		/* Nothing special needs to be done to find bus 0 */
		/* Allow the HT devices to be found */
		/* mov bsp to bus 0xff when > 8 nodes */
		set_bsp_node_CHtExtNodeCfgEn();
		enumerate_ht_chain();
		sb7xx_51xx_pci_port80();
	}

	post_code(0x30);

	if (bist == 0) {
		bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo); /* mmconf is inited in init_cpus */
		/* All cores run this but the BSP(node0,core0) is the only core that returns. */
	}

	post_code(0x32);

	enable_rs780_dev8();
	sb7xx_51xx_lpc_init();

	ite_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
	it8718f_disable_reboot(GPIO_DEV);
	console_init();

//	dump_mem(CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE-0x200, CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE);

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);

	// Load MPB
	val = cpuid_eax(1);
	printk(BIOS_DEBUG, "BSP Family_Model: %08x\n", val);
	printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n",sysinfo,sysinfo+1);
	printk(BIOS_DEBUG, "bsp_apicid = %02x\n", bsp_apicid);
	printk(BIOS_DEBUG, "cpu_init_detectedx = %08lx\n", cpu_init_detectedx);

	/* Setup sysinfo defaults */
	set_sysinfo_in_ram(0);

	update_microcode(val);

	post_code(0x33);

	cpuSetAMDMSR(0);
	post_code(0x34);

	amd_ht_init(sysinfo);
	post_code(0x35);

	/* Setup nodes PCI space and start core 0 AP init. */
	finalize_node_setup(sysinfo);

	/* Setup any mainboard PCI settings etc. */
	setup_mb_resource_map();
	post_code(0x36);

	/* Wait for all the AP core0s started by finalize_node_setup. */
	/* FIXME: A bunch of cores are going to start output to serial at once.
	   It would be nice to fix up printk spinlocks for ROM XIP mode.
	   I think it could be done by putting the spinlock flag in the cache
	   of the BSP located right after sysinfo.
	 */
	wait_all_core0_started();

#if CONFIG_LOGICAL_CPUS
	/* Core0 on each node is configured. Now setup any additional cores. */
	printk(BIOS_DEBUG, "start_other_cores()\n");
	start_other_cores();
	post_code(0x37);
	wait_all_other_cores_started(bsp_apicid);
#endif

	post_code(0x38);

	/* run _early_setup before soft-reset. */
	rs780_early_setup();
	sb7xx_51xx_early_setup();

#if CONFIG_SET_FIDVID
	msr = rdmsr(0xc0010071);
	printk(BIOS_DEBUG, "\nBegin FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);

	/* FIXME: The sb fid change may survive the warm reset and only
	   need to be done once.*/
	enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);

	post_code(0x39);

	if (!warm_reset_detect(0)) {			// BSP is node 0
		init_fidvid_bsp(bsp_apicid, sysinfo->nodes);
	} else {
		init_fidvid_stage2(bsp_apicid, 0);	// BSP is node 0
	}

	post_code(0x3A);

	/* show final fid and vid */
	msr = rdmsr(0xc0010071);
	printk(BIOS_DEBUG, "End FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);
#endif

	rs780_htinit();

	/* Reset for HT, FIDVID, PLL and errata changes to take effect. */
	if (!warm_reset_detect(0)) {
		printk(BIOS_INFO, "...WARM RESET...\n\n\n");
		soft_reset();
		die("After soft_reset_x - shouldn't see this message!!!\n");
	}

	post_code(0x3B);

	/* Now is the time to set ctrl in sysinfo; */
	printk(BIOS_DEBUG, "fill_mem_ctrl()\n");
	fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);

	post_code(0x40);

//	die("Die Before MCT init.");

	timestamp_add_now(TS_BEFORE_INITRAM);
	printk(BIOS_DEBUG, "raminit_amdmct()\n");
	raminit_amdmct(sysinfo);
	timestamp_add_now(TS_AFTER_INITRAM);

	cbmem_initialize_empty();
	post_code(0x41);

	amdmct_cbmem_store_info(sysinfo);

/*
	dump_pci_device_range(PCI_DEV(0, 0x18, 0), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 1), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 2), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 3), 0, 0x200);
*/

//	die("After MCT init before CAR disabled.");

	rs780_before_pci_init();
	sb7xx_51xx_before_pci_init();

	post_code(0x42);
	post_cache_as_ram();	// BSP switch stack to ram, copy then execute LB.
	post_code(0x43);	// Should never see this post code.
}
Example #7
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);
	int r;

#ifdef CONFIG_SMP
	unsigned long long value;

	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 *	FIXME: We should handle the K5 here. Set up the write
	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 *	no bus pipeline)
	 */

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, c->x86_capability);
	
	r = get_model_name(c);

	switch(c->x86)
	{
		case 4:
		/*
		 * General Systems BIOSen alias the cpu frequency registers
		 * of the Elan at 0x000df000. Unfortunately, one of the Linux
		 * drivers subsequently pokes it, and changes the CPU speed.
		 * Workaround : Remove the unneeded alias.
		 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
			if (c->x86_model==9 || c->x86_model == 10) {
				if (inl (CBAR) & CBAR_ENB)
					outl (0 | CBAR_KEY, CBAR);
			}
			break;
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, c->x86_capability);
					set_bit(X86_FEATURE_PGE, c->x86_capability);
				}
				break;
			}
			
			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;
				
				printk(KERN_INFO "AMD K6 stepping B detected - ");
				
				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect 
				 * calls at the same time.
				 */

				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--) 
					f_vide();
				rdtscl(d2);
				d = d2-d;

				if (d > 20*K6_BUG_LOOP) 
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else 
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model== 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508Mb */
				if(mbytes>508)
					mbytes=508;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0x0000FFFF)==0) {
					unsigned long flags;
					l=(1<<0)|((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}

			if ((c->x86_model == 8 && c->x86_mask >7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips .. */

				if(mbytes>4092)
					mbytes=4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0xFFFF0000)==0) {
					unsigned long flags;
					l=((mbytes>>2)<<22)|(1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/*  Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
				break;
			}

			if (c->x86_model == 10) {
				/* AMD Geode LX is model 10 */
				/* placeholder for any needed mods */
				break;
			}
			break;
		case 6: /* An Athlon/Duron */
 
			/* Bit 15 of Athlon-specific MSR 15 needs to be 0
			 * to enable SSE on Palomino/Morgan/Barton CPUs.
			 * If the BIOS didn't enable it already, enable it here.
			 */
			if (c->x86_model >= 6 && c->x86_model <= 10) {
				if (!cpu_has(c, X86_FEATURE_XMM)) {
					printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
					rdmsr(MSR_K7_HWCR, l, h);
					l &= ~0x00008000;
					wrmsr(MSR_K7_HWCR, l, h);
					set_bit(X86_FEATURE_XMM, c->x86_capability);
				}
			}

			/* It's been determined by AMD that Athlons since model 8 stepping 1
			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
			 * as per AMD technical note 27212 0.2.
			 */
			if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
				rdmsr(MSR_K7_CLK_CTL, l, h);
				if ((l & 0xfff00000) != 0x20000000) {
					printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
						((l & 0x000fffff)|0x20000000));
					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
				}
			}
			break;
	}
}
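A worked example of the old-style K6 WHCR encoding above: bit 0 enables write allocation and, as the code shows, mbytes/4 is stored starting at bit 1. With an assumed 64 MB of cacheable low memory:

#include <stdio.h>

int main(void)
{
	int mbytes = 64;	/* assumed size, already clamped to 508 */
	unsigned long l = (1 << 0) | ((mbytes / 4) << 1);

	printf("WHCR low word: 0x%04lx\n", l);	/* prints 0x0021 */
	return 0;
}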
Example #8
static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Disabled by DMI scan or kernel option? */
	if (dont_enable_local_apic)
		return -1;

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
			break;
		if (boot_cpu_data.x86 == 15 && cpu_has_apic)
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && cpu_has_apic) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	printk("Found and enabled local APIC!\n");

	apic_pm_init1();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
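A worked model of the re-enable write above. MSR_IA32_APICBASE_BASE masks the base field (0xfffff000 in the low word), bit 11 is the enable, and 0xfee00000 is the architectural default base; the starting value is assumed:

#include <stdio.h>

int main(void)
{
	unsigned int l = 0x00000100;	/* assumed: BSP flag set, APIC disabled */

	l &= ~0xfffff000u;		/* MSR_IA32_APICBASE_BASE */
	l |= (1u << 11) | 0xfee00000u;	/* ENABLE | APIC_DEFAULT_PHYS_BASE */
	printf("APIC_BASE low word: 0x%08x\n", l);	/* 0xfee00900 */
	return 0;
}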
Example #9
static void model_16_init(struct device *dev)
{
	printk(BIOS_DEBUG, "Model 16 Init.\n");

	u8 i;
	msr_t msr;
	int num_banks;
	int msrno;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

	//x86_enable_cache();
	//amd_setup_mtrrs();
	//x86_mtrr_check();
	disable_cache();
	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	// BSP: make a0000-bffff UC, c0000-fffff WB, same as OntarioApMtrrSettingsList for APs
	msr.lo = msr.hi = 0;
	wrmsr(MTRR_FIX_16K_A0000, msr);
	msr.lo = msr.hi = 0x1e1e1e1e;
	wrmsr(MTRR_FIX_64K_00000, msr);
	wrmsr(MTRR_FIX_16K_80000, msr);
	for (msrno = MTRR_FIX_4K_C0000; msrno <= MTRR_FIX_4K_F8000; msrno++)
		wrmsr(msrno, msr);

	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	if (acpi_is_wakeup())
		restore_mtrr();

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr = rdmsr(IA32_MCG_CAP);
	num_banks = msr.lo & MCA_BANKS_MASK;
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < num_banks; i++)
		wrmsr(IA32_MC0_STATUS + (i * 4), msr);

	/* Enable the local CPU APICs */
	setup_lapic();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
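coreboot's msr_t splits the 64-bit value into lo/hi words, so bit N of the MSR lives at position N - 32 in msr.hi once N >= 32; that is exactly the (46 - 32) and (33 - 32) shifts above. A tiny self-check:

#include <stdint.h>
#include <assert.h>

typedef struct { uint32_t lo, hi; } msr_sketch_t;

int main(void)
{
	msr_sketch_t m = { 0, 0 };

	m.hi |= 1u << (46 - 32);	/* set bit 46 of the 64-bit value */
	assert((((uint64_t)m.hi << 32) | m.lo) == (1ULL << 46));
	return 0;
}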
Example #10
/*
 *  All traps come here.  It is slower to have all traps call trap()
 *  rather than directly vectoring the handler.  However, this avoids a
 *  lot of code duplication and possible bugs.  The only exception is
 *  VectorSYSCALL.
 *  Trap is called with interrupts disabled via interrupt-gates.
 */
void
trap(Ureg* ureg)
{
    int clockintr, vno, user;
    // cache the previous vno to see what might be causing
    // trouble
    vno = ureg->type;
    uint64_t gsbase = rdmsr(GSbase);
    //if (sce > scx) iprint("====================");
    lastvno = vno;
    if (gsbase < 1ULL<<63)
        die("bogus gsbase");
    Proc *up = externup();
    char buf[ERRMAX];
    Vctl *ctl, *v;

    machp()->perf.intrts = perfticks();
    user = userureg(ureg);
    if(user && (machp()->NIX.nixtype == NIXTC)) {
        up->dbgreg = ureg;
        cycles(&up->kentry);
    }

    clockintr = 0;

    //_pmcupdate(machp());

    if(ctl = vctl[vno]) {
        if(ctl->isintr) {
            machp()->intr++;
            if(vno >= VectorPIC && vno != VectorSYSCALL)
                machp()->lastintr = ctl->Vkey.irq;
        } else if(up)
            up->nqtrap++;

        if(ctl->isr) {
            ctl->isr(vno);
            if(islo())print("trap %d: isr %p enabled interrupts\n", vno, ctl->isr);
        }
        for(v = ctl; v != nil; v = v->next) {
            if(v->f) {
                v->f(ureg, v->a);
                if(islo())print("trap %d: ctlf %p enabled interrupts\n", vno, v->f);
            }
        }
        if(ctl->eoi) {
            ctl->eoi(vno);
            if(islo())print("trap %d: eoi %p enabled interrupts\n", vno, ctl->eoi);
        }

        intrtime(vno);
        if(ctl->isintr) {
            if(ctl->Vkey.irq == IrqCLOCK || ctl->Vkey.irq == IrqTIMER)
                clockintr = 1;

            if (ctl->Vkey.irq == IrqTIMER)
                oprof_alarm_handler(ureg);

            if(up && !clockintr)
                preempted();
        }
    }
    else if(vno < nelem(excname) && user) {
        spllo();
        snprint(buf, sizeof buf, "sys: trap: %s", excname[vno]);
        postnote(up, 1, buf, NDebug);
    }
    else if(vno >= VectorPIC && vno != VectorSYSCALL) {
        /*
         * An unknown interrupt.
         * Check for a default IRQ7. This can happen when
         * the IRQ input goes away before the acknowledge.
         * In this case, a 'default IRQ7' is generated, but
         * the corresponding bit in the ISR isn't set.
         * In fact, just ignore all such interrupts.
         */

        /* clear the interrupt */
        i8259isr(vno);

        iprint("cpu%d: spurious interrupt %d, last %d\n",
               machp()->machno, vno, machp()->lastintr);
        intrtime(vno);
        if(user)
            kexit(ureg);
        return;
    }
    else {
        if(vno == VectorNMI) {
            nmienable();
            if(machp()->machno != 0) {
                iprint("cpu%d: PC %#llx\n",
                       machp()->machno, ureg->ip);
                for(;;);
            }
        }
        dumpregs(ureg);
        if(!user) {
            ureg->sp = PTR2UINT(&ureg->sp);
            dumpstackwithureg(ureg);
        }
        if(vno < nelem(excname))
            panic("%s", excname[vno]);
        panic("unknown trap/intr: %d\n", vno);
    }
    splhi();

    /* delaysched set because we held a lock or because our quantum ended */
    if(up && up->delaysched && clockintr) {
        if(0)
            if(user && up->ac == nil && up->nqtrap == 0 && up->nqsyscall == 0) {
                if(!waserror()) {
                    up->ac = getac(up, -1);
                    poperror();
                    runacore();
                    return;
                }
            }
        sched();
        splhi();
    }


    if(user) {
        if(up && (up->procctl || up->nnote))
            notify(ureg);
        kexit(ureg);
    }
}
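The gsbase sanity check above relies on x86-64 canonical addressing: kernel-half addresses have bit 63 set, so any base below 1ULL<<63 cannot be a kernel %gs base. The same test isolated, with assumed example addresses:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t kernel_gs = 0xffffffff80001000ULL;	/* assumed kernel base */
	uint64_t bogus_gs = 0x0000000000400000ULL;	/* user-half address */

	assert(kernel_gs >= (1ULL << 63));	/* passes the check */
	assert(bogus_gs < (1ULL << 63));	/* would trigger die() */
	return 0;
}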
Example #11
static uint32_t reg_script_read_res(struct reg_script_context *ctx)
{
	struct resource *res;
	uint32_t val = 0;
	const struct reg_script *step = reg_script_get_step(ctx);

	res = reg_script_get_resource(ctx);

	if (res == NULL)
		return val;

	if (res->flags & IORESOURCE_IO) {
		const struct reg_script io_step = {
			.size = step->size,
			.reg = res->base + step->reg,
		};
		reg_script_set_step(ctx, &io_step);
		val = reg_script_read_io(ctx);
	}
	else if (res->flags & IORESOURCE_MEM) {
		const struct reg_script mmio_step = {
			.size = step->size,
			.reg = res->base + step->reg,
		};
		reg_script_set_step(ctx, &mmio_step);
		val = reg_script_read_mmio(ctx);
	}
	reg_script_set_step(ctx, step);
	return val;
}

static void reg_script_write_res(struct reg_script_context *ctx)
{
	struct resource *res;
	const struct reg_script *step = reg_script_get_step(ctx);

	res = reg_script_get_resource(ctx);

	if (res == NULL)
		return;

	if (res->flags & IORESOURCE_IO) {
		const struct reg_script io_step = {
			.size = step->size,
			.reg = res->base + step->reg,
			.value = step->value,
		};
		reg_script_set_step(ctx, &io_step);
		reg_script_write_io(ctx);
	}
	else if (res->flags & IORESOURCE_MEM) {
		const struct reg_script mmio_step = {
			.size = step->size,
			.reg = res->base + step->reg,
			.value = step->value,
		};
		reg_script_set_step(ctx, &mmio_step);
		reg_script_write_mmio(ctx);
	}
	reg_script_set_step(ctx, step);
}

#if CONFIG_SOC_INTEL_BAYTRAIL
static uint32_t reg_script_read_iosf(struct reg_script_context *ctx)
{
	const struct reg_script *step = reg_script_get_step(ctx);

	switch (step->id) {
	case IOSF_PORT_AUNIT:
		return iosf_aunit_read(step->reg);
	case IOSF_PORT_CPU_BUS:
		return iosf_cpu_bus_read(step->reg);
	case IOSF_PORT_BUNIT:
		return iosf_bunit_read(step->reg);
	case IOSF_PORT_DUNIT_CH0:
		return iosf_dunit_ch0_read(step->reg);
	case IOSF_PORT_PMC:
		return iosf_punit_read(step->reg);
	case IOSF_PORT_USBPHY:
		return iosf_usbphy_read(step->reg);
	case IOSF_PORT_SEC:
		return iosf_sec_read(step->reg);
	case IOSF_PORT_0x45:
		return iosf_port45_read(step->reg);
	case IOSF_PORT_0x46:
		return iosf_port46_read(step->reg);
	case IOSF_PORT_0x47:
		return iosf_port47_read(step->reg);
	case IOSF_PORT_SCORE:
		return iosf_score_read(step->reg);
	case IOSF_PORT_0x55:
		return iosf_port55_read(step->reg);
	case IOSF_PORT_0x58:
		return iosf_port58_read(step->reg);
	case IOSF_PORT_0x59:
		return iosf_port59_read(step->reg);
	case IOSF_PORT_0x5a:
		return iosf_port5a_read(step->reg);
	case IOSF_PORT_USHPHY:
		return iosf_ushphy_read(step->reg);
	case IOSF_PORT_SCC:
		return iosf_scc_read(step->reg);
	case IOSF_PORT_LPSS:
		return iosf_lpss_read(step->reg);
	case IOSF_PORT_0xa2:
		return iosf_porta2_read(step->reg);
	case IOSF_PORT_CCU:
		return iosf_ccu_read(step->reg);
	case IOSF_PORT_SSUS:
		return iosf_ssus_read(step->reg);
	default:
		printk(BIOS_DEBUG, "No read support for IOSF port 0x%x.\n",
		       step->id);
		break;
	}
	return 0;
}

static void reg_script_write_iosf(struct reg_script_context *ctx)
{
	const struct reg_script *step = reg_script_get_step(ctx);

	switch (step->id) {
	case IOSF_PORT_AUNIT:
		iosf_aunit_write(step->reg, step->value);
		break;
	case IOSF_PORT_CPU_BUS:
		iosf_cpu_bus_write(step->reg, step->value);
		break;
	case IOSF_PORT_BUNIT:
		iosf_bunit_write(step->reg, step->value);
		break;
	case IOSF_PORT_DUNIT_CH0:
		iosf_dunit_write(step->reg, step->value);
		break;
	case IOSF_PORT_PMC:
		iosf_punit_write(step->reg, step->value);
		break;
	case IOSF_PORT_USBPHY:
		iosf_usbphy_write(step->reg, step->value);
		break;
	case IOSF_PORT_SEC:
		iosf_sec_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x45:
		iosf_port45_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x46:
		iosf_port46_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x47:
		iosf_port47_write(step->reg, step->value);
		break;
	case IOSF_PORT_SCORE:
		iosf_score_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x55:
		iosf_port55_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x58:
		iosf_port58_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x59:
		iosf_port59_write(step->reg, step->value);
		break;
	case IOSF_PORT_0x5a:
		iosf_port5a_write(step->reg, step->value);
		break;
	case IOSF_PORT_USHPHY:
		iosf_ushphy_write(step->reg, step->value);
		break;
	case IOSF_PORT_SCC:
		iosf_scc_write(step->reg, step->value);
		break;
	case IOSF_PORT_LPSS:
		iosf_lpss_write(step->reg, step->value);
		break;
	case IOSF_PORT_0xa2:
		iosf_porta2_write(step->reg, step->value);
		break;
	case IOSF_PORT_CCU:
		iosf_ccu_write(step->reg, step->value);
		break;
	case IOSF_PORT_SSUS:
		iosf_ssus_write(step->reg, step->value);
		break;
	default:
		printk(BIOS_DEBUG, "No write support for IOSF port 0x%x.\n",
		       step->id);
		break;
	}
}
#endif


static uint64_t reg_script_read_msr(struct reg_script_context *ctx)
{
#if CONFIG_ARCH_X86
	const struct reg_script *step = reg_script_get_step(ctx);
	msr_t msr = rdmsr(step->reg);
	uint64_t value = msr.hi;
	value <<= 32;
	value |= msr.lo;
	return value;
#else
	return 0;	/* keep non-x86 builds well-formed */
#endif
}

static void reg_script_write_msr(struct reg_script_context *ctx)
{
#if CONFIG_ARCH_X86
	const struct reg_script *step = reg_script_get_step(ctx);
	msr_t msr;
	msr.hi = step->value >> 32;
	msr.lo = step->value & 0xffffffff;
	wrmsr(step->reg, msr);
#endif
}

#ifndef __PRE_RAM__
/* Default routine provided for systems without platform specific busses */
const struct reg_script_bus_entry *__attribute__((weak))
	platform_bus_table(size_t *table_entries)
{
	/* No platform bus type table supplied */
	*table_entries = 0;
	return NULL;
}

/* Locate the structure containing the platform specific bus access routines */
static const struct reg_script_bus_entry
	*find_bus(const struct reg_script *step)
{
	const struct reg_script_bus_entry *bus;
	size_t table_entries;
	size_t i;

	/* Locate the platform specific bus */
	bus = platform_bus_table(&table_entries);
	for (i = 0; i < table_entries; i++) {
		if (bus[i].type == step->type)
			return &bus[i];
	}

	/* Bus not found */
	return NULL;
}
#endif

static uint64_t reg_script_read(struct reg_script_context *ctx)
{
	const struct reg_script *step = reg_script_get_step(ctx);

	switch (step->type) {
	case REG_SCRIPT_TYPE_PCI:
		return reg_script_read_pci(ctx);
	case REG_SCRIPT_TYPE_IO:
		return reg_script_read_io(ctx);
	case REG_SCRIPT_TYPE_MMIO:
		return reg_script_read_mmio(ctx);
	case REG_SCRIPT_TYPE_RES:
		return reg_script_read_res(ctx);
	case REG_SCRIPT_TYPE_MSR:
		return reg_script_read_msr(ctx);
#if CONFIG_SOC_INTEL_BAYTRAIL
	case REG_SCRIPT_TYPE_IOSF:
		return reg_script_read_iosf(ctx);
#endif
	default:
#ifndef __PRE_RAM__
		{
			const struct reg_script_bus_entry *bus;

			/* Read from the platform specific bus */
			bus = find_bus(step);
			if (NULL != bus)
				return bus->reg_script_read(ctx);
		}
#endif
		printk(BIOS_ERR,
			"Unsupported read type (0x%x) for this device!\n",
			step->type);
		break;
	}
	return 0;
}

static void reg_script_write(struct reg_script_context *ctx)
{
	const struct reg_script *step = reg_script_get_step(ctx);

	switch (step->type) {
	case REG_SCRIPT_TYPE_PCI:
		reg_script_write_pci(ctx);
		break;
	case REG_SCRIPT_TYPE_IO:
		reg_script_write_io(ctx);
		break;
	case REG_SCRIPT_TYPE_MMIO:
		reg_script_write_mmio(ctx);
		break;
	case REG_SCRIPT_TYPE_RES:
		reg_script_write_res(ctx);
		break;
	case REG_SCRIPT_TYPE_MSR:
		reg_script_write_msr(ctx);
		break;
#if CONFIG_SOC_INTEL_BAYTRAIL
	case REG_SCRIPT_TYPE_IOSF:
		reg_script_write_iosf(ctx);
		break;
#endif
	default:
#ifndef __PRE_RAM__
		{
			const struct reg_script_bus_entry *bus;

			/* Write to the platform specific bus */
			bus = find_bus(step);
			if (NULL != bus) {
				bus->reg_script_write(ctx);
				return;
			}
		}
#endif
		printk(BIOS_ERR,
			"Unsupported write type (0x%x) for this device!\n",
			step->type);
		break;
	}
}

static void reg_script_rmw(struct reg_script_context *ctx)
{
	uint64_t value;
	const struct reg_script *step = reg_script_get_step(ctx);
	struct reg_script write_step = *step;

	value = reg_script_read(ctx);
	value &= step->mask;
	value |= step->value;
	write_step.value = value;
	reg_script_set_step(ctx, &write_step);
	reg_script_write(ctx);
	reg_script_set_step(ctx, step);
}
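The read-modify-write step at the end, in isolation: keep the bits selected by mask, then OR in the new value. A self-check with assumed register contents:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t reg = 0xabcd1234;	/* assumed current value */
	uint64_t mask = 0xffff0000;	/* bits to preserve */
	uint64_t set = 0x00005678;	/* bits to set */

	assert(((reg & mask) | set) == 0xabcd5678);
	return 0;
}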
Example #12
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0 ; i < num_counters ; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear cccrs outside our concern */
	for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; addr += addr_increment()) { 
		wrmsr(addr, 0, 0);
	}
	
	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){ 
		wrmsr(addr, 0, 0);
	}
	
	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){ 
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {		
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}		
	
	/* setup all counters */
	for (i = 0 ; i < num_counters ; ++i) {
		if (counter_config[i].event) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
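The three ESCR loops above share one shape: walk a contiguous MSR address range with a stride of addr_increment() (1 without hyper-threading, 2 when staggered between HT siblings), zeroing each register. Sketched with the documented MSR_P4_BSU_ESCR0/MSR_P4_SSU_ESCR0 bounds (0x3a0 and 0x3be) and a declared-only wrmsr:

#include <stdint.h>

extern void wrmsr_sketch(uint32_t msr, uint32_t lo, uint32_t hi);

static void clear_escr_range(uint32_t first, uint32_t last,
			     unsigned int stag, unsigned int stride)
{
	uint32_t addr;

	for (addr = first + stag; addr <= last; addr += stride)
		wrmsr_sketch(addr, 0, 0);	/* zero the whole ESCR */
}

/* e.g. clear_escr_range(0x3a0, 0x3be, stag, addr_increment()); */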
Example #13
static uint64_t
getctr(uint32_t regno)
{
	return rdmsr(regno + PerfCtrbase);
}
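A matching write helper sketched in the same style; this assumes a wrmsr(register, value) counterpart exists in the surrounding kernel, and the externs stand in for its real declarations:

#include <stdint.h>

extern void wrmsr(uint32_t regno, uint64_t v);	/* assumed signature */
extern uint32_t PerfCtrbase;	/* declared here only for the sketch */

static void
putctr(uint32_t regno, uint64_t v)
{
	wrmsr(regno + PerfCtrbase, v);
}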
Example #14
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
#ifdef PAE
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
			elf32_nxstack = 1;
		}
#endif
		break;
#endif
	default:
		break;
	}
	enable_sse();

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow to disable CLFLUSH feature manually by
	 * hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * OS should flush L1 cache by itself because no PC-98 supports
	 * non-Intel CPUs.  Use wbinvd instruction before DMA transfer
	 * when need_pre_dma_flush = 1, use invd instruction after DMA
	 * transfer when need_post_dma_flush = 1.  If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
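A worked example of the CLFLUSH line-size math in the comment above: CPUID.1 EBX bits 15:8 report the flush line size in 8-byte units, so a raw value of 8 means 64-byte lines:

#include <stdio.h>

int main(void)
{
	unsigned int ebx = 0x00010800;	/* assumed CPUID.1 EBX value */
	unsigned int line = ((ebx >> 8) & 0xff) * 8;

	printf("clflush line size: %u bytes\n", line);	/* 8 * 8 = 64 */
	return 0;
}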
Example #15
static int
pn_decode_acpi(device_t dev, device_t perf_dev)
{
	int i, j, n;
	uint64_t status;
	uint32_t ctrl;
	u_int cpuid;
	u_int regs[4];
	struct pn_softc *sc;
	struct powernow_state state;
	struct cf_setting sets[POWERNOW_MAX_STATES];
	int count = POWERNOW_MAX_STATES;
	int type;
	int rv;

	if (perf_dev == NULL)
		return (ENXIO);

	rv = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count);
	if (rv)
		return (ENXIO);
	rv = CPUFREQ_DRV_TYPE(perf_dev, &type);
	if (rv || (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
		return (ENXIO);

	sc = device_get_softc(dev);

	do_cpuid(0x80000001, regs);
	cpuid = regs[0];
	if ((cpuid & 0xfff) == 0x760)
		sc->errata |= A0_ERRATA;

	ctrl = 0;
	sc->sgtc = 0;
	for (n = 0, i = 0; i < count; ++i) {
		ctrl = sets[i].spec[PX_SPEC_CONTROL];
		switch (sc->pn_type) {
		case PN7_TYPE:
			state.fid = ACPI_PN7_CTRL_TO_FID(ctrl);
			state.vid = ACPI_PN7_CTRL_TO_VID(ctrl);
			if ((sc->errata & A0_ERRATA) &&
			    (pn7_fid_to_mult[state.fid] % 10) == 5)
				continue;
			break;
		case PN8_TYPE:
			state.fid = ACPI_PN8_CTRL_TO_FID(ctrl);
			state.vid = ACPI_PN8_CTRL_TO_VID(ctrl);
			break;
		}
		state.freq = sets[i].freq * 1000;
		state.power = sets[i].power;

		j = n;
		while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) {
			memcpy(&sc->powernow_states[j],
			    &sc->powernow_states[j - 1],
			    sizeof(struct powernow_state));
			--j;
		}
		memcpy(&sc->powernow_states[j], &state,
		    sizeof(struct powernow_state));
		++n;
	}

	sc->powernow_max_states = n;
	state = sc->powernow_states[0];
	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);

	switch (sc->pn_type) {
	case PN7_TYPE:
		sc->sgtc = ACPI_PN7_CTRL_TO_SGTC(ctrl);
		/*
		 * XXX Some bios forget the max frequency!
		 * This maybe indicates we have the wrong tables.  Therefore,
		 * don't implement a quirk, but fallback to BIOS legacy
		 * tables instead.
		 */
		if (PN7_STA_MFID(status) != state.fid) {
			device_printf(dev, "ACPI MAX frequency not found\n");
			return (EINVAL);
		}
		sc->fsb = state.freq / 100 / pn7_fid_to_mult[state.fid];
		break;
	case PN8_TYPE:
		sc->vst = ACPI_PN8_CTRL_TO_VST(ctrl),
		sc->mvs = ACPI_PN8_CTRL_TO_MVS(ctrl),
		sc->pll = ACPI_PN8_CTRL_TO_PLL(ctrl),
		sc->rvo = ACPI_PN8_CTRL_TO_RVO(ctrl),
		sc->irt = ACPI_PN8_CTRL_TO_IRT(ctrl);
		sc->low = 0; /* XXX */

		/*
		 * powernow k8 supports only one low frequency.
		 */
		if (sc->powernow_max_states >= 2 &&
		    (sc->powernow_states[sc->powernow_max_states - 2].fid < 8))
			return (EINVAL);
		sc->fsb = state.freq / 100 / pn8_fid_to_mult[state.fid];
		break;
	}

	return (0);
}
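The j/while loop above is an insertion sort that keeps powernow_states ordered by descending frequency. The same idea isolated on plain ints with assumed inputs:

#include <stdio.h>

int main(void)
{
	const int freqs[] = { 800, 1400, 1000, 1800 };	/* assumed inputs */
	int states[4];
	int i, j, n = 0;

	for (i = 0; i < 4; i++) {
		/* Shift smaller entries right, then drop the new one in. */
		j = n;
		while (j > 0 && states[j - 1] < freqs[i]) {
			states[j] = states[j - 1];
			--j;
		}
		states[j] = freqs[i];
		++n;
	}
	for (i = 0; i < n; i++)
		printf("%d ", states[i]);	/* prints 1800 1400 1000 800 */
	printf("\n");
	return 0;
}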
Example #16
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{

	struct sys_info *sysinfo = (struct sys_info *)(CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE);
	u32 bsp_apicid = 0;
	u32 val;
	msr_t msr;

	if (!cpu_init_detectedx && boot_cpu()) {
		/* Nothing special needs to be done to find bus 0 */
		/* Allow the HT devices to be found */
		/* mov bsp to bus 0xff when > 8 nodes */
		set_bsp_node_CHtExtNodeCfgEn();
		enumerate_ht_chain();

		/* Setup the rom access for 4M */
		amd8111_enable_rom();
	}

	post_code(0x30);

	if (bist == 0) {
		bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo); /* mmconf is inited in init_cpus */
		/* All cores run this but the BSP(node0,core0) is the only core that returns. */
	}

	post_code(0x32);

	w83627hf_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
	uart_init();
	console_init();
	printk(BIOS_DEBUG, "\n");

//	dump_mem(CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE-0x200, CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE);

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);

	// Load MPB
	val = cpuid_eax(1);
	printk(BIOS_DEBUG, "BSP Family_Model: %08x \n", val);
	printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n",sysinfo,sysinfo+1);
	printk(BIOS_DEBUG, "bsp_apicid = %02x \n", bsp_apicid);
	printk(BIOS_DEBUG, "cpu_init_detectedx = %08lx \n", cpu_init_detectedx);

	/* Setup sysinfo defaults */
	set_sysinfo_in_ram(0);

	update_microcode(val);
	post_code(0x33);

	cpuSetAMDMSR();
	post_code(0x34);

	amd_ht_init(sysinfo);
	post_code(0x35);

	/* Setup nodes PCI space and start core 0 AP init. */
	finalize_node_setup(sysinfo);

	/* Setup any mainboard PCI settings etc. */
	setup_mb_resource_map();
	post_code(0x36);

	/* Wait for all the AP core0s started by finalize_node_setup. */
	/* FIXME: A bunch of cores are going to start output to serial at once.
	   It would be nice to fix up printk spinlocks for ROM XIP mode.
	   I think it could be done by putting the spinlock flag in the cache
	   of the BSP located right after sysinfo.
	 */
	wait_all_core0_started();

#if CONFIG_LOGICAL_CPUS == 1
	/* Core0 on each node is configured. Now setup any additional cores. */
	printk(BIOS_DEBUG, "start_other_cores()\n");
	start_other_cores();
	post_code(0x37);
	wait_all_other_cores_started(bsp_apicid);
#endif

	post_code(0x38);

#if SET_FIDVID == 1
	msr = rdmsr(0xc0010071);
	printk(BIOS_DEBUG, "\nBegin FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);

	/* FIXME: The sb fid change may survive the warm reset and only
	   need to be done once.*/
	enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);

	post_code(0x39);

	if (!warm_reset_detect(0)) {			// BSP is node 0
		init_fidvid_bsp(bsp_apicid, sysinfo->nodes);
	} else {
		init_fidvid_stage2(bsp_apicid, 0);	// BSP is node 0
	}

	post_code(0x3A);

	/* show final fid and vid */
	msr = rdmsr(0xc0010071);
	printk(BIOS_DEBUG, "End FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);
#endif

	/* Reset for HT, FIDVID, PLL and errata changes to take effect. */
	if (!warm_reset_detect(0)) {
		print_info("...WARM RESET...\n\n\n");
		soft_reset_x(sysinfo->sbbusn, sysinfo->sbdn);
		die("After soft_reset_x - shouldn't see this message!!!\n");
	}

	post_code(0x3B);

	/* FIXME: Move this to chipset init.
	   Enable cf9 for hard reset. */
	print_debug("enable_cf9_x()\n");
	enable_cf9_x(sysinfo->sbbusn, sysinfo->sbdn);
	post_code(0x3C);

	/* Now is the time to set ctrl in sysinfo; */
	printk(BIOS_DEBUG, "fill_mem_ctrl()\n");
	fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);
	post_code(0x3D);

	printk(BIOS_DEBUG, "enable_smbus()\n");
	enable_smbus();
	post_code(0x3E);

	memreset_setup();
	post_code(0x40);

//	die("Die Before MCT init.");

	printk(BIOS_DEBUG, "raminit_amdmct()\n");
	raminit_amdmct(sysinfo);
	post_code(0x41);

/*
	dump_pci_device_range(PCI_DEV(0, 0x18, 0), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 1), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 2), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 3), 0, 0x200);
*/

//	ram_check(0x00200000, 0x00200000 + (640 * 1024));
//	ram_check(0x40200000, 0x40200000 + (640 * 1024));

//	die("After MCT init before CAR disabled.");

	post_code(0x42);
	printk(BIOS_DEBUG, "\n*** Yes, the copy/decompress is taking a while, FIXME!\n");
	post_cache_as_ram();	// BSP switch stack to ram, copy then execute LB.
	post_code(0x43);	// Should never see this post code.

}
Example #17
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	static const u16 spd_addr[] = { DIMM0, 0, 0, 0, DIMM1, 0, 0, 0, };
	int needs_reset = 0;
	u32 bsp_apicid = 0;
	msr_t msr;
	struct cpuid_result cpuid1;
	struct sys_info *sysinfo = &sysinfo_car;

	if (!cpu_init_detectedx && boot_cpu()) {
		/* Nothing special needs to be done to find bus 0 */
		/* Allow the HT devices to be found */
		enumerate_ht_chain();
		/* sb600_lpc_port80(); */
		sb600_pci_port80();
	}

	if (bist == 0)
		bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo);

	enable_rs690_dev8();
	sb600_lpc_init();

	ite_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
	it8712f_kill_watchdog();

	console_init();

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);
	printk(BIOS_DEBUG, "bsp_apicid=0x%x\n", bsp_apicid);

	setup_tim8690_resource_map();

	setup_coherent_ht_domain();

#if CONFIG_LOGICAL_CPUS
	/* It is said that core1 should be started only after all core0s have launched */
	wait_all_core0_started();
	start_other_cores();
#endif
	wait_all_aps_started(bsp_apicid);

	ht_setup_chains_x(sysinfo);

	/* run _early_setup before soft-reset. */
	rs690_early_setup();
	sb600_early_setup();

	/* Check to see if processor is capable of changing FIDVID  */
	/* otherwise it will throw a GP# when reading FIDVID_STATUS */
	cpuid1 = cpuid(0x80000007);
	if ((cpuid1.edx & 0x6) == 0x6) {
		/* Read FIDVID_STATUS */
		msr = rdmsr(0xc0010042);
		printk(BIOS_DEBUG, "begin msr fid, vid: hi=0x%x, lo=0x%x\n", msr.hi, msr.lo);

		enable_fid_change();
		enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);
		init_fidvid_bsp(bsp_apicid);

		/* show final fid and vid */
		msr = rdmsr(0xc0010042);
		printk(BIOS_DEBUG, "end msr fid, vid: hi=0x%x, lo=0x%x\n", msr.hi, msr.lo);
	} else {
		printk(BIOS_DEBUG, "Changing FIDVID not supported\n");
	}

	needs_reset = optimize_link_coherent_ht();
	needs_reset |= optimize_link_incoherent_ht(sysinfo);
	rs690_htinit();
	printk(BIOS_DEBUG, "needs_reset=0x%x\n", needs_reset);

	if (needs_reset) {
		print_info("ht reset -\n");
		soft_reset();
	}

	allow_all_aps_stop(bsp_apicid);

	/* Now is the time to set ctrl; */
	printk(BIOS_DEBUG, "sysinfo->nodes: %2x  sysinfo->ctrl: %p  spd_addr: %p\n",
		     sysinfo->nodes, sysinfo->ctrl, spd_addr);
	fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);
	sdram_initialize(sysinfo->nodes, sysinfo->ctrl, sysinfo);

	rs690_before_pci_init();
	sb600_before_pci_init();

	post_cache_as_ram();
}
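The capability gate above, isolated: CPUID Fn8000_0007 EDX bit 1 (FID control) and bit 2 (VID control) must both be set before the FIDVID MSRs are touched, hence the (edx & 0x6) == 0x6 test:

#include <stdio.h>

int main(void)
{
	unsigned int edx = 0x0000003f;	/* assumed CPUID 0x80000007 EDX */

	if ((edx & 0x6) == 0x6)
		printf("FID/VID control supported\n");
	else
		printf("Changing FIDVID not supported\n");
	return 0;
}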
Example #18
void
glxpcib_attach(struct device *parent, struct device *self, void *aux)
{
	struct glxpcib_softc *sc = (struct glxpcib_softc *)self;
	struct timecounter *tc = &sc->sc_timecounter;
#ifndef SMALL_KERNEL
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	u_int64_t wa;
#if NGPIO > 0
	u_int64_t ga;
	struct gpiobus_attach_args gba;
	int i, gpio = 0;
#endif
	u_int64_t sa;
	struct i2cbus_attach_args iba;
	int i2c = 0;
	bus_space_handle_t tmpioh;
#endif
	tc->tc_get_timecount = glxpcib_get_timecount;
	tc->tc_counter_mask = 0xffffffff;
	tc->tc_frequency = 3579545;
	tc->tc_name = "CS5536";
	tc->tc_quality = 1000;
	tc->tc_priv = sc;
	tc_init(tc);

	printf(": rev %d, 32-bit %lluHz timer",
	    (int)rdmsr(AMD5536_REV) & AMD5536_REV_MASK,
	    tc->tc_frequency);

#ifndef SMALL_KERNEL
	/* Attach the watchdog timer */
	sc->sc_iot = pa->pa_iot;
	wa = rdmsr(MSR_LBAR_MFGPT);
	if (wa & MSR_LBAR_ENABLE &&
	    !bus_space_map(sc->sc_iot, wa & MSR_MFGPT_ADDR_MASK,
	    MSR_MFGPT_SIZE, 0, &sc->sc_ioh)) {
		/* count in seconds (as upper level desires) */
		bus_space_write_2(sc->sc_iot, sc->sc_ioh, AMD5536_MFGPT0_SETUP,
		    AMD5536_MFGPT_CNT_EN | AMD5536_MFGPT_CMP2EV |
		    AMD5536_MFGPT_CMP2 | AMD5536_MFGPT_DIV_MASK |
		    AMD5536_MFGPT_STOP_EN);
		wdog_register(glxpcib_wdogctl_cb, sc);
		sc->sc_wdog = 1;
		printf(", watchdog");
	}

#if NGPIO > 0
	/* map GPIO I/O space */
	sc->sc_gpio_iot = pa->pa_iot;
	ga = rdmsr(MSR_LBAR_GPIO);
	if (ga & MSR_LBAR_ENABLE &&
	    !bus_space_map(sc->sc_gpio_iot, ga & MSR_GPIO_ADDR_MASK,
	    MSR_GPIO_SIZE, 0, &sc->sc_gpio_ioh)) {
		printf(", gpio");

		/* initialize pin array */
		for (i = 0; i < AMD5536_GPIO_NPINS; i++) {
			sc->sc_gpio_pins[i].pin_num = i;
			sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT |
			    GPIO_PIN_OUTPUT | GPIO_PIN_OPENDRAIN |
			    GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN |
			    GPIO_PIN_INVIN | GPIO_PIN_INVOUT;

			/* read initial state */
			sc->sc_gpio_pins[i].pin_state =
			    glxpcib_gpio_pin_read(sc, i);
		}

		/* create controller tag */
		sc->sc_gpio_gc.gp_cookie = sc;
		sc->sc_gpio_gc.gp_pin_read = glxpcib_gpio_pin_read;
		sc->sc_gpio_gc.gp_pin_write = glxpcib_gpio_pin_write;
		sc->sc_gpio_gc.gp_pin_ctl = glxpcib_gpio_pin_ctl;

		gba.gba_name = "gpio";
		gba.gba_gc = &sc->sc_gpio_gc;
		gba.gba_pins = sc->sc_gpio_pins;
		gba.gba_npins = AMD5536_GPIO_NPINS;
		gpio = 1;
	}
#endif /* NGPIO */

	/* Map SMB I/O space */
	sc->sc_smb_iot = pa->pa_iot;
	sa = rdmsr(MSR_LBAR_SMB);
	if (sa & MSR_LBAR_ENABLE &&
	    !bus_space_map(sc->sc_smb_iot, sa & MSR_SMB_ADDR_MASK,
	    MSR_SMB_SIZE, 0, &sc->sc_smb_ioh)) {
		printf(", i2c");

		/* Enable controller */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_CTL2, AMD5536_SMB_CTL2_EN |
		    AMD5536_SMB_CTL2_FREQ);

		/* Disable interrupts */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_CTL1, 0);

		/* Disable slave address */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_ADDR, 0);

		/* Stall the bus after start */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_CTL1, AMD5536_SMB_CTL1_STASTRE);

		/* Attach I2C framework */
		sc->sc_smb_ic.ic_cookie = sc;
		sc->sc_smb_ic.ic_acquire_bus = glxpcib_smb_acquire_bus;
		sc->sc_smb_ic.ic_release_bus = glxpcib_smb_release_bus;
		sc->sc_smb_ic.ic_send_start = glxpcib_smb_send_start;
		sc->sc_smb_ic.ic_send_stop = glxpcib_smb_send_stop;
		sc->sc_smb_ic.ic_initiate_xfer = glxpcib_smb_initiate_xfer;
		sc->sc_smb_ic.ic_read_byte = glxpcib_smb_read_byte;
		sc->sc_smb_ic.ic_write_byte = glxpcib_smb_write_byte;

		rw_init(&sc->sc_smb_lck, "iiclk");

		bzero(&iba, sizeof(iba));
		iba.iba_name = "iic";
		iba.iba_tag = &sc->sc_smb_ic;
		i2c = 1;
	}

	/* Map PMS I/O space and enable the ``Power Immediate'' feature */
	sa = rdmsr(MSR_LBAR_PMS);
	if (sa & MSR_LBAR_ENABLE &&
	    !bus_space_map(pa->pa_iot, sa & MSR_PMS_ADDR_MASK,
	    MSR_PMS_SIZE, 0, &tmpioh)) {
		bus_space_write_4(pa->pa_iot, tmpioh, AMD5536_PMS_SSC,
		    AMD5536_PMS_SSC_SET_PI);
		bus_space_barrier(pa->pa_iot, tmpioh, AMD5536_PMS_SSC, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		bus_space_unmap(pa->pa_iot, tmpioh, MSR_PMS_SIZE);
	}
#endif /* SMALL_KERNEL */
	pcibattach(parent, self, aux);

#ifndef SMALL_KERNEL
#if NGPIO > 0
	if (gpio)
		config_found(&sc->sc_dev, &gba, gpiobus_print);
#endif
	if (i2c)
		config_found(&sc->sc_dev, &iba, iicbus_print);

	config_search(glxpcib_search, self, pa);
#endif
}
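
/*
 * Sketch of the LBAR probe pattern repeated above: each CS5536 device
 * publishes its I/O base in an MSR, where the enable bit must be set
 * and the address bits masked out before mapping. glxpcib_map_lbar()
 * and its signature are hypothetical; the MSR_LBAR_ENABLE semantics
 * are taken from the example itself.
 */
static int
glxpcib_map_lbar(bus_space_tag_t iot, int msr, u_int64_t addr_mask,
    bus_size_t size, bus_space_handle_t *ioh)
{
	u_int64_t lbar = rdmsr(msr);

	if ((lbar & MSR_LBAR_ENABLE) == 0)
		return (1);	/* region not enabled by firmware */
	return (bus_space_map(iot, lbar & addr_mask, size, 0, ioh));
}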
Exemple #19
0
static int __init detect_init_APIC(void)
{
	u32 h, l, features;

	/* Disabled by kernel option? */
	if (enable_local_apic < 0)
		return -1;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
		    (boot_cpu_data.x86 == 15))
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Override the BIOS and try to enable the local
		 * APIC only if "lapic" was specified.
		 */
		if (enable_local_apic <= 0) {
			printk("Local APIC disabled by BIOS -- "
			       "you can enable it with \"lapic\"\n");
			return -1;
		}
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 or later and AMD K7
		 * (Model > 1) or later.
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
			enabled_via_apicbase = 1;
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	printk("Found and enabled local APIC!\n");

	apic_pm_activate();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
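
/*
 * Minimal sketch of the APIC_BASE MSR layout handled above: bit 11
 * (MSR_IA32_APICBASE_ENABLE) is the global enable, and the masked base
 * field holds the physical address. lapic_base_from_msr() is a
 * hypothetical helper, not a kernel API.
 */
static unsigned long lapic_base_from_msr(void)
{
	u32 l, h;

	rdmsr(MSR_IA32_APICBASE, l, h);
	if (!(l & MSR_IA32_APICBASE_ENABLE))
		return 0;			/* disabled in hardware */
	return l & MSR_IA32_APICBASE_BASE;	/* physical base address */
}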
Exemple #20
0
u_int
glxpcib_get_timecount(struct timecounter *tc)
{
	return rdmsr(AMD5536_TMC);
}
Exemple #21
0
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	static const uint16_t spd_addr[] = {
		//first node
		RC0|DIMM0, RC0|DIMM2, 0, 0,
		RC0|DIMM1, RC0|DIMM3, 0, 0,
#if CONFIG_MAX_PHYSICAL_CPUS > 1
		//second node
		RC1|DIMM0, RC1|DIMM2, 0, 0,
		RC1|DIMM1, RC1|DIMM3, 0, 0,
#endif
	};
	struct sys_info *sysinfo = &sysinfo_car;

	int needs_reset = 0;
	unsigned bsp_apicid = 0;

	if (bist == 0)
		bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo);

	winbond_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
	console_init();

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);

	printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n",sysinfo,sysinfo+1);

	setup_dl145g1_resource_map();
	//setup_default_resource_map();

	setup_coherent_ht_domain();
	wait_all_core0_started();
#if CONFIG_LOGICAL_CPUS
	// Start core1 only after every core0 has launched.
	start_other_cores();
	wait_all_other_cores_started(bsp_apicid);
#endif

	ht_setup_chains_x(sysinfo);
#if CONFIG_SET_FIDVID
	/* Check to see if processor is capable of changing FIDVID */
	/* otherwise it will throw a GP# when reading FIDVID_STATUS */
	struct cpuid_result cpuid1 = cpuid(0x80000007);
	if ((cpuid1.edx & 0x6) == 0x6) {
		{
			/* Read FIDVID_STATUS */
			msr_t msr;
			msr = rdmsr(0xc0010042);
			printk(BIOS_DEBUG, "begin msr fid, vid %08x%08x\n", msr.hi, msr.lo);
		}

		enable_fid_change();
		enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);
		init_fidvid_bsp(bsp_apicid);

		// show final fid and vid
		{
			msr_t msr;
			msr = rdmsr(0xc0010042);
			printk(BIOS_DEBUG, "end msr fid, vid %08x%08x\n", msr.hi, msr.lo);
		}
	} else {
		printk(BIOS_DEBUG, "Changing FIDVID not supported\n");
	}
#endif

	needs_reset |= optimize_link_coherent_ht();
	needs_reset |= optimize_link_incoherent_ht(sysinfo);

	if (needs_reset) {
		printk(BIOS_INFO, "ht reset -\n");
		soft_reset_x(sysinfo->sbbusn, sysinfo->sbdn);
	}

	enable_smbus();

	int i;
	for(i = 0; i < 2; i++) {
		activate_spd_rom(&sysinfo->ctrl[i]);
	}
	for(i = RC0; i <= RC1; i<<=1) {
		change_i2c_mux(i);
	}

	//dump_spd_registers(&sysinfo->ctrl[0]);
	//dump_spd_registers(&sysinfo->ctrl[1]);
	//dump_smbus_registers();

	allow_all_aps_stop(bsp_apicid);

	// Now fill in the memory controller (ctrl) settings.
	fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);

	memreset_setup();
#if CONFIG_SET_FIDVID
	init_timer(); // Need to use TMICT to synchronize FID/VID
#endif
	sdram_initialize(sysinfo->nodes, sysinfo->ctrl, sysinfo);

	//dump_pci_devices();

	post_cache_as_ram();
}
Exemple #22
0
static u32 find_fid_from_pstate(u32 pstate)
{
	u32 hi, lo;
	rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
	return lo & HW_PSTATE_FID_MASK;
}
Exemple #23
0
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG____NEVER_FOR_L4
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
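
/*
 * The errata workarounds above all follow one read-modify-write shape
 * on the split lo/hi MSR interface. A generic sketch of that shape
 * (msr_set_bits_lo() is a hypothetical name, not a kernel API):
 */
static void msr_set_bits_lo(unsigned int msr, unsigned long bits)
{
	unsigned long lo, hi;

	rdmsr(msr, lo, hi);
	if ((lo & bits) != bits) {	/* write back only when needed */
		lo |= bits;
		wrmsr(msr, lo, hi);
	}
}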
Exemple #24
0
static u32 find_did_from_pstate(u32 pstate)
{
	u32 hi, lo;
	rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
	return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
}
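
/*
 * Sketch combining this helper with find_fid_from_pstate() above: on
 * AMD family 10h parts the core frequency is conventionally derived as
 * 100 MHz * (CpuFid + 0x10) >> CpuDid. Treat the formula as a
 * family-specific assumption; pstate_freq_mhz() is a hypothetical name.
 */
static u32 pstate_freq_mhz(u32 pstate)
{
	u32 fid = find_fid_from_pstate(pstate);
	u32 did = find_did_from_pstate(pstate);

	return (100 * (fid + 0x10)) >> did;
}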
Exemple #25
0
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	static const uint16_t spd_addr[] = {
		// first node
		DIMM0, DIMM2, 0, 0,
		DIMM1, DIMM3, 0, 0,
		// second node
		DIMM4, DIMM6, 0, 0,
		DIMM5, DIMM7, 0, 0,
	};

	struct sys_info *sysinfo = &sysinfo_car;
	int needs_reset;
	unsigned bsp_apicid = 0;

	if (!cpu_init_detectedx && boot_cpu()) {
		/* Nothing special needs to be done to find bus 0 */
		/* Allow the HT devices to be found */
		enumerate_ht_chain();
		bcm5785_enable_lpc();
		pc87417_enable_dev(RTC_DEV); /* Enable RTC */
	}

	if (bist == 0)
		bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo);

	pilot_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);

	console_init();

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);

//	setup_early_ipmi_serial();
	pilot_early_init(SERIAL_DEV); // the config port is taken from SERIAL_DEV
	printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n", sysinfo, sysinfo + 1);
	printk(BIOS_DEBUG, "bsp_apicid=%02x\n", bsp_apicid);

#if CONFIG_MEM_TRAIN_SEQ == 1
	set_sysinfo_in_ram(0); // on the BSP, so all APs are held until sysinfo is in RAM
#endif
	setup_coherent_ht_domain();

	wait_all_core0_started();
#if CONFIG_LOGICAL_CPUS
	// Start core1 only after every core0 has launched.
	/* Because optimize_link_coherent_ht was moved out of
	 * setup_coherent_ht_domain, we must make sure the last core0 has
	 * started, especially on two-way systems (there may be APIC ID
	 * conflicts otherwise).
	 */
	start_other_cores();
	wait_all_other_cores_started(bsp_apicid);
#endif

	/* Set up the HT chains and store link pairs for later optimization */
	ht_setup_chains_x(sysinfo); // initializes sblnk, sbbusn, nodes and sbdn
	bcm5785_early_setup();

#if CONFIG_SET_FIDVID
	{
		msr_t msr;
		msr = rdmsr(0xc0010042);
		printk(BIOS_DEBUG, "begin msr fid, vid %08x %08x\n", msr.hi, msr.lo);
	}
	enable_fid_change();
	enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);
	init_fidvid_bsp(bsp_apicid);
	// show final fid and vid
	{
		msr_t msr;
		msr = rdmsr(0xc0010042);
		printk(BIOS_DEBUG, "end msr fid, vid %08x %08x\n", msr.hi, msr.lo);
	}
#endif

	needs_reset = optimize_link_coherent_ht();
	needs_reset |= optimize_link_incoherent_ht(sysinfo);

	// The FID/VID change issues one LDTSTOP, which also makes the HT changes take effect.
	if (needs_reset) {
		printk(BIOS_INFO, "ht reset -\n");
		soft_reset();
	}

	allow_all_aps_stop(bsp_apicid);

	// Now fill in the memory controller (ctrl) settings in sysinfo.
	fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);
	enable_smbus();

	// Do we need the ACPI timer or TSC? Only debugging needs them for better output.
	/* all APs stopped? */
	// init_timer(); // Need to use TMICT to synchronize FID/VID

	sdram_initialize(sysinfo->nodes, sysinfo->ctrl, sysinfo);

	post_cache_as_ram();
}
Exemple #26
0
static void generic_machine_check(struct pt_regs * regs, long error_code)
{
    int recover=1;
    u32 alow, ahigh, high, low;
    u32 mcgstl, mcgsth;
    int i;
    struct notifier_mc_err mc_err;

    rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
    if(mcgstl&(1<<0))	/* Recoverable ? */
        recover=0;

    /* Make sure unrecoverable MCEs reach the console */
    if(recover & 3)
        oops_in_progress++;

    printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
           smp_processor_id(), mcgsth, mcgstl);

    if (regs && (mcgstl & 2))
        printk(KERN_EMERG "RIP <%02lx>:%016lx RSP %016lx\n",
               regs->cs, regs->rip, regs->rsp);

    for(i=0; i<banks; i++)
    {
        if ((1UL<<i) & ignored_banks)
            continue;

        rdmsr(MSR_IA32_MC0_STATUS+i*4,low, high);
        if(high&(1<<31))
        {
            memset(&mc_err, 0x00, sizeof(mc_err));
            mc_err.cpunum = safe_smp_processor_id();
            mc_err.banknum = i;
            mc_err.mci_status = ((u64)high << 32) | low;
            if(high&(1<<29))
                recover|=1;
            if(high&(1<<25))
                recover|=2;
            printk(KERN_EMERG "Bank %d: %08x%08x", i, high, low);
            high&=~(1<<31);
            if(high&(1<<27))
            {
                rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
                mc_err.mci_misc = ((u64)ahigh << 32) | alow;
                printk("[%08x%08x]", alow, ahigh);
            }
            if(high&(1<<26))
            {
                rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
                mc_err.mci_addr = ((u64)ahigh << 32) | alow;
                printk(" at %08x%08x",
                       ahigh, alow);
            }
            rdmsr(MSR_IA32_MC0_CTL+i*4, alow, ahigh);
            mc_err.mci_ctl = ((u64)ahigh << 32) | alow;

            printk("\n");

            /* Clear it */
            wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
            /* Serialize */
            wmb();
            notifier_call_chain(&mc_notifier_list, X86_VENDOR_INTEL, &mc_err);
        }
    }

    if(recover&2)
        panic("CPU context corrupt");
    if(recover&1)
        panic("Unable to continue");
    printk(KERN_EMERG "Attempting to continue.\n");
    mcgstl&=~(1<<2);
    wrmsr(MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
}
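
/*
 * Sketch of the MCi_STATUS bit positions decoded above, expressed as
 * bits of the high u32 (bit n of the 64-bit MSR is bit n - 32 of
 * 'high'). The names follow the usual architectural convention; the
 * macros are illustrative, not taken from this kernel.
 */
#define MCI_STATUS_VAL		(1U << (63 - 32))	/* bank holds a valid error */
#define MCI_STATUS_UC		(1U << (61 - 32))	/* uncorrected error */
#define MCI_STATUS_MISCV	(1U << (59 - 32))	/* MCi_MISC is valid */
#define MCI_STATUS_ADDRV	(1U << (58 - 32))	/* MCi_ADDR is valid */
#define MCI_STATUS_PCC		(1U << (57 - 32))	/* processor context corrupt */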
Exemple #27
0
void cpuRegInit(int debug_clock_disable, u8 dimm0, u8 dimm1, int terminated)
{
	int msrnum;
	msr_t msr;

	/* Castle 2.0 BTM periodic sync period. */
	/*      [40:37] 1 sync record per 256 bytes */
	printk(BIOS_DEBUG, "Castle 2.0 BTM periodic sync period.\n");
	msrnum = CPU_PF_CONF;
	msr = rdmsr(msrnum);
	msr.hi |= (0x8 << 5);
	wrmsr(msrnum, msr);

	/*
	 * LX performance setting.
	 * Enable Quack for fewer re-RAS on the MC
	 */
	printk(BIOS_DEBUG, "Enable Quack for fewer re-RAS on the MC\n");
	msrnum = GLIU0_ARB;
	msr = rdmsr(msrnum);
	msr.hi &= ~ARB_UPPER_DACK_EN_SET;
	msr.hi |= ARB_UPPER_QUACK_EN_SET;
	wrmsr(msrnum, msr);

	msrnum = GLIU1_ARB;
	msr = rdmsr(msrnum);
	msr.hi &= ~ARB_UPPER_DACK_EN_SET;
	msr.hi |= ARB_UPPER_QUACK_EN_SET;
	wrmsr(msrnum, msr);

	/* GLIU port active enable, limit south pole masters
	 * (AES and PCI) to one outstanding transaction.
	 */
	printk(BIOS_DEBUG, " GLIU port active enable\n");
	msrnum = GLIU1_PORT_ACTIVE;
	msr = rdmsr(msrnum);
	msr.lo &= ~0x880;
	wrmsr(msrnum, msr);

	/* Set the Delay Control in GLCP */
	printk(BIOS_DEBUG, "Set the Delay Control in GLCP\n");
	SetDelayControl(dimm0, dimm1, terminated);

	/*  Enable RSDC */
	printk(BIOS_DEBUG, "Enable RSDC\n");
	msrnum = CPU_AC_SMM_CTL;
	msr = rdmsr(msrnum);
	msr.lo |= SMM_INST_EN_SET;
	wrmsr(msrnum, msr);

	/* FPU imprecise exceptions bit */
	printk(BIOS_DEBUG, "FPU imprecise exceptions bit\n");
	msrnum = CPU_FPU_MSR_MODE;
	msr = rdmsr(msrnum);
	msr.lo |= FPU_IE_SET;
	wrmsr(msrnum, msr);

	/* Power Savers (Do after BIST) */
	/* Enable Suspend on HLT & PAUSE instructions */
	printk(BIOS_DEBUG, "Enable Suspend on HLT & PAUSE instructions\n");
	msrnum = CPU_XC_CONFIG;
	msr = rdmsr(msrnum);
	msr.lo |= XC_CONFIG_SUSP_ON_HLT | XC_CONFIG_SUSP_ON_PAUSE;
	wrmsr(msrnum, msr);

	/* Enable SUSP and allow TSC to run in Suspend (keep speed detection happy) */
	printk(BIOS_DEBUG, "Enable SUSP and allow TSC to run in Suspend\n");
	msrnum = CPU_BC_CONF_0;
	msr = rdmsr(msrnum);
	msr.lo |= TSC_SUSP_SET | SUSP_EN_SET;
	msr.lo &= 0x0F0FFFFFF;
	msr.lo |= 0x002000000;	/* PBZ213: Set PAUSEDLY = 2 */
	wrmsr(msrnum, msr);

	/* Disable the debug clock to save power. */
	/* NOTE: leave it enabled for fs2 debug */
	if (debug_clock_disable && 0) {
		msrnum = GLCP_DBGCLKCTL;
		msr.hi = 0;
		msr.lo = 0;
		wrmsr(msrnum, msr);
	}

	/* Setup throttling delays to proper mode if it is ever enabled. */
	printk(BIOS_DEBUG, "Setup throttling delays to proper mode\n");
	msrnum = GLCP_TH_OD;
	msr.hi = 0;
	msr.lo = 0x0000603c;
	wrmsr(msrnum, msr);
	printk(BIOS_DEBUG, "Done cpuRegInit\n");
}
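
/*
 * cpuRegInit() repeats a single idiom: read an MSR into the msr_t pair,
 * set or clear a few bits in the low word, write it back. A condensed
 * sketch of that idiom in the same msr_t convention (msr_setclr_lo() is
 * a hypothetical helper, not a coreboot API):
 */
static void msr_setclr_lo(int msrnum, uint32_t set, uint32_t clr)
{
	msr_t msr = rdmsr(msrnum);

	msr.lo &= ~clr;
	msr.lo |= set;
	wrmsr(msrnum, msr);
}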
Exemple #28
0
static int
pn_decode_pst(device_t dev)
{
	int maxpst;
	struct pn_softc *sc;
	u_int cpuid, maxfid, startvid;
	u_long sig;
	struct psb_header *psb;
	uint8_t *p;
	u_int regs[4];
	uint64_t status;

	sc = device_get_softc(dev);

	do_cpuid(0x80000001, regs);
	cpuid = regs[0];

	if ((cpuid & 0xfff) == 0x760)
		sc->errata |= A0_ERRATA;

	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);

	switch (sc->pn_type) {
	case PN7_TYPE:
		maxfid = PN7_STA_MFID(status);
		startvid = PN7_STA_SVID(status);
		break;
	case PN8_TYPE:
		maxfid = PN8_STA_MFID(status);
		/*
		 * We should really use a variable named 'maxvid' on K8,
		 * but why introduce a new variable just for that?
		 */
		startvid = PN8_STA_MVID(status);
		break;
	default:
		return (ENODEV);
	}

	if (bootverbose) {
		device_printf(dev, "STATUS: 0x%jx\n", status);
		device_printf(dev, "STATUS: maxfid: 0x%02x\n", maxfid);
		device_printf(dev, "STATUS: %s: 0x%02x\n",
		    sc->pn_type == PN7_TYPE ? "startvid" : "maxvid",
		    startvid);
	}

	sig = bios_sigsearch(PSB_START, PSB_SIG, PSB_LEN, PSB_STEP, PSB_OFF);
	if (sig) {
		struct pst_header *pst;

		psb = (struct psb_header*)(uintptr_t)BIOS_PADDRTOVADDR(sig);

		switch (psb->version) {
		default:
			return (ENODEV);
		case 0x14:
			/*
			 * We can't be picky about numpst since at least
			 * some systems have a value of 1 and some have 2.
			 * We trust that cpuid_is_k7() will be better at
			 * catching that we're on a K8 anyway.
			 */
			if (sc->pn_type != PN8_TYPE)
				return (EINVAL);
			sc->vst = psb->settlingtime;
			sc->rvo = PN8_PSB_TO_RVO(psb->res1),
			sc->irt = PN8_PSB_TO_IRT(psb->res1),
			sc->mvs = PN8_PSB_TO_MVS(psb->res1),
			sc->low = PN8_PSB_TO_BATT(psb->res1);
			if (bootverbose) {
				device_printf(dev, "PSB: VST: %d\n",
				    psb->settlingtime);
				device_printf(dev, "PSB: RVO %x IRT %d "
				    "MVS %d BATT %d\n",
				    sc->rvo,
				    sc->irt,
				    sc->mvs,
				    sc->low);
			}
			break;
		case 0x12:
			if (sc->pn_type != PN7_TYPE)
				return (EINVAL);
			sc->sgtc = psb->settlingtime * sc->fsb;
			if (sc->sgtc < 100 * sc->fsb)
				sc->sgtc = 100 * sc->fsb;
			break;
		}

		p = ((uint8_t *) psb) + sizeof(struct psb_header);

		maxpst = 200;

		do {
			/* track the current entry so the loop condition tests it */
			pst = (struct pst_header *)p;

			if (cpuid == pst->cpuid &&
			    maxfid == pst->maxfid &&
			    startvid == pst->startvid) {
				sc->powernow_max_states = pst->numpstates;
				switch (sc->pn_type) {
				case PN7_TYPE:
					if (abs(sc->fsb - pst->fsb) > 5)
						continue;
					break;
				case PN8_TYPE:
					break;
				}
				return (decode_pst(sc,
				    p + sizeof(struct pst_header),
				    sc->powernow_max_states));
			}

			p += sizeof(struct pst_header) + (2 * pst->numpstates);
		} while (cpuid_is_k7(pst->cpuid) && maxpst--);

		device_printf(dev, "no match for extended cpuid %.3x\n", cpuid);
	}

	return (ENODEV);
}
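
/*
 * Sketch of the PST layout walked above: the BIOS table is a
 * pst_header followed by numpstates (fid, vid) byte pairs, which is why
 * the cursor advances by sizeof(struct pst_header) + 2 * numpstates.
 * pst_next() is a hypothetical helper; the layout is inferred from the
 * stride in the example.
 */
static uint8_t *
pst_next(uint8_t *p)
{
	struct pst_header *pst = (struct pst_header *)p;

	return (p + sizeof(struct pst_header) + 2 * pst->numpstates);
}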
Exemple #29
0
void init_pm(const sysinfo_t *const sysinfo, int do_freq_scaling_cfg)
{
	const stepping_t stepping = sysinfo->stepping;
	const fsb_clock_t fsb = sysinfo->selected_timings.fsb_clock;
	const mem_clock_t memclk = sysinfo->selected_timings.mem_clock;

	MCHBAR16(0xc14) = 0;
	MCHBAR16(0xc20) = 0;
	MCHBAR32(0xfc0) = 0x001f00fd;
	MCHBAR32(0xfc0) |= 3 << 25;
	MCHBAR32(0xfc0) |= 1 << 11;
	MCHBAR8(0xfb0) = 3;
	MCHBAR8(0xf10) |= 1 << 1;
	if (fsb == FSB_CLOCK_667MHz) {
		MCHBAR16(0xc3a) = 0xea6;
		MCHBAR8(0xc16) = (MCHBAR8(0xc16) & 0x80) | 0x0e;
	} else if (fsb == FSB_CLOCK_800MHz) {
		MCHBAR16(0xc3a) = 0x1194;
		MCHBAR8(0xc16) = (MCHBAR8(0xc16) & 0x80) | 0x10;
	} else if (fsb == FSB_CLOCK_1067MHz) {
		MCHBAR16(0xc3a) = 0x1777;
		MCHBAR8(0xc16) = (MCHBAR8(0xc16) & 0x80) | 0x15;
	}
	MCHBAR8(0xfb8) = 3;
	if (fsb == FSB_CLOCK_667MHz)
		MCHBAR16(0xc38) = 0x0ea6;
	else if (fsb == FSB_CLOCK_800MHz)
		MCHBAR16(0xc38) = 0x1194;
	else if (fsb == FSB_CLOCK_1067MHz)
		MCHBAR16(0xc38) = 0x1777;
	MCHBAR8(0xf10) |= 1 << 5;
	MCHBAR16(0xc16) |= 3 << 12;
	MCHBAR32(0xf60) = 0x01030419;
	if (fsb == FSB_CLOCK_667MHz) {
		MCHBAR32(0xf00) = 0x00000600;
		MCHBAR32(0xf04) = 0x00001d80;
	} else if (fsb == FSB_CLOCK_800MHz) {
		MCHBAR32(0xf00) = 0x00000700;
		MCHBAR32(0xf04) = 0x00002380;
	} else if (fsb == FSB_CLOCK_1067MHz) {
		MCHBAR32(0xf00) = 0x00000900;
		MCHBAR32(0xf04) = 0x00002e80;
	}
	MCHBAR16(0xf08) = 0x730f;
	if (fsb == FSB_CLOCK_667MHz)
		MCHBAR16(0xf0c) = 0x0b96;
	else if (fsb == FSB_CLOCK_800MHz)
		MCHBAR16(0xf0c) = 0x0c99;
	else if (fsb == FSB_CLOCK_1067MHz)
		MCHBAR16(0xf0c) = 0x10a4;
	MCHBAR32(0xf80) |= 1 << 31;

	MCHBAR32(0x40) = (MCHBAR32(0x40) & ~(0x3f << 24)) |
		((sysinfo->cores == 4) ? (1 << 24) : 0);

	MCHBAR32(0x40) &= ~(1 << 19);
	MCHBAR32(0x40) |= 1 << 13;
	MCHBAR32(0x40) |= 1 << 21;
	MCHBAR32(0x40) |= 1 << 9;
	if (stepping > STEPPING_B1) {
		if (fsb != FSB_CLOCK_1067MHz) {
			MCHBAR32(0x70) |= 1 << 30;
		} else {
			MCHBAR32(0x70) &= ~(1 << 30);
		}
	}
	if (stepping < STEPPING_B1)
		MCHBAR32(0x70) |= 1 << 29;
	else
		MCHBAR32(0x70) &= ~(1 << 29);
	if (stepping > STEPPING_B1) {
		MCHBAR32(0x70) |= 1 << 28;
		MCHBAR32(0x70) |= 1 << 25;
	}
	if (stepping > STEPPING_B0) {
		if (fsb != FSB_CLOCK_667MHz)
			MCHBAR32(0x70) = (MCHBAR32(0x70) & ~(3<<21)) | (1 << 21);
		else
			MCHBAR32(0x70) = (MCHBAR32(0x70) & ~(3<<21));
	}
	if (stepping > STEPPING_B2)
		MCHBAR32(0x44) |= 1 << 30;
	MCHBAR32(0x44) |= 1 << 31;
	if (sysinfo->cores == 2)
		MCHBAR32(0x44) |= 1 << 26;
	MCHBAR32(0x44) |= 1 << 21;
	MCHBAR32(0x44) = (MCHBAR32(0x44) & ~(3 << 24)) | (2 << 24);
	MCHBAR32(0x44) |= 1 << 5;
	MCHBAR32(0x44) |= 1 << 4;
	MCHBAR32(0x90) = (MCHBAR32(0x90) & ~7) | 4;
	MCHBAR32(0x94) |= 1 << 29;
	MCHBAR32(0x94) |= 1 << 11;
	if (stepping < STEPPING_B0)
		MCHBAR32(0x94) = (MCHBAR32(0x94) & ~(3 << 19)) | (2 << 19);
	if (stepping > STEPPING_B2)
		MCHBAR32(0x94) |= 1 << 21;
	MCHBAR8(0xb00) &= ~1;
	MCHBAR8(0xb00) |= 1 << 7;
	if (fsb != FSB_CLOCK_1067MHz)
		MCHBAR8(0x75) |= 1 << 6;
	else
		MCHBAR8(0x75) &= 1 << 1;
	MCHBAR8(0x77) |= 3;
	if (stepping >= STEPPING_B1)
		MCHBAR8(0x77) |= 1 << 2;
	if (stepping > STEPPING_B2)
		MCHBAR8(0x77) |= 1 << 4;
	if (MCHBAR16(0x90) & 0x100)
		MCHBAR8(0x90) &= ~(7 << 4);
	if (stepping >= STEPPING_B0)
		MCHBAR8(0xd0) |= 1 << 1;
	MCHBAR8(0xbd8) |= 3 << 2;
	if (stepping >= STEPPING_B3)
		MCHBAR32(0x70) |= 1 << 0;
	MCHBAR32(0x70) |= 1 << 3;
	if (stepping >= STEPPING_B0)
		MCHBAR32(0x70) &= ~(1 << 16);
	else
		MCHBAR32(0x70) |= 1 << 16;
	if (stepping >= STEPPING_B3)
		MCHBAR8(0xc14) |= 1 << 1;
	if (stepping >= STEPPING_B1)
		MCHBAR16(0xffc) = (MCHBAR16(0xffc) & ~0x7ff) | 0x7c0;
	MCHBAR16(0x48) = (MCHBAR16(0x48) & ~(0xff << 2)) | (0xaa << 2);
	if (stepping == STEPPING_CONVERSION_A1) {
		MCHBAR16(0x40) |= 1 << 12;
		MCHBAR32(0x94) |= 3 << 22;
	}

	const int cpu_supports_super_lfm =
				rdmsr(MSR_EXTENDED_CONFIG).lo & (1 << 27);
	if ((stepping >= STEPPING_B0) && cpu_supports_super_lfm) {
		MCHBAR16(CLKCFG_MCHBAR) &= ~(1 << 7);
		MCHBAR16(CLKCFG_MCHBAR) |= 1 << 14;
	} else {
		MCHBAR16(CLKCFG_MCHBAR) &= ~(1 << 14);
		MCHBAR16(CLKCFG_MCHBAR) |= 1 << 7;
		MCHBAR32(0x44) &= ~(1 << 31); /* Was set above. */
	}

	if ((sysinfo->gfx_type != GMCH_PM45) && do_freq_scaling_cfg &&
			(sysinfo->gfx_type != GMCH_UNKNOWN))
		init_freq_scaling(sysinfo->gfx_type,
				  sysinfo->gs45_low_power_mode);

	/* This has to be the last write to CLKCFG. */
	if ((fsb == FSB_CLOCK_1067MHz) && (memclk == MEM_CLOCK_667MT))
		MCHBAR32(CLKCFG_MCHBAR) &= ~(1 << 17);
}
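
/*
 * Sketch of the capability probe near the end of init_pm(): bit 27 of
 * MSR_EXTENDED_CONFIG's low word gates Super LFM support.
 * cpu_has_super_lfm() is a hypothetical wrapper around the
 * msr_t-returning rdmsr() used above.
 */
static int cpu_has_super_lfm(void)
{
	return !!(rdmsr(MSR_EXTENDED_CONFIG).lo & (1 << 27));
}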
Exemple #30
0
static void model_15_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 15 Init.\n");

	u8 i;
	msr_t msr;
	int msrno;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

	disable_cache();
	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	// BSP: make a0000-bffff UC, c0000-fffff WB
	msr.lo = msr.hi = 0;
	wrmsr(0x259, msr);
	msr.lo = msr.hi = 0x1e1e1e1e;
	wrmsr(0x250, msr);
	wrmsr(0x258, msr);
	for (msrno = 0x268; msrno <= 0x26f; msrno++)
		wrmsr(msrno, msr);

	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++)
		wrmsr(MCI_STATUS + (i * 4), msr);

	/* Enable the local CPU APICs */
	setup_lapic();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif
	PSPProgBar3Msr(NULL);

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
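
/*
 * model_15_init() brackets its fixed-MTRR writes with the SYSCFG
 * MtrrFixDramModEn/MtrrFixDramEn toggle. A condensed sketch of that
 * gate, assuming the same coreboot names (fixed_mtrr_mod() itself is
 * hypothetical):
 */
static void fixed_mtrr_mod(int unlock)
{
	msr_t msr = rdmsr(SYSCFG_MSR);

	if (unlock) {
		/* expose the RdDram/WrDram extension bits for editing */
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	} else {
		/* relock and let the extension bits take effect */
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	}
	wrmsr(SYSCFG_MSR, msr);
}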