Example #1
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	struct sys_info *sysinfo = &sysinfo_car;
	static const u8 spd_addr[] = {RC00, DIMM0, DIMM2, 0, 0, DIMM1, DIMM3, 0, 0, };
	u32 bsp_apicid = 0, val;
	msr_t msr;

	if (!cpu_init_detectedx && boot_cpu()) {
		/* Nothing special needs to be done to find bus 0 */
		/* Allow the HT devices to be found */
		/* Move the BSP to bus 0xff when there are more than 8 nodes */
		set_bsp_node_CHtExtNodeCfgEn();
		enumerate_ht_chain();
		sb7xx_51xx_pci_port80();
	}

	post_code(0x30);

	if (bist == 0) {
		bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo); /* mmconf is initialized in init_cpus */
		/* All cores run this but the BSP(node0,core0) is the only core that returns. */
	}

	post_code(0x32);

	enable_rs780_dev8();
	sb7xx_51xx_lpc_init();

	f71859_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);

	console_init();

//	dump_mem(CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE-0x200, CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE);

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);

	/* Load MPB */
	val = cpuid_eax(1);
	printk(BIOS_DEBUG, "BSP Family_Model: %08x\n", val);
	printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n", sysinfo, sysinfo + 1);
	printk(BIOS_DEBUG, "bsp_apicid = %02x\n", bsp_apicid);
	printk(BIOS_DEBUG, "cpu_init_detectedx = %08lx\n", cpu_init_detectedx);

	/* Setup sysinfo defaults */
	set_sysinfo_in_ram(0);

#if CONFIG_UPDATE_CPU_MICROCODE
	update_microcode(val);
#endif
	post_code(0x33);

	cpuSetAMDMSR();
	post_code(0x34);

	amd_ht_init(sysinfo);
	post_code(0x35);

	/* Set up each node's PCI space and start core 0 AP init. */
	finalize_node_setup(sysinfo);

	/* Set up any mainboard PCI settings, etc. */
	setup_mb_resource_map();
	post_code(0x36);

	/* Wait for core 0 of each AP node, started by finalize_node_setup. */
	/* FIXME: A bunch of cores are going to start output to serial at once.
	   It would be nice to fix up the printk spinlocks for ROM XIP mode.
	   I think it could be done by putting the spinlock flag in the cache
	   of the BSP located right after sysinfo.
	 */
	wait_all_core0_started();

 #if CONFIG_LOGICAL_CPUS
	/* Core0 on each node is configured. Now setup any additional cores. */
	printk(BIOS_DEBUG, "start_other_cores()\n");
	start_other_cores();
	post_code(0x37);
	wait_all_other_cores_started(bsp_apicid);
 #endif

	post_code(0x38);

	/* run _early_setup before soft-reset. */
	rs780_early_setup();
	sb7xx_51xx_early_setup();

 #if CONFIG_SET_FIDVID
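	/* MSR C001_0071 is the AMD Family 10h COFVID Status register
	   (current FID/VID). */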
	msr = rdmsr(0xc0010071);
	printk(BIOS_DEBUG, "\nBegin FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);

	/* FIXME: The sb fid change may survive the warm reset and only
	   need to be done once.*/
	enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);

	post_code(0x39);

	if (!warm_reset_detect(0)) {			// BSP is node 0
		init_fidvid_bsp(bsp_apicid, sysinfo->nodes);
	} else {
		init_fidvid_stage2(bsp_apicid, 0);	// BSP is node 0
	}

	post_code(0x3A);

	/* show final fid and vid */
	msr = rdmsr(0xc0010071);
	printk(BIOS_DEBUG, "End FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);
 #endif

	rs780_htinit();

	/* Reset for HT, FIDVID, PLL and errata changes to take effect. */
	if (!warm_reset_detect(0)) {
		print_info("...WARM RESET...\n\n\n");
		soft_reset();
		die("After soft_reset - shouldn't see this message!!!\n");
	}

	post_code(0x3B);

	/* Now is the time to set ctrl in sysinfo. */
	printk(BIOS_DEBUG, "fill_mem_ctrl()\n");
	fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);

	post_code(0x40);

//	die("Die Before MCT init.");

	printk(BIOS_DEBUG, "raminit_amdmct()\n");
	raminit_amdmct(sysinfo);
	post_code(0x41);

/*
	dump_pci_device_range(PCI_DEV(0, 0x18, 0), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 1), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 2), 0, 0x200);
	dump_pci_device_range(PCI_DEV(0, 0x18, 3), 0, 0x200);
*/

//	die("After MCT init before CAR disabled.");

	rs780_before_pci_init();
	sb7xx_51xx_before_pci_init();

	post_code(0x42);
	post_cache_as_ram();	/* BSP switches its stack to RAM, then copies and executes LB. */
	post_code(0x43);	/* Should never see this POST code. */
}
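
For context, report_bist_failure() above needs to do little more than halt when the BIST result (delivered in EAX at reset) is non-zero. A minimal sketch, assuming coreboot-style printk()/die() helpers rather than the tree's exact implementation:

void report_bist_failure(u32 bist)
{
	if (bist != 0) {
		/* A non-zero BIST result means the CPU's built-in
		   self test failed; refuse to continue booting. */
		printk(BIOS_EMERG, "BIST failed: %08x\n", bist);
		die("BIST failure");
	}
}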
Example #2
static int cpu_request_microcode(int cpu, const void *buf, size_t bufsize)
{
    struct microcode_amd *mc_amd, *mc_old;
    size_t offset = 0;
    size_t last_offset, applied_offset = 0;
    int error = 0, save_error = 1;
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
    unsigned int current_cpu_id;
    unsigned int equiv_cpu_id;

    /* We should bind the task to the CPU */
    BUG_ON(cpu != raw_smp_processor_id());

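    /* CPUID leaf 1, EAX: the processor signature (family/model/stepping). */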
    current_cpu_id = cpuid_eax(0x00000001);

    if ( *(const uint32_t *)buf != UCODE_MAGIC )
    {
        printk(KERN_ERR "microcode: Wrong microcode patch file magic\n");
        error = -EINVAL;
        goto out;
    }

    mc_amd = xmalloc(struct microcode_amd);
    if ( !mc_amd )
    {
        printk(KERN_ERR "microcode: Cannot allocate memory for microcode patch\n");
        error = -ENOMEM;
        goto out;
    }

    /*
     * Multiple container file support:
     * 1. Check whether this container file has an equiv_cpu_id match.
     * 2. If not, fast-forward to the next container file.
     */
    while ( offset < bufsize )
    {
        error = install_equiv_cpu_table(mc_amd, buf, &offset);
        if ( error )
        {
            printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
            break;
        }

        if ( find_equiv_cpu_id(mc_amd->equiv_cpu_table, current_cpu_id,
                               &equiv_cpu_id) )
                break;

        /*
         * Could happen as we advance 'offset' early
         * in install_equiv_cpu_table
         */
        if ( offset > bufsize )
        {
            printk(KERN_ERR "microcode: Microcode buffer overrun\n");
            error = -EINVAL;
            break;
        }

        error = container_fast_forward(buf, bufsize - offset, &offset);
        if ( error )
        {
            printk(KERN_ERR "microcode: CPU%d incorrect or corrupt container file\n"
                   "microcode: Failed to update patch level. "
                   "Current lvl:%#x\n", cpu, uci->cpu_sig.rev);
            break;
        }
    }

    if ( error )
    {
        xfree(mc_amd);
        goto out;
    }

    mc_old = uci->mc.mc_amd;
    /* implicitly validates uci->mc.mc_valid */
    uci->mc.mc_amd = mc_amd;

    /*
     * It's possible the data file has multiple matching ucode entries;
     * let's keep searching until the latest version.
     */
    mc_amd->mpb = NULL;
    mc_amd->mpb_size = 0;
    last_offset = offset;
    while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
                                               &offset)) == 0 )
    {
        if ( microcode_fits(mc_amd, cpu) )
        {
            error = apply_microcode(cpu);
            if ( error )
                break;
            applied_offset = last_offset;
        }

        last_offset = offset;

        if ( offset >= bufsize )
            break;

        /*
         * 1. Given a situation where multiple containers exist and the
         *    correct patch lives in a container that is not the last one.
         * 2. We match equivalent ids using find_equiv_cpu_id() from the
         *    earlier while() (in this case it matches on an earlier
         *    container file and we break).
         * 3. Proceed to while ( (error = get_ucode_from_buffer_amd(mc_amd,
         *                                  buf, bufsize, &offset)) == 0 )
         * 4. Find the correct patch using microcode_fits() and apply it
         *    (assume apply_microcode() is successful).
         * 5. The while() loop from (3) continues to parse the binary as
         *    there is a subsequent container file, but...
         * 6. ...a correct patch can only live in one container, not in any
         *    subsequent one. (Refer to the docs for more info.) Therefore
         *    we don't have to parse a subsequent container and can abort
         *    the process here.
         * 7. This ensures that 'error' retains its success value (0)
         *    before if ( mpbuf->type != UCODE_UCODE_TYPE ) evaluates to
         *    false and returns -EINVAL.
         */
        if ( offset + SECTION_HDR_SIZE <= bufsize &&
             *(const uint32_t *)(buf + offset) == UCODE_MAGIC )
            break;
    }

    /*
     * On success, keep the microcode patch for re-application on resume.
     */
    if ( applied_offset )
    {
        save_error = get_ucode_from_buffer_amd(
            mc_amd, buf, bufsize, &applied_offset);

        if ( save_error )
            error = save_error;
    }

    if ( save_error )
    {
        xfree(mc_amd);
        uci->mc.mc_amd = mc_old;
    }
    else
        xfree(mc_old);

  out:
    svm_host_osvw_init();

    /*
     * In some cases we may return an error even if processor's microcode has
     * been updated. For example, the first patch in a container file is loaded
     * successfully but subsequent container file processing encounters a
     * failure.
     */
    return error;
}
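
The container loop above hinges on find_equiv_cpu_id(), which maps the raw CPUID(1).EAX signature to the equivalence ID that patch headers reference. A minimal sketch of that lookup, with the table layout assumed from the AMD container format (not Xen's exact code):

struct equiv_cpu_entry {
    uint32_t installed_cpu;           /* raw CPUID signature */
    uint32_t fixed_errata_mask;
    uint32_t fixed_errata_compare;
    uint16_t equiv_cpu;               /* ID that patch headers refer to */
    uint16_t reserved;
};

static int find_equiv_cpu_id(const struct equiv_cpu_entry *table,
                             unsigned int current_cpu_id,
                             unsigned int *equiv_cpu_id)
{
    unsigned int i;

    /* The table is terminated by an all-zero entry. */
    for ( i = 0; table[i].installed_cpu != 0; i++ )
    {
        if ( current_cpu_id == table[i].installed_cpu )
        {
            *equiv_cpu_id = table[i].equiv_cpu;
            return 1;
        }
    }

    return 0;
}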
Example #3
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	u32 val;
#if CONFIG_HAVE_ACPI_RESUME
	void *resume_backup_memory;
#endif

	amd_initmmio();

	hudson_lpc_port80();

	if (!cpu_init_detectedx && boot_cpu()) {
		post_code(0x30);

		post_code(0x31);
		console_init();
	}

	/* Halt if there was a built in self test failure */
	post_code(0x34);
	report_bist_failure(bist & 0x7FFFFFFF); /* Mask bit 31; it can be set as a result of a silicon observation (erratum). */

	/* Load MPB */
	val = cpuid_eax(1);
	printk(BIOS_DEBUG, "BSP Family_Model: %08x\n", val);
	printk(BIOS_DEBUG, "cpu_init_detectedx = %08lx\n", cpu_init_detectedx);

	post_code(0x37);
	AGESAWRAPPER(amdinitreset);
	post_code(0x38);
	printk(BIOS_DEBUG, "Got past agesawrapper_amdinitreset\n");

	post_code(0x39);
	AGESAWRAPPER(amdinitearly);
	int s3resume = acpi_is_wakeup_s3();
	if (!s3resume) {
		post_code(0x40);
		AGESAWRAPPER(amdinitpost);
		post_code(0x41);
		AGESAWRAPPER(amdinitenv);
		/* TODO: Disabling the cache here is not OK. */
		disable_cache_as_ram();
	} else { /* S3 detect */
		printk(BIOS_INFO, "S3 detected\n");

		post_code(0x60);
		AGESAWRAPPER(amdinitresume);

		AGESAWRAPPER(amds3laterestore);

		post_code(0x61);
		prepare_for_resume();
	}

	if (s3resume || acpi_is_wakeup_s4()) {
		outb(0xEE, PM_INDEX);
		outb(0x8, PM_DATA);
	}

	post_code(0x50);
	copy_and_run();

	post_code(0x54);  /* Should never see this post code. */
}
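
AGESAWRAPPER() above is a convenience macro. A plausible shape, assuming the usual agesawrapper_*() entry points that return an AGESA status code (an assumption; the real macro lives in the board's romstage support code):

/* Hypothetical sketch: call the matching agesawrapper_*() entry point and
 * report a non-zero AGESA status. */
#define AGESAWRAPPER(func)						\
	do {								\
		u32 agesa_ret = agesawrapper_##func();			\
		if (agesa_ret != 0)					\
			printk(BIOS_ERR, "agesawrapper_" #func		\
			       " failed: %x\n", agesa_ret);		\
	} while (0)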
Example #4
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

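	/*
	 * Family 0xf, model >= 3 (later P4) and family 6, model >= 0x0e
	 * (Core and newer) are known to have a constant-rate TSC.
	 */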
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports a 64-byte clflush size but does I/O in 128-byte chunks */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is the CPUID 8000_0007 EDX value. Bit 8 means the TSC
	 * runs at a constant rate with P/T-states and does not stop in deep
	 * C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets; we explicitly turn it off in that case).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
//		if (!check_tsc_unstable())
//			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}
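
All four examples build on cpuid_eax(), which executes CPUID and returns only the EAX result. For context, a minimal sketch of how such a helper is commonly implemented with GCC inline assembly (an illustration, not any one tree's exact definition):

static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf number goes in EAX; zero ECX for leaves that take a
	   sub-leaf index. All four output registers are written by CPUID. */
	asm volatile("cpuid"
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "a" (op), "c" (0));
	return eax;
}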