Example #1
File: cpu.c  Project: CreatorDev/u-boot
static int cpu_x86_baytrail_probe(struct udevice *dev)
{
	debug("Init BayTrail core\n");

	/*
	 * On BayTrail the turbo disable bit is actually scoped at the
	 * building-block level, not package. For non-BSP cores that are
	 * within a building block, enable turbo. The cores within the BSP's
	 * building block will just see it already enabled and move on.
	 */
	if (lapicid())
		turbo_enable();

	/* Dynamic L2 shrink enable and threshold */
	msr_clrsetbits_64(MSR_PMG_CST_CONFIG_CONTROL, 0x3f000f, 0xe0008);

	/* Disable C1E */
	msr_clrsetbits_64(MSR_POWER_CTL, 2, 0);
	msr_setbits_64(MSR_POWER_MISC, 0x44);

	/* Set this core to max frequency ratio */
	set_max_freq();

	return 0;
}
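Every example on this page keys off lapicid(); the `if (lapicid())` test above works because this code relies on the BSP coming up with local APIC ID 0, so a non-zero ID means "this core is an AP". For reference, here is a minimal sketch of how such a helper can be implemented in xAPIC mode, reading bits 31:24 of the memory-mapped APIC ID register. The name my_lapicid() and the hard-coded default base are illustrative only; the projects above go through their own lapic_read() wrappers.

#include <stdint.h>

#define LAPIC_DEFAULT_BASE	0xfee00000UL	/* xAPIC default MMIO base (assumed) */
#define LAPIC_ID_REG_OFFSET	0x20		/* local APIC ID register */

/* Illustrative helper: the APIC ID lives in bits 31:24 of the ID register. */
static inline uint8_t my_lapicid(void)
{
	volatile uint32_t *id_reg =
		(volatile uint32_t *)(LAPIC_DEFAULT_BASE + LAPIC_ID_REG_OFFSET);

	return *id_reg >> 24;
}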
Example #2
/**
 * Normally this function is defined in lapic.h as an always inline function
 * that just keeps the CPU in a hlt() loop. This does not work on all CPUs.
 * I think all hyperthreading CPUs might need this version, but I could only
 * verify this on the Intel Core Duo.
 */
void stop_this_cpu(void)
{
	int timeout;
	unsigned long send_status;
	unsigned long id;

	id = lapicid();

	printk(BIOS_DEBUG, "CPU %ld going down...\n", id);

	/* send an LAPIC INIT to myself */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG |
				LAPIC_INT_ASSERT | LAPIC_DM_INIT);

	/* wait for the ipi send to finish */
#if DEBUG_HALT_SELF
	printk(BIOS_SPEW, "Waiting for send to finish...\n");
#endif
	timeout = 0;
	do {
#if DEBUG_HALT_SELF
		printk(BIOS_SPEW, "+");
#endif
		udelay(100);
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	if (timeout >= 1000) {
#if DEBUG_HALT_SELF
		printk(BIOS_ERR, "timed out\n");
#endif
	}
	mdelay(10);

#if DEBUG_HALT_SELF
	printk(BIOS_SPEW, "Deasserting INIT.\n");
#endif
	/* Deassert the LAPIC INIT */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);

#if DEBUG_HALT_SELF
	printk(BIOS_SPEW, "Waiting for send to finish...\n");
#endif
	timeout = 0;
	do {
#if DEBUG_HALT_SELF
		printk(BIOS_SPEW, "+");
#endif
		udelay(100);
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	if (timeout >= 1000) {
#if DEBUG_HALT_SELF
		printk(BIOS_ERR, "timed out\n");
#endif
	}

	halt();
}
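The function above polls the ICR delivery-status bit with two identical busy-wait loops. As a readability note, that wait could be factored into a small helper; a sketch using the same LAPIC accessors as the code above (the helper name and return convention are illustrative):

/* Illustrative refactor of the delivery-status polling used twice above;
 * returns 0 once LAPIC_ICR_BUSY clears, -1 if it never does. */
static int wait_for_ipi_delivery(void)
{
	unsigned long send_status;
	int timeout = 0;

	do {
		udelay(100);
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	return (timeout >= 1000) ? -1 : 0;
}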
Example #3
File: mp_init.c  Project: disdi/u-boot-x86
/*
 * By the time APs call ap_init() caching has been setup, and microcode has
 * been loaded
 */
static void ap_init(unsigned int cpu_index)
{
	struct udevice *dev;
	int apic_id;
	int ret;

	/* Ensure the local apic is enabled */
	enable_lapic();

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, &dev);
	if (ret) {
		debug("Unknown CPU apic_id %x\n", apic_id);
		goto done;
	}

	debug("AP: slot %d apic_id %x, dev %s\n", cpu_index, apic_id,
	      dev ? dev->name : "(apic_id not found)");

	/* Walk the flight plan */
	ap_do_flight_plan(dev);

	/* Park the AP */
	debug("parking\n");
done:
	stop_this_cpu();
}
Example #4
/*
 * BSP deploys APs to callout_ap_entry(), which calls
 * amd_late_run_ap_task() with the agesadata.
 */
static void callout_ap_entry(void *unused)
{
	AGESA_STATUS Status = AGESA_UNSUPPORTED;

	printk(BIOS_DEBUG, "%s Func: 0x%x,  Data: 0x%lx, Ptr: 0x%p\n",
		__func__, agesadata.Func, agesadata.Data, agesadata.ConfigPtr);

	/* Check if this AP should run the function */
	if (!((agesadata.Func == AGESA_RUNFUNC_ONAP) &&
	    (agesadata.Data == lapicid())))
		return;

	Status = amd_late_run_ap_task(agesadata.ConfigPtr);

	if (Status)
		printk(BIOS_DEBUG, "There was a problem with %lx returned %s\n",
			lapicid(), decodeAGESA_STATUS(Status));
}
Example #5
File: ioapic.c  Project: tidatida/coreboot
static void load_vectors(void *ioapic_base)
{
    u32 bsp_lapicid = lapicid();
    u32 low, high;
    u32 i, ioapic_interrupts;

    ioapic_interrupts = ioapic_interrupt_count(ioapic_base);

#if CONFIG_IOAPIC_INTERRUPTS_ON_FSB
    /*
     * For the Pentium 4 and above, APICs deliver their interrupts
     * on the front side bus; enable that.
     */
    printk(BIOS_DEBUG, "IOAPIC: Enabling interrupts on FSB\n");
    io_apic_write(ioapic_base, 0x03,
                  io_apic_read(ioapic_base, 0x03) | (1 << 0));
#endif
#if CONFIG_IOAPIC_INTERRUPTS_ON_APIC_SERIAL_BUS
    printk(BIOS_DEBUG, "IOAPIC: Enabling interrupts on APIC serial bus\n");
    io_apic_write(ioapic_base, 0x03, 0);
#endif

    /* Enable Virtual Wire Mode. */
    low = ENABLED | TRIGGER_EDGE | POLARITY_HIGH | PHYSICAL_DEST | ExtINT;
    high = bsp_lapicid << (56 - 32);

    io_apic_write(ioapic_base, 0x10, low);
    io_apic_write(ioapic_base, 0x11, high);

    if (io_apic_read(ioapic_base, 0x10) == 0xffffffff) {
        printk(BIOS_WARNING, "IOAPIC not responding.\n");
        return;
    }

    printk(BIOS_SPEW, "IOAPIC: reg 0x%08x value 0x%08x 0x%08x\n",
           0, high, low);
    low = DISABLED;
    high = NONE;
    for (i = 1; i < ioapic_interrupts; i++) {
        io_apic_write(ioapic_base, i * 2 + 0x10, low);
        io_apic_write(ioapic_base, i * 2 + 0x11, high);

        printk(BIOS_SPEW, "IOAPIC: reg 0x%08x value 0x%08x 0x%08x\n",
               i, high, low);
    }
}
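The IOAPIC examples on this page (#5 and #11) access registers through io_apic_read()/io_apic_write(). The IOAPIC is programmed indirectly: the register number is written to the index register (IOREGSEL, offset 0x00) and the value is transferred through the data window (IOWIN, offset 0x10). A minimal sketch of such accessors; the my_ names mark them as illustrative, not coreboot's implementation:

#include <stdint.h>

/* Illustrative indirect IOAPIC accessors: select the register via IOREGSEL
 * (offset 0x00), then move the data through IOWIN (offset 0x10). */
static uint32_t my_io_apic_read(void *ioapic_base, uint32_t reg)
{
    volatile uint32_t *ioregsel = (volatile uint32_t *)ioapic_base;
    volatile uint32_t *iowin =
        (volatile uint32_t *)((uint8_t *)ioapic_base + 0x10);

    *ioregsel = reg;
    return *iowin;
}

static void my_io_apic_write(void *ioapic_base, uint32_t reg, uint32_t value)
{
    volatile uint32_t *ioregsel = (volatile uint32_t *)ioapic_base;
    volatile uint32_t *iowin =
        (volatile uint32_t *)((uint8_t *)ioapic_base + 0x10);

    *ioregsel = reg;
    *iowin = value;
}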
Example #6
static void baytrail_core_init(device_t cpu)
{
	printk(BIOS_DEBUG, "Init BayTrail core.\n");

	/* On BayTrail the turbo disable bit is actually scoped at the building
	 * block level -- not package. For non-BSP cores that are within a
	 * building block, enable turbo. The cores within the BSP's building
	 * block will just see it already enabled and move on. */
	if (lapicid())
		enable_turbo();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set this core to max frequency ratio */
	set_max_freq();
}
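Example #6 pushes the MSR programming into reg_script_run(core_msr_script). As a sketch of what such a table can look like, here are the same MSR operations from Example #1 expressed with coreboot's reg_script macros (REG_MSR_RMW / REG_MSR_OR / REG_SCRIPT_END). The register names and mask/value pairs are borrowed from Example #1 and the table name is illustrative, so it may not match the real core_msr_script.

/* Sketch only: Example #1's MSR operations written as a reg_script table. */
static const struct reg_script core_msr_script_sketch[] = {
	/* Dynamic L2 shrink enable and threshold */
	REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL, ~0x3f000f, 0xe0008),
	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
	REG_MSR_OR(MSR_POWER_MISC, 0x44),
	REG_SCRIPT_END
};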
Example #7
File: proc.c  Project: buf1024/xv6-public
// Must be called with interrupts disabled to avoid the caller being
// rescheduled between reading lapicid and running through the loop.
struct cpu*
mycpu(void)
{
  int apicid, i;
  
  if(readeflags()&FL_IF)
    panic("mycpu called with interrupts enabled\n");
  
  apicid = lapicid();
  // APIC IDs are not guaranteed to be contiguous. Maybe we should have
  // a reverse map, or reserve a register to store &cpus[i].
  for (i = 0; i < ncpu; ++i) {
    if (cpus[i].apicid == apicid)
      return &cpus[i];
  }
  panic("unknown apicid\n");
}
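The comment in mycpu() notes that APIC IDs are not guaranteed to be contiguous and suggests a reverse map instead of the linear scan. A minimal sketch of that idea; the table and the build function below are hypothetical additions, not part of xv6:

// Hypothetical reverse map from APIC ID to cpus[] index, as the comment in
// mycpu() suggests.  Built once by the BSP after cpus[]/ncpu are filled in.
static int apicid_to_cpu[256];

void
buildapicidmap(void)
{
  int i;

  for(i = 0; i < 256; i++)
    apicid_to_cpu[i] = -1;
  for(i = 0; i < ncpu; i++)
    apicid_to_cpu[cpus[i].apicid] = i;
}

// mycpu() could then replace the scan with:
//   return &cpus[apicid_to_cpu[lapicid()]];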
Example #8
File: mp_init.c  Project: OpenNoah/u-boot
static int init_bsp(struct udevice **devp)
{
	char processor_name[CPU_MAX_NAME_LEN];
	int apic_id;
	int ret;

	cpu_get_name(processor_name);
	debug("CPU: %s\n", processor_name);

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, devp);
	if (ret) {
		printf("Cannot find boot CPU, APIC ID %d\n", apic_id);
		return ret;
	}

	return 0;
}
Example #9
void
panic(char *s)
{
  int i;
  uint pcs[10];

  cli();
  cons.locking = 0;
  // use lapicid() so that we can call panic from mycpu()
  cprintf("lapicid %d: panic: ", lapicid());
  cprintf(s);
  cprintf("\n");
  getcallerpcs(&s, pcs);
  for(i=0; i<10; i++)
    cprintf(" %p", pcs[i]);
  panicked = 1; // freeze other CPUs
  for(;;)
    ;
}
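panic() prints the call chain through getcallerpcs(), which walks the saved %ebp frame pointers on the stack. A sketch in the spirit of xv6's implementation; the KERNBASE sanity bound and the 10-entry limit are assumptions about the usual xv6 layout:

// Walk the saved %ebp chain, recording up to 10 return addresses.
void
getcallerpcs_sketch(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;           // the argument sits just above saved %ebp/%eip
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];            // saved %eip
    ebp = (uint*)ebp[0];        // saved %ebp of the caller
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}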
Example #10
File: cpu.c  Project: AdriDlu/coreboot
static void soc_core_init(device_t cpu)
{
	printk(BIOS_SPEW, "%s/%s ( %s )\n",
			__FILE__, __func__, dev_name(cpu));
	printk(BIOS_DEBUG, "Init Braswell core.\n");

	/*
	 * The turbo disable bit is actually scoped at the building
	 * block level -- not package. For non-BSP cores that are within a
	 * building block, enable turbo. The cores within the BSP's building
	 * block will just see it already enabled and move on.
	 */
	if (lapicid())
		enable_turbo();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set this core to max frequency ratio */
	set_max_freq();
}
Example #11
File: ioapic.c  Project: tidatida/coreboot
void set_ioapic_id(void *ioapic_base, u8 ioapic_id)
{
    u32 bsp_lapicid = lapicid();
    int i;

    printk(BIOS_DEBUG, "IOAPIC: Initializing IOAPIC at 0x%p\n",
           ioapic_base);
    printk(BIOS_DEBUG, "IOAPIC: Bootstrap Processor Local APIC = 0x%02x\n",
           bsp_lapicid);

    if (ioapic_id) {
        printk(BIOS_DEBUG, "IOAPIC: ID = 0x%02x\n", ioapic_id);
        /* Set IOAPIC ID if it has been specified. */
        io_apic_write(ioapic_base, 0x00,
                      (io_apic_read(ioapic_base, 0x00) & 0xf0ffffff) |
                      (ioapic_id << 24));
    }

    printk(BIOS_SPEW, "IOAPIC: Dumping registers\n");
    for (i = 0; i < 3; i++)
        printk(BIOS_SPEW, "  reg 0x%04x: 0x%08x\n", i,
               io_apic_read(ioapic_base, i));

}
Example #12
static void *smp_write_config_table(void *v)
{
    struct mp_config_table *mc;
    int bus_isa;
    int boot_apic_id;
    unsigned apic_version;
    unsigned cpu_features;
    unsigned cpu_feature_flags;
    struct cpuid_result result;
    unsigned long cpu_flag;

    mc = (void *)(((char *)v) + SMP_FLOATING_TABLE_LEN);

    mptable_init(mc, LAPIC_ADDR);
    memcpy(mc->mpc_oem, "AMD     ", 8);

    /* Inagua uses a dual-core CPU with one die */
    boot_apic_id = lapicid();
    apic_version = lapic_read(LAPIC_LVR) & 0xff;
    result = cpuid(1);
    cpu_features = result.eax;
    cpu_feature_flags = result.edx;
    cpu_flag = MPC_CPU_ENABLED | MPC_CPU_BOOTPROCESSOR;
    smp_write_processor(mc,
                        0, apic_version,
                        cpu_flag, cpu_features, cpu_feature_flags
                       );

    cpu_flag = MPC_CPU_ENABLED;
    smp_write_processor(mc,
                        1, apic_version,
                        cpu_flag, cpu_features, cpu_feature_flags
                       );

    get_bus_conf();

    //mptable_write_buses(mc, NULL, &bus_isa);
    my_smp_write_bus(mc, 0, "PCI   ");
    my_smp_write_bus(mc, 1, "PCI   ");
    bus_isa = 0x02;
    my_smp_write_bus(mc, bus_isa, "ISA   ");

    /* I/O APICs:   APIC ID Version State   Address */

    device_t dev;
    u32 dword;
    u8 byte;

    dword = 0;
    dword = pm_ioread(0x34) & 0xF0;
    dword |= (pm_ioread(0x35) & 0xFF) << 8;
    dword |= (pm_ioread(0x36) & 0xFF) << 16;
    dword |= (pm_ioread(0x37) & 0xFF) << 24;
    /* Set IO APIC ID onto IO_APIC_ID */
    write32 (dword, 0x00);
    write32 (dword + 0x10, IO_APIC_ID << 24);
    apicid_sb900 = IO_APIC_ID;
    smp_write_ioapic(mc, apicid_sb900, 0x21, dword);

    /* PIC IRQ routing */
    for (byte = 0x0; byte < sizeof(picr_data); byte ++) {
        outb(byte, 0xC00);
        outb(picr_data[byte], 0xC01);
    }

    /* APIC IRQ routing */
    for (byte = 0x0; byte < sizeof(intr_data); byte ++) {
        outb(byte | 0x80, 0xC00);
        outb(intr_data[byte], 0xC01);
    }

    /* I/O Ints:    Type    Polarity    Trigger     Bus ID   IRQ    APIC ID PIN# */
#define IO_LOCAL_INT(type, intr, apicid, pin) \
  smp_write_lintsrc(mc, (type), MP_IRQ_TRIGGER_EDGE | MP_IRQ_POLARITY_HIGH, bus_isa, (intr), (apicid), (pin));

    //mptable_add_isa_interrupts(mc, bus_isa, apicid_sb900, 0);
    /*I/O Ints:          Type    Trigger             Polarity               Bus ID   IRQ  APIC ID       PIN# */
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x0, apicid_sb900, 0x0);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x1, apicid_sb900, 0x1);
    smp_write_intsrc(mc, mp_ExtINT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x2, apicid_sb900, 0x2);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x3, apicid_sb900, 0x3);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x4, apicid_sb900, 0x4);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,  0, 0x49, apicid_sb900, 0x11);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x6, apicid_sb900, 0x6);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x7, apicid_sb900, 0x7);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x8, apicid_sb900, 0x8);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0x9, apicid_sb900, 0x9);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,  bus_isa, 0xa, apicid_sb900, 0xa);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,  bus_isa, 0x1c, apicid_sb900, 0x13);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0xc, apicid_sb900, 0xc);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0xd, apicid_sb900, 0xd);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0xe, apicid_sb900, 0xe);
    smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_EDGE|MP_IRQ_POLARITY_HIGH,  bus_isa, 0xf, apicid_sb900, 0xf);

    /* PCI interrupts are level triggered, and are
     * associated with a specific bus/device/function tuple.
     */
#define PCI_INT(bus, dev, int_sign, pin) \
        smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW, (bus), (((dev)<<2)|(int_sign)), apicid_sb900, (pin))

    /* Internal VGA */
    PCI_INT(0x0, 0x01, 0x0, intr_data[0x02]);
    PCI_INT(0x0, 0x01, 0x1, intr_data[0x03]);

    /* SMBUS */
    PCI_INT(0x0, 0x14, 0x0, 0x10);

    /* HD Audio */
    PCI_INT(0x0, 0x14, 0x0, intr_data[0x13]);

    /* USB */
    PCI_INT(0x0, 0x12, 0x0, intr_data[0x30]);
    PCI_INT(0x0, 0x12, 0x1, intr_data[0x31]);
    PCI_INT(0x0, 0x13, 0x0, intr_data[0x32]);
    PCI_INT(0x0, 0x13, 0x1, intr_data[0x33]);
    PCI_INT(0x0, 0x16, 0x0, intr_data[0x34]);
    PCI_INT(0x0, 0x16, 0x1, intr_data[0x35]);
    PCI_INT(0x0, 0x14, 0x2, intr_data[0x36]);

    /* SATA */
    PCI_INT(0x0, 0x11, 0x0, intr_data[0x40]);
    PCI_INT(0x0, 0x11, 0x0, intr_data[0x41]);


    /* on board NIC & Slot PCIE.  */

    /* PCI slots */
    /* PCI_SLOT 0. */
    PCI_INT(bus_sb900[1], 0x5, 0x0, 0x14);
    PCI_INT(bus_sb900[1], 0x5, 0x1, 0x15);
    PCI_INT(bus_sb900[1], 0x5, 0x2, 0x16);
    PCI_INT(bus_sb900[1], 0x5, 0x3, 0x17);

    /* PCI_SLOT 1. */
    PCI_INT(bus_sb900[1], 0x6, 0x0, 0x15);
    PCI_INT(bus_sb900[1], 0x6, 0x1, 0x16);
    PCI_INT(bus_sb900[1], 0x6, 0x2, 0x17);
    PCI_INT(bus_sb900[1], 0x6, 0x3, 0x14);

    /* PCI_SLOT 2. */
    PCI_INT(bus_sb900[1], 0x7, 0x0, 0x16);
    PCI_INT(bus_sb900[1], 0x7, 0x1, 0x17);
    PCI_INT(bus_sb900[1], 0x7, 0x2, 0x14);
    PCI_INT(bus_sb900[1], 0x7, 0x3, 0x15);

    PCI_INT(bus_sb900[2], 0x0, 0x0, 0x12);
    PCI_INT(bus_sb900[2], 0x0, 0x1, 0x13);
    PCI_INT(bus_sb900[2], 0x0, 0x2, 0x14);

    /* PCIe Lan*/
    PCI_INT(0x0, 0x06, 0x0, 0x13);

    /* FCH PCIe PortA */
    PCI_INT(0x0, 0x15, 0x0, 0x10);
    /* FCH PCIe PortB */
    PCI_INT(0x0, 0x15, 0x1, 0x11);
    /* FCH PCIe PortC */
    PCI_INT(0x0, 0x15, 0x2, 0x12);
    /* FCH PCIe PortD */
    PCI_INT(0x0, 0x15, 0x3, 0x13);

    /*Local Ints:   Type    Polarity    Trigger     Bus ID   IRQ    APIC ID PIN# */
    IO_LOCAL_INT(mp_ExtINT, 0, MP_APIC_ALL, 0x0);
    IO_LOCAL_INT(mp_NMI, 0, MP_APIC_ALL, 0x1);
    /* There is no extension information... */

    /* Compute the checksums */
    mc->mpe_checksum =
        smp_compute_checksum(smp_next_mpc_entry(mc), mc->mpe_length);
    mc->mpc_checksum = smp_compute_checksum(mc, mc->mpc_length);
    printk(BIOS_DEBUG, "Wrote the mp table end at: %p - %p\n",
           mc, smp_next_mpe_entry(mc));
    return smp_next_mpe_entry(mc);
}
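The table above is finalized with smp_compute_checksum(), which, per the MP specification, picks a checksum byte such that all bytes of the structure sum to zero modulo 256. A self-contained sketch of that calculation; the function name is illustrative, not coreboot's implementation:

#include <stdint.h>
#include <stddef.h>

/* Sketch of an MP-spec style checksum: return the byte that makes the
 * byte-wise sum of the whole structure equal 0 (mod 256). */
static uint8_t my_smp_compute_checksum(const void *base, size_t len)
{
    const uint8_t *bytes = base;
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < len; i++)
        sum += bytes[i];

    return (uint8_t)-sum;
}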
Example #13
void initialize_cpus(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Find the info struct for this CPU */
	info = cpu_info();

	if (need_lapic_init()) {
		/* Ensure the local APIC is enabled */
		enable_lapic();

		/* Get the device path of the boot CPU */
		cpu_path.type           = DEVICE_PATH_APIC;
		cpu_path.apic.apic_id = lapicid();
	} else {
		/* Get the device path of the boot CPU */
		cpu_path.type = DEVICE_PATH_CPU;
		cpu_path.cpu.id       = 0;
	}

	/* Find the device structure for the boot CPU */
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);

	// why here? In case some day we can start core1 in amd_sibling_init
	if (is_smp_boot())
		copy_secondary_start_to_lowest_1M();

	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))
		smm_init();

	/* start all aps at first, so we can init ECC all together */
	if (is_smp_boot() && IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		start_other_cpus(cpu_bus, info->cpu);

	/* Initialize the bootstrap processor */
	cpu_initialize(0);

	if (is_smp_boot() && !IS_ENABLED(CONFIG_PARALLEL_CPU_INIT)) {
		start_other_cpus(cpu_bus, info->cpu);

		/* Now wait for the rest of the CPUs to stop */
		wait_other_cpus_stop(cpu_bus);
	}

	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
		/* At this point, all APs are sleeping:
		 * smm_init() will queue a pending SMI on all cpus
		 * and smm_other_cpus() will start them one by one */
		smm_init();

		if (is_smp_boot()) {
			last_cpu_index = 0;
			smm_other_cpus(cpu_bus, info->cpu);
		}
	}

	smm_init_completion();

	if (is_smp_boot())
		recover_lowest_1M();
}
Example #14
File: smm_init.c  Project: cristim/coreboot
void smm_init(void)
{
	msr_t msr;

	msr = rdmsr(HWCR_MSR);
	if (msr.lo & (1 << 0)) {
		// This sounds like a bug... ? 
		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
		return;
	}

	/* Only copy SMM handler once, not once per CPU */
	if (!smm_handler_copied) {
		msr_t syscfg_orig, mtrr_aseg_orig;

		smm_handler_copied = 1;

		/* Back up MSRs for later restore */
		syscfg_orig = rdmsr(SYSCFG_MSR);
		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);

		/* MTRR changes don't like an enabled cache */
		disable_cache();

		msr = syscfg_orig;
		/* Allow changes to MTRR extended attributes */
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		/* turn the extended attributes off until we fix
		 * them so A0000 is routed to memory
		 */
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		/* set DRAM access to 0xa0000 */
		/* A0000 is memory */
		msr.lo = 0x18181818;
		msr.hi = 0x18181818;
		wrmsr(MTRRfix16K_A0000_MSR, msr);

		/* enable the extended features */
		msr = syscfg_orig;
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		enable_cache();
		/* copy the real SMM handler */
		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
		wbinvd();

		/* Restore MTRR */
		disable_cache();

		/* Restore SYSCFG */
		wrmsr(SYSCFG_MSR, syscfg_orig);

		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
		enable_cache();
	}


	/* But set SMM base address on all CPUs/cores */
	msr = rdmsr(SMM_BASE_MSR);
	msr.lo = SMM_BASE - (lapicid() * 0x400);
	wrmsr(SMM_BASE_MSR, msr);

	/* enable the SMM memory window */
	msr = rdmsr(SMM_MASK_MSR);
	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
	wrmsr(SMM_MASK_MSR, msr);

	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
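Both this example and Example #15 derive a per-core SMM base as SMM_BASE - (lapicid() * 0x400), spacing each core's SMM save area 0x400 bytes apart so the handlers' state does not collide. A small standalone sketch of that arithmetic; the 0xa0000 ASEG base is assumed here for illustration only:

#include <stdio.h>

#define ASEG_SMM_BASE	0xa0000		/* assumed ASEG base, illustration only */

/* Print the per-core SMBASE spacing used in Examples #14 and #15. */
int main(void)
{
	unsigned int apic_id;

	for (apic_id = 0; apic_id < 4; apic_id++)
		printf("APIC ID %u -> SMBASE 0x%05x\n",
		       apic_id, ASEG_SMM_BASE - apic_id * 0x400);

	return 0;
}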
Example #15
static void model_10xxx_init(device_t dev)
{
	u8 i;
	msr_t msr;
	struct node_core_id id;
#if CONFIG_LOGICAL_CPUS
	u32 siblings;
#endif

	id = get_node_core_id(read_nb_cfg_54());	/* nb_cfg_54 can not be set */
	printk(BIOS_DEBUG, "nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 5; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local cpu apics */
	setup_lapic();

	/* Set the processor name string */
	init_processor_name();

#if CONFIG_LOGICAL_CPUS
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	msr = rdmsr(BU_CFG2_MSR);
	/* Clear ClLinesToNbDis */
	msr.lo &= ~(1 << 15);
	/* Clear bit 35 as per Erratum 343 */
	msr.hi &= ~(1 << (35-32));
	wrmsr(BU_CFG2_MSR, msr);

	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) {
		printk(BIOS_DEBUG, "Initializing SMM ASeg memory\n");

		/* Set SMM base address for this CPU */
		msr = rdmsr(SMM_BASE_MSR);
		msr.lo = SMM_BASE - (lapicid() * 0x400);
		wrmsr(SMM_BASE_MSR, msr);

		/* Enable the SMM memory window */
		msr = rdmsr(SMM_MASK_MSR);
		msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
		wrmsr(SMM_MASK_MSR, msr);
	} else {
		printk(BIOS_DEBUG, "Disabling SMM ASeg memory\n");

		/* Set SMM base address for this CPU */
		msr = rdmsr(SMM_BASE_MSR);
		msr.lo = SMM_BASE - (lapicid() * 0x400);
		wrmsr(SMM_BASE_MSR, msr);

		/* Disable the SMM memory window */
		msr.hi = 0x0;
		msr.lo = 0x0;
		wrmsr(SMM_MASK_MSR, msr);
	}

	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);

}