Example #1
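Perf-events CPU bring-up on AMD. Northbridge performance counters are shared by all cores on a node, so each CPU resolves its node with amd_get_nb_id() and, if a sibling already holds a struct amd_nb for that node, adopts it and frees its own preallocated copy, bumping the shared refcount.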
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	/* NB event constraints only matter on multi-core parts */
	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	raw_spin_lock(&amd_nb_lock);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		/*
		 * A sibling on the same node already has an amd_nb:
		 * share it and free the one preallocated for this CPU.
		 */
		if (nb->nb_id == nb_id) {
			kfree(cpuc->amd_nb);
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;

	raw_spin_unlock(&amd_nb_lock);
}
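Example #2
What appears to be a later revision of the same callback: it defaults the counters to host-only counting for SVM via perf_ctr_virt_mask, skips the northbridge setup on family 0x15, and, since CPU_STARTING runs with interrupts disabled, hands the unused struct amd_nb to kfree_on_online for deferred freeing instead of calling kfree() in place; the amd_nb_lock spinlock is no longer taken.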
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}
Example #3
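Machine-check decoding for bank 4, the northbridge bank on these AMD families. amd_get_nb_id() maps the reporting CPU (m->extcpu) to the node named in the message, and DRAM ECC errors (extended error codes 0x0 and 0x8) are forwarded to the registered nb_bus_decoder. EC() and XEC() pull the error-code fields out of the MCA status word; in the kernel's drivers/edac/mce_amd.h they are defined essentially as:

/* MCi_STATUS field extractors (from mce_amd.h) */
#define EC(x)			((x) & 0xffff)
#define XEC(x, mask)		(((x) >> 16) & mask)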
static void decode_mc4_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int node_id = amd_get_nb_id(m->extcpu);
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);
	u8 offset = 0;

	pr_emerg(HW_ERR "MC4 Error (node %d): ", node_id);

	switch (xec) {
	case 0x0 ... 0xe:

		/* special handling for DRAM ECCs */
		if (xec == 0x0 || xec == 0x8) {
			/* no ECCs on F11h */
			if (c->x86 == 0x11)
				goto wrong_mc4_mce;

			pr_cont("%s.\n", mc4_mce_desc[xec]);

			if (nb_bus_decoder)
				nb_bus_decoder(node_id, m);
			return;
		}
		break;

	case 0xf:
		if (TLB_ERROR(ec))
			pr_cont("GART Table Walk data error.\n");
		else if (BUS_ERROR(ec))
			pr_cont("DMA Exclusion Vector Table Walk error.\n");
		else
			goto wrong_mc4_mce;
		return;

	case 0x19:
		if (boot_cpu_data.x86 == 0x15)
			pr_cont("Compute Unit Data Error.\n");
		else
			goto wrong_mc4_mce;
		return;

	case 0x1c ... 0x1f:
		offset = 13;
		break;

	default:
		goto wrong_mc4_mce;
	}

	pr_cont("%s.\n", mc4_mce_desc[xec - offset]);
	return;

 wrong_mc4_mce:
	pr_emerg(HW_ERR "Corrupted MC4 MCE info?\n");
}
Example #4
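Reads the L3 partitioning register (offset 0x1d4 in the link function of the northbridge that amd_get_nb_id() resolves for this CPU) and returns the 4-bit subcache-enable mask belonging to the CPU's compute unit.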
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}
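A minimal caller sketch, hypothetical rather than taken from the kernel; dump_subcache_masks() is an invented name. It is safe on any system because amd_get_subcaches() returns 0 when L3 partitioning is unsupported.

static void dump_subcache_masks(void)
{
	int cpu;

	/* print the per-compute-unit subcache mask of every online CPU */
	for_each_online_cpu(cpu)
		pr_info("CPU%d: L3 subcache mask 0x%x\n",
			cpu, amd_get_subcaches(cpu));
}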
Example #5
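An alternate, older northbridge MCE decoder. Well-known extended error codes are handled inline; anything else is validated through the per-family fam_ops->nb_mce() hook, and DRAM ECC errors on families 0xf, 0x10 and 0x15 are again passed to nb_bus_decoder with the node ID from amd_get_nb_id().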
void amd_decode_nb_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int node_id = amd_get_nb_id(m->extcpu);
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);

	pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);

	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
Example #6
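An earlier, CPU_ONLINE-time variant of the node-sharing logic from Examples #1 and #2. Under amd_nb_lock it searches the online CPUs for a struct amd_nb with a matching node ID and allocates a fresh one with amd_alloc_nb() only if none is found; since amd_get_nb_id() can still return BAD_APICID this early in boot, setup is simply skipped in that case.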
static void amd_pmu_cpu_online(int cpu)
{
	struct cpu_hw_events *cpu1, *cpu2;
	struct amd_nb *nb = NULL;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	/*
	 * function may be called too early in the
	 * boot process, in which case nb_id is bogus
	 */
	nb_id = amd_get_nb_id(cpu);
	if (nb_id == BAD_APICID)
		return;

	cpu1 = &per_cpu(cpu_hw_events, cpu);
	cpu1->amd_nb = NULL;

	raw_spin_lock(&amd_nb_lock);

	for_each_online_cpu(i) {
		cpu2 = &per_cpu(cpu_hw_events, i);
		nb = cpu2->amd_nb;
		if (!nb)
			continue;
		if (nb->nb_id == nb_id)
			goto found;
	}

	nb = amd_alloc_nb(cpu, nb_id);
	if (!nb) {
		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
		raw_spin_unlock(&amd_nb_lock);
		return;
	}
found:
	nb->refcnt++;
	cpu1->amd_nb = nb;

	raw_spin_unlock(&amd_nb_lock);
}
Example #7
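The write-side counterpart of Example #4: installs a new subcache-enable mask for the CPU's compute unit. On first use it latches the reset state of the partitioning and BAN registers, deactivates BAN mode while any subcache is disabled, and restores the saved BAN bits once the partitioning register returns to its reset value.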
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
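A hypothetical store-style helper, for illustration only (store_subcache_mask() is an invented name): parse a hex mask from user-supplied text and apply it, rejecting values wider than the 4 bits amd_set_subcaches() accepts.

static int store_subcache_mask(int cpu, const char *buf)
{
	unsigned long val;

	/* amd_set_subcaches() itself also rejects masks above 0xf */
	if (kstrtoul(buf, 16, &val) < 0 || val > 0xf)
		return -EINVAL;

	return amd_set_subcaches(cpu, val);
}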