Example 1
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		}
		else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		}
		else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones.
	 */
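	/*
	 * Worked example (hypothetical bitmap, added for illustration):
	 * with clusters 0, 2 and 3 populated the map reads 1 0 1 1 0 ...;
	 * the zero at position 1 is bounded by ones and is counted, the
	 * trailing zeros are not, so the loop below yields clusters = 4.
	 */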
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
Example 2
static int __cpuinit apic_cluster_num(void)
{
    int i, clusters, zeros;
    unsigned id;
    u16 *bios_cpu_apicid;
    DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

    bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
    bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

    for (i = 0; i < nr_cpu_ids; i++) {
        /* are we being called early in kernel startup? */
        if (bios_cpu_apicid) {
            id = bios_cpu_apicid[i];
        } else if (i < nr_cpu_ids) {
            if (cpu_present(i))
                id = per_cpu(x86_bios_cpu_apicid, i);
            else
                continue;
        } else
            break;

        if (id != BAD_APICID)
            __set_bit(APIC_CLUSTERID(id), clustermap);
    }

    /* Problem:  Partially populated chassis may not have CPUs in some of
     * the APIC clusters they have been allocated.  Only present CPUs have
     * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
     * Since clusters are allocated sequentially, count zeros only if
     * they are bounded by ones.  (A standalone sketch of this counting
     * rule follows after this function.)
     */
    clusters = 0;
    zeros = 0;
    for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
        if (test_bit(i, clustermap)) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }

    return clusters;
}
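The gap-counting rule used above is easiest to see on a concrete bitmap. The sketch below is a minimal, self-contained illustration of the same loop outside the kernel; the helper count_clusters(), the toy clustermap array and the values in main() are made up for this example and are not part of the kernel code.

#include <stdio.h>

#define NUM_APIC_CLUSTERS 16

/*
 * Count populated APIC clusters, including any gaps that are bounded by
 * populated clusters on both sides -- the same rule as the loop above.
 */
static int count_clusters(const int clustermap[NUM_APIC_CLUSTERS])
{
	int i, clusters = 0, zeros = 0;

	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (clustermap[i]) {
			clusters += 1 + zeros;
			zeros = 0;
		} else {
			++zeros;
		}
	}
	return clusters;
}

int main(void)
{
	/*
	 * Clusters 0, 2 and 3 populated: the gap at cluster 1 is bounded
	 * by ones and is counted; the trailing empty clusters are not.
	 */
	int clustermap[NUM_APIC_CLUSTERS] = { 1, 0, 1, 1 };

	printf("clusters = %d\n", count_clusters(clustermap)); /* prints 4 */
	return 0;
}

With this layout the sketch prints clusters = 4; apic_is_clustered_box() above would treat any count above 2 as a multi-chassis box and prefer HPET.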
Example 3
/*
 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
 */
void __init clustered_apic_check(void)
{
	long i;
	u8 clusters, max_cluster;
	u8 id;
	u8 cluster_cnt[NUM_APIC_CLUSTERS];

	memset(cluster_cnt, 0, sizeof(cluster_cnt));

	for (i = 0; i < NR_CPUS; i++) {
		id = bios_cpu_apicid[i];
		if (id != BAD_APICID)
			cluster_cnt[APIC_CLUSTERID(id)]++;
	}

	clusters = 0;
	max_cluster = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (cluster_cnt[i] > 0) {
			++clusters;
			if (cluster_cnt[i] > max_cluster)
				max_cluster = cluster_cnt[i];
		}
	}

	/*
	 * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
	 * else physical mode.
	 * (We don't use lowest priority delivery + HW APIC IRQ steering, so
	 * can ignore the clustered logical case and go straight to physical.)
	 */
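	/*
	 * Hypothetical example: a 4-CPU box whose APIC IDs all fall in
	 * cluster 0 gives clusters = 1, max_cluster = 4 and
	 * cluster_cnt[0] == max_cluster, so flat mode is chosen below.
	 */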
	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster)
		genapic = &apic_flat;
	else
		genapic = &apic_cluster;

	printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
}
Example 4
/*
 * oem_force_hpet_timer -- force HPET mode for some boxes.
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int oem_force_hpet_timer(void)
{
	int i, clusters, zeros;
	unsigned id;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	for (i = 0; i < NR_CPUS; i++) {
		id = bios_cpu_apicid[i];
		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
	 * clusters are allocated sequentially, count zeros only if they are
	 * bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/*
	 * If clusters > 2, then should be multi-chassis.  Return 1 for HPET.
	 * Else return 0 to use TSC.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
Example 5
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
    int i, clusters, zeros;
    unsigned id;
    u16 *bios_cpu_apicid;
    DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

    /*
     * There are no boxes of this kind with AMD CPUs yet.
     * Some AMD boxes with quad-core CPUs and 8 sockets have
     * APIC IDs in [4, 0x23] or [8, 0x27] and could be mistaken
     * for vSMP boxes; this still needs checking...
     */
    if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
        return 0;

    bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
    bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

    for (i = 0; i < NR_CPUS; i++) {
        /* are we being called early in kernel startup? */
        if (bios_cpu_apicid) {
            id = bios_cpu_apicid[i];
        }
        else if (i < nr_cpu_ids) {
            if (cpu_present(i))
                id = per_cpu(x86_bios_cpu_apicid, i);
            else
                continue;
        }
        else
            break;

        if (id != BAD_APICID)
            __set_bit(APIC_CLUSTERID(id), clustermap);
    }

    /* Problem:  Partially populated chassis may not have CPUs in some of
     * the APIC clusters they have been allocated.  Only present CPUs have
     * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
     * Since clusters are allocated sequentially, count zeros only if
     * they are bounded by ones.
     */
    clusters = 0;
    zeros = 0;
    for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
        if (test_bit(i, clustermap)) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }

    /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
     * not guaranteed to be synced between boards
     */
    if (is_vsmp_box() && clusters > 1)
        return 1;

    /*
     * If clusters > 2, then should be multi-chassis.
     * May have to revisit this when multi-core + hyperthreaded CPUs come
     * out, but AFAIK this will work even for them.
     */
    return (clusters > 2);
}
Example 6
/*
 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
 */
void __init clustered_apic_check(void)
{
	long i;
	u8 clusters, max_cluster;
	u8 id;
	u8 cluster_cnt[NUM_APIC_CLUSTERS];
	int max_apic = 0;

	/* genapic selection can be forced because of certain quirks.
	 */
	if (genapic_force) {
		genapic = genapic_force;
		goto print;
	}

#if defined(CONFIG_ACPI)
	/*
	 * Some x86_64 machines use physical APIC mode regardless of how many
	 * procs/clusters are present (x86_64 ES7000 is an example).
	 */
	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
		if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
			genapic = &apic_cluster;
			goto print;
		}
#endif

	memset(cluster_cnt, 0, sizeof(cluster_cnt));
	for (i = 0; i < NR_CPUS; i++) {
		id = bios_cpu_apicid[i];
		if (id == BAD_APICID)
			continue;
		if (id > max_apic)
			max_apic = id;
		cluster_cnt[APIC_CLUSTERID(id)]++;
	}

	/* Don't use clustered mode on AMD platforms. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		genapic = &apic_physflat;
#ifndef CONFIG_HOTPLUG_CPU
		/* In the CPU hotplug case we cannot use broadcast mode
		   because that opens a race when a CPU is removed.
		   Stay at physflat mode in this case.
		   It is bad to do this unconditionally though. Once
		   we have ACPI platform support for CPU hotplug
		   we should detect hotplug capability from ACPI tables and
		   only do this when really needed. -AK */
		if (max_apic <= 8)
			genapic = &apic_flat;
#endif
		goto print;
	}

	clusters = 0;
	max_cluster = 0;

	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (cluster_cnt[i] > 0) {
			++clusters;
			if (cluster_cnt[i] > max_cluster)
				max_cluster = cluster_cnt[i];
		}
	}

	/*
	 * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
	 * else physical mode.
	 * (We don't use lowest priority delivery + HW APIC IRQ steering, so
	 * can ignore the clustered logical case and go straight to physical.)
	 */
	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
#ifdef CONFIG_HOTPLUG_CPU
		/* Don't use APIC shortcuts in CPU hotplug to avoid races */
		genapic = &apic_physflat;
#else
		genapic = &apic_flat;
#endif
	} else
		genapic = &apic_cluster;

print:
	printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
}