/*
 * Build the CPU topology masks (SMT siblings, shared LLC) for @cpu as it
 * comes online, by comparing it against every CPU already set up.
 *
 * NOTE(review): this copy of set_cpu_sibling_map() is truncated — the
 * core-mask pass and the function's closing brace are missing below, so
 * this fragment does not compile as-is.
 */
void __cpuinit set_cpu_sibling_map(int cpu)
{
    bool has_smt = smp_num_siblings > 1;            /* hyper-threading present? */
    bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; /* any multi-processing? */
    struct cpuinfo_x86 *c = &cpu_data(cpu);
    struct cpuinfo_x86 *o;
    int i;

    /* Mark this CPU as having its topology links set up. */
    cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

    if (!has_mp) {
        /* Single-core, non-SMT system: every mask is just @cpu itself. */
        cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
        cpumask_set_cpu(cpu, cpu_core_mask(cpu));
        c->booted_cores = 1;
        return;
    }

    for_each_cpu(i, cpu_sibling_setup_mask) {
        o = &cpu_data(i);

        /* Link SMT siblings (threads sharing one core); always self-link. */
        if ((i == cpu) || (has_smt && match_smt(c, o)))
            link_mask(sibling, cpu, i);

        /* Link CPUs that share the last-level cache; always self-link. */
        if ((i == cpu) || (has_mp && match_llc(c, o)))
            link_mask(llc_shared, cpu, i);

    }
/*
 * "Beispiel #2" (German for "Example #2") and a stray "0" — extraction
 * artifacts from the page this code was scraped from.  They mark the
 * boundary between two different pasted versions of set_cpu_sibling_map().
 */
/*
 * Second pasted copy of set_cpu_sibling_map(): an older kernel variant
 * that builds the sibling, LLC-shared, and core masks in a single pass,
 * and maintains the per-package booted_cores count inline.
 *
 * NOTE(review): this copy also lacks its closing brace here — the text
 * that follows it belongs to yet other versions of this function.
 */
void __cpuinit set_cpu_sibling_map(int cpu)
{
	bool has_mc = boot_cpu_data.x86_max_cores > 1;	/* multi-core package? */
	bool has_smt = smp_num_siblings > 1;		/* hyper-threading present? */
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i;

	/* Mark this CPU as having its topology links set up. */
	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_smt && !has_mc) {
		/* Single-core, non-SMT system: every mask is just @cpu itself. */
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		/* Link SMT siblings (threads sharing one core); always self-link. */
		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(sibling, cpu, i);

		/* Link CPUs that share the last-level cache; always self-link. */
		if ((i == cpu) || (has_mc && match_llc(c, o)))
			link_mask(llc_shared, cpu, i);

		/* Link CPUs in the same package and update booted_cores. */
		if ((i == cpu) || (has_mc && match_mc(c, o))) {
			link_mask(core, cpu, i);

			/*
			 *  Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
    /*
     * This needs a separate iteration over the cpus because we rely on all
     * cpu_sibling_mask links to be set-up.
     *
     * NOTE(review): detached fragment from a third version of
     * set_cpu_sibling_map().  It references 'has_mp', which is not declared
     * in the preceding pasted copy (that one declares 'has_mc'), so this
     * fragment cannot compile in the scope it has been pasted into.
     */
    for_each_cpu(i, cpu_sibling_setup_mask) {
        o = &cpu_data(i);

        /* Link CPUs in the same package and update booted_cores. */
        if ((i == cpu) || (has_mp && match_mc(c, o))) {
            link_mask(core, cpu, i);

            /*
             *  Does this new cpu bringup a new core?
             */
            if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
                /*
                 * for each core in package, increment
                 * the booted_cores for this new cpu
                 */
                if (cpumask_first(cpu_sibling_mask(i)) == i)
                    c->booted_cores++;
                /*
                 * increment the core count for all
                 * the other cpus in this package
                 */
                if (i != cpu)
                    cpu_data(i).booted_cores++;
            } else if (i != cpu && !c->booted_cores)
                c->booted_cores = cpu_data(i).booted_cores;
        }
    }
	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set-up.
	 *
	 * NOTE(review): detached fragment from a newer kernel version that
	 * uses the renamed topology_*_cpumask accessors and match_die(), and
	 * adds the NUMA-based topology fallback.  Like the previous fragment
	 * it references 'has_mp' without a local declaration, and the
	 * function's closing brace is missing after this loop.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		/* Link CPUs in the same die/package; always self-link. */
		if ((i == cpu) || (has_mp && match_die(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 *  Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		/*
		 * If CPUs in the same die span NUMA nodes, fall back to
		 * NUMA-derived topology instead of the die-based one.
		 */
		if (match_die(c, o) && !topology_same_node(c, o))
			primarily_use_numa_for_topology();
	}