/* * Take a map of online CPUs and the number of available interrupt vectors * and generate an output cpumask suitable for spreading MSI/MSI-X vectors * so that they are distributed as good as possible around the CPUs. If * more vectors than CPUs are available we'll map one to each CPU, * otherwise we map one to the first sibling of each socket. * * If there are more vectors than CPUs we will still only have one bit * set per CPU, but interrupt code will keep on assigning the vectors from * the start of the bitmap until we run out of vectors. */ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) { struct cpumask *affinity_mask; unsigned int max_vecs = *nr_vecs; if (max_vecs == 1) return NULL; affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL); if (!affinity_mask) { *nr_vecs = 1; return NULL; } if (max_vecs >= num_online_cpus()) { cpumask_copy(affinity_mask, cpu_online_mask); *nr_vecs = num_online_cpus(); } else { unsigned int vecs = 0, cpu; for_each_online_cpu(cpu) { if (cpu == get_first_sibling(cpu)) { cpumask_set_cpu(cpu, affinity_mask); vecs++; } if (--max_vecs == 0) break; } *nr_vecs = vecs; } return affinity_mask; }
/*
 * Build the CPU -> hardware queue map for @qmap.
 *
 * CPUs below the queue count get a direct sequential mapping.  Any
 * remaining CPUs are folded onto their first sibling's queue so that
 * hyperthread siblings share a queue (a performance optimization);
 * a CPU that is itself a first sibling falls back to the sequential
 * mapping.  Always returns 0.
 */
int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	unsigned int *map = qmap->mq_map;
	unsigned int nr_queues = qmap->nr_queues;
	unsigned int cpu, first_sibling;

	for_each_possible_cpu(cpu) {
		/* Sequential mapping covers the first nr_queues CPUs. */
		if (cpu < nr_queues) {
			map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
			continue;
		}

		/* Overflow CPUs piggyback on their sibling's queue. */
		first_sibling = get_first_sibling(cpu);
		map[cpu] = (first_sibling == cpu) ?
			cpu_to_queue_index(qmap, nr_queues, cpu) :
			map[first_sibling];
	}

	return 0;
}
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, const struct cpumask *online_mask) { unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) return 1; cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; for_each_cpu(i, online_mask) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) nr_uniq_cpus++; cpumask_set_cpu(i, cpus); }