Example #1
/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
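The function above is the basic cpumask_or() accumulation idiom: start from an empty mask, OR in each contributing CPU set, and then walk the union once. A minimal sketch of that idiom, assuming a hypothetical per-CPU counter named demo_flush_count:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU counter, used only to illustrate the idiom. */
static DEFINE_PER_CPU(unsigned long, demo_flush_count);

static void demo_count_targets(const struct cpumask *cache_cpus,
			       const struct cpumask *tlb_cpus)
{
	struct cpumask mask;
	int cpu;

	cpumask_clear(&mask);
	if (cache_cpus)
		cpumask_or(&mask, &mask, cache_cpus);	/* mask |= *cache_cpus */
	if (tlb_cpus)
		cpumask_or(&mask, &mask, tlb_cpus);	/* mask |= *tlb_cpus */

	/* Each CPU in the union is visited exactly once, even if it
	 * appears in both input masks.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(demo_flush_count, cpu);
}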
Example #2
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
		if (!policy)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}
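Example #2 relies on a "done" mask so that each cpufreq policy is handled only once even though every online CPU is iterated. A generic sketch of that visit-each-group-once idiom, where demo_group_of() and demo_handle_group() are hypothetical stand-ins for policy->cpus and the per-policy tuner update:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helpers standing in for policy->cpus and the per-policy work. */
const struct cpumask *demo_group_of(unsigned int cpu);
void demo_handle_group(const struct cpumask *group);

static void demo_for_each_group_once(void)
{
	cpumask_var_t done;
	unsigned int cpu;

	if (!zalloc_cpumask_var(&done, GFP_KERNEL))
		return;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		const struct cpumask *group = demo_group_of(cpu);

		if (cpumask_test_cpu(cpu, done))
			continue;	/* this group was already handled */

		/* Mark every CPU of the group as visited in one step. */
		cpumask_or(done, done, group);
		demo_handle_group(group);
	}
	put_online_cpus();

	free_cpumask_var(done);
}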
Example #3
void domain_update_node_affinity(struct domain *d)
{
    cpumask_var_t cpumask;
    cpumask_var_t online_affinity;
    const cpumask_t *online;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    if ( !zalloc_cpumask_var(&cpumask) )
        return;
    if ( !alloc_cpumask_var(&online_affinity) )
    {
        free_cpumask_var(cpumask);
        return;
    }

    online = cpupool_online_cpumask(d->cpupool);

    spin_lock(&d->node_affinity_lock);

    for_each_vcpu ( d, v )
    {
        cpumask_and(online_affinity, v->cpu_affinity, online);
        cpumask_or(cpumask, cpumask, online_affinity);
    }

    /* Fold the accumulated CPU mask into the domain's node affinity. */
    for_each_online_node ( node )
        if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;

    spin_unlock(&d->node_affinity_lock);

    free_cpumask_var(online_affinity);
    free_cpumask_var(cpumask);
}
Example #4
/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t allowed, mask;
	int retval;
	struct task_struct *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	get_online_cpus();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;
	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
	cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
	read_unlock(&tasklist_lock);
	put_online_cpus();
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}
Example #5
/**
 * cpu_rmap_update - update CPU rmap following a change of object affinity
 * @rmap: CPU rmap to update
 * @index: Index of object whose affinity changed
 * @affinity: New CPU affinity of object
 */
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
		    const struct cpumask *affinity)
{
	cpumask_var_t update_mask;
	unsigned int cpu;

	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
		return -ENOMEM;

	/* Invalidate distance for all CPUs for which this used to be
	 * the nearest object.  Mark those CPUs for update.
	 */
	for_each_online_cpu(cpu) {
		if (rmap->near[cpu].index == index) {
			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
			cpumask_set_cpu(cpu, update_mask);
		}
	}

	debug_print_rmap(rmap, "after invalidating old distances");

	/* Set distance to 0 for all CPUs in the new affinity mask.
	 * Mark all CPUs within their NUMA nodes for update.
	 */
	for_each_cpu(cpu, affinity) {
		rmap->near[cpu].index = index;
		rmap->near[cpu].dist = 0;
		cpumask_or(update_mask, update_mask,
			   cpumask_of_node(cpu_to_node(cpu)));
	}

	debug_print_rmap(rmap, "after updating neighbours");

	/* Update distances based on topology, pulling each marked CPU's
	 * nearest object from progressively more distant CPU sets
	 * (following upstream lib/cpu_rmap.c).
	 */
	for_each_cpu(cpu, update_mask) {
		if (cpu_rmap_copy_neigh(rmap, cpu,
					topology_thread_cpumask(cpu), 1))
			continue;
		if (cpu_rmap_copy_neigh(rmap, cpu,
					topology_core_cpumask(cpu), 2))
			continue;
		if (cpu_rmap_copy_neigh(rmap, cpu,
					cpumask_of_node(cpu_to_node(cpu)), 3))
			continue;
		/* We could continue into NUMA node distances, but for now
		 * we give up.
		 */
	}

	debug_print_rmap(rmap, "after copying neighbours");

	free_cpumask_var(update_mask);
	return 0;
}
Example #6
/*
 * Return a mask of the cpus whose caches currently own these pages.
 * The return value is whether the pages are all coherently cached
 * (i.e. none are immutable, incoherent, or uncached).
 */
static int homecache_mask(struct page *page, int pages,
			  struct cpumask *home_mask)
{
	int i;
	int cached_coherently = 1;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return 0;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED) {
			cached_coherently = 0;
			continue;
		}
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
	return cached_coherently;
}
Example #7
unsigned int __init dom0_max_vcpus(void)
{
    unsigned int i, max_vcpus, limit;
    nodeid_t node;

    for ( i = 0; i < dom0_nr_pxms; ++i )
        if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
            node_set(node, dom0_nodes);
    nodes_and(dom0_nodes, dom0_nodes, node_online_map);
    if ( nodes_empty(dom0_nodes) )
        dom0_nodes = node_online_map;
    for_each_node_mask ( node, dom0_nodes )
        cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
    cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
    if ( cpumask_empty(&dom0_cpus) )
        cpumask_copy(&dom0_cpus, cpupool0->cpu_valid);

    max_vcpus = cpumask_weight(&dom0_cpus);
    if ( opt_dom0_max_vcpus_min > max_vcpus )
        max_vcpus = opt_dom0_max_vcpus_min;
    if ( opt_dom0_max_vcpus_max < max_vcpus )
        max_vcpus = opt_dom0_max_vcpus_max;
    limit = dom0_pvh ? HVM_MAX_VCPUS : MAX_VIRT_CPUS;
    if ( max_vcpus > limit )
        max_vcpus = limit;

    return max_vcpus;
}
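The loop that builds dom0_cpus above, OR-ing in one node's CPU mask at a time, is a common way of turning a node mask into a CPU mask. A minimal Linux-flavoured sketch of the same step, using cpumask_of_node() as the counterpart of Xen's node_to_cpumask():

#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

/* Collect every CPU belonging to a node set in @nodes into @dst. */
static void demo_nodes_to_cpumask(struct cpumask *dst, const nodemask_t *nodes)
{
	int node;

	cpumask_clear(dst);
	for_each_node_mask(node, *nodes)
		cpumask_or(dst, dst, cpumask_of_node(node));
}

Clamping the result against an online or pool mask afterwards, as dom0_max_vcpus() does, guards against nodes whose CPUs are not actually usable.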
Example #8
/*
 * Initializes PIC ITE entries PRM 9.5.6.26
 * XLP restricts CPU affinity to 8 groups. Though configurable, they are
 * programmed to have the following patterns.
 * 0 =>	Only 0th cpu on the node
 * 1 => All local threads in node; mask = (0xffffffff) on node
 * 2 => cpu0-15 on node; mask = 0x0000ffff & online_cpu_mask on node
 * 3 => cpu16-31 on node; mask = 0xffff0000 & online_cpu_mask on node
 * 4 => All cpus on all nodes; i.e.,
 * mask = (0xffffffff_ffffffff_ffffffff_ffffffff & physical online cpu map)
 * These are programmer defined groups and can be changed as warranted.
 * Added 5 => CPUs 0-7 on node; mask = 0x000000ff
 * Added 6 => CPUs 4-7 on node; mask = 0x000000f0
 * Added 7 => CPUs 0-3 on node; mask = 0x0000000f
 * Actual programmed value will take into consideration cpu_online_mask.
 *
 * There is a major issue that needs addressing when running in multi-node
 * mode: the number of nodes must be determined and programmed correctly.
 * If a bit in an ITE is programmed without the corresponding physical
 * thread being present, the system would hang when an interrupt is
 * dispatched to that CPU under the global scheme. This scenario must be
 * avoided, which is why phys_cpu_present_map is used.
 *
 * This function simply initializes the xlp_ites entries with the proposed
 * CPU masks.  */
static void xlp_ites_init(void)
{
    u64 bm = 0x1;
    u8 node;
    struct cpumask m;

    cpumask_clear(&m);
    for_each_online_node(node) {
        /* Simply set the static pattern in all */
        bm = 1;
        u32_to_cpumask(&xlp_ites[node][0], bm);
        cpumask_shift_left(&xlp_ites[node][0], &xlp_ites[node][0], NLM_MAX_CPU_PER_NODE * node); /* directs only to cpu0 of node `node` */

        bm = 0xffffffff;
        u32_to_cpumask(&xlp_ites[node][1], bm);
        cpumask_shift_left(&xlp_ites[node][1], &xlp_ites[node][1], NLM_MAX_CPU_PER_NODE * node); /* directs to all cpus of node `node` */
        cpumask_or(&m, &m, &xlp_ites[node][1]);

        bm = 0x0000ffff;
        u32_to_cpumask(&xlp_ites[node][2], bm);
        cpumask_shift_left(&xlp_ites[node][2], &xlp_ites[node][2], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0xffff0000;
        u32_to_cpumask(&xlp_ites[node][3], bm);
        cpumask_shift_left(&xlp_ites[node][3], &xlp_ites[node][3], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0x000000ff;
        u32_to_cpumask(&xlp_ites[node][5], bm);
        cpumask_shift_left(&xlp_ites[node][5], &xlp_ites[node][5], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0x000000f0;
        u32_to_cpumask(&xlp_ites[node][6], bm);
        cpumask_shift_left(&xlp_ites[node][6], &xlp_ites[node][6], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0x0000000f;
        u32_to_cpumask(&xlp_ites[node][7], bm);
        cpumask_shift_left(&xlp_ites[node][7], &xlp_ites[node][7], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

    }
    for_each_online_node(node) {
        cpumask_copy(&xlp_ites[node][4], &m);
    }
//	dump_all_ites();
}
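Each 32-bit pattern in the table above is expanded into a cpumask and then shifted so that bit 0 lands on the node's first hardware thread. A sketch of that step using only generic cpumask helpers; the conversion loop stands in for the driver's u32_to_cpumask(), and cpus_per_node corresponds to NLM_MAX_CPU_PER_NODE:

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/types.h>

/* Expand a 32-bit group pattern into a cpumask anchored at @node. */
static void demo_pattern_to_node_mask(struct cpumask *dst, u32 pattern,
				      int node, int cpus_per_node)
{
	struct cpumask tmp;
	int bit;

	cpumask_clear(&tmp);
	for (bit = 0; bit < 32; bit++)
		if (pattern & BIT(bit))
			cpumask_set_cpu(bit, &tmp);

	/* Shift the pattern up into the node's CPU number range. */
	cpumask_shift_left(dst, &tmp, node * cpus_per_node);
}

As the comment above notes, the value actually programmed should still be restricted to CPUs that are physically present and online.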
Example #9
static int mem_rx_setup(struct link_device *ld)
{
	struct mem_link_device *mld = to_mem_link_device(ld);

	if (!zalloc_cpumask_var(&mld->dmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&mld->imask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&mld->tmask, GFP_KERNEL))
		return -ENOMEM;

#ifdef CONFIG_ARGOS
	/* The hard-coded mask values below should be removed later on.
	 * Like net-sysfs, the argos module should also provide a sysfs
	 * knob so that the user layer can control these CPU masks. */
#ifdef CONFIG_SCHED_HMP
	cpumask_copy(mld->dmask, &hmp_slow_cpu_mask);
#endif

	cpumask_or(mld->imask, mld->imask, cpumask_of(3));

	argos_irq_affinity_setup_label(217, "IPC", mld->imask, mld->dmask);
#endif

	ld->tx_wq = create_singlethread_workqueue("mem_tx_work");
	if (!ld->tx_wq) {
		mif_err("%s: ERR! fail to create tx_wq\n", ld->name);
		return -ENOMEM;
	}

	ld->rx_wq = alloc_workqueue(
			"mem_rx_work", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!ld->rx_wq) {
		mif_err("%s: ERR! fail to create rx_wq\n", ld->name);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&ld->rx_delayed_work, link_to_demux_work);

	return 0;
}
Example #10
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long uninitialized_var(preferred_cpu);

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);
	free_cpumask_var(tmp);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
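The selection above prefers CPUs that are neither busy nor SMT siblings of busy CPUs, falling back to merely non-busy CPUs when that set is empty. A condensed sketch of that preference order, using the same older topology_thread_cpumask() helper; demo_pick_idle_cpu() is illustrative and ignores the per-CPU weight bookkeeping done above:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>

/* Pick an online CPU outside @busy, avoiding SMT siblings of busy CPUs
 * when possible.  Returns -1 if no suitable CPU exists.
 */
static int demo_pick_idle_cpu(const struct cpumask *busy)
{
	cpumask_var_t cand;
	int cpu, pick;

	if (!alloc_cpumask_var(&cand, GFP_KERNEL))
		return -1;

	/* Candidates: online CPUs that are not siblings of any busy CPU
	 * (a CPU is part of its own sibling mask, so busy CPUs drop out too).
	 */
	cpumask_clear(cand);
	for_each_cpu(cpu, busy)
		cpumask_or(cand, cand, topology_thread_cpumask(cpu));
	cpumask_andnot(cand, cpu_online_mask, cand);

	/* Fall back to any online CPU that is not itself busy. */
	if (cpumask_empty(cand))
		cpumask_andnot(cand, cpu_online_mask, busy);

	pick = cpumask_empty(cand) ? -1 : (int)cpumask_first(cand);
	free_cpumask_var(cand);
	return pick;
}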
Example #11
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}
Example #12
/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() reports
		 * unconditionally CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
Example #13
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
				group->sgc->id,
				cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
				cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}
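The debug walk above ORs every group's span into groupmask and finally compares the union with the domain span. A standalone sketch of that coverage check, array-based for brevity (the scheduler itself keeps the groups on a circular list):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Return true if the union of @nr group spans equals @domain_span. */
static bool demo_groups_cover_domain(const struct cpumask *domain_span,
				     const struct cpumask *group_spans,
				     int nr)
{
	cpumask_var_t acc;
	bool covered;
	int i;

	if (!zalloc_cpumask_var(&acc, GFP_KERNEL))
		return false;

	for (i = 0; i < nr; i++)
		cpumask_or(acc, acc, &group_spans[i]);

	covered = cpumask_equal(acc, domain_span);
	free_cpumask_var(acc);
	return covered;
}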