Example #1
/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	The new cpu affinity mask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;
	unsigned long flags;

	if (!chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
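	/*
	 * With GENERIC_PENDING_IRQ the update is deferred until the next
	 * interrupt unless the irq can be moved from process context
	 * (IRQ_MOVE_PCNTXT).
	 */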
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
			cpumask_copy(desc->irq_data.affinity, cpumask);
			irq_set_thread_affinity(desc);
		}
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
		cpumask_copy(desc->irq_data.affinity, cpumask);
		irq_set_thread_affinity(desc);
	}
#endif
	desc->status |= IRQ_AFFINITY_SET;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
Example #2
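/*
 * Caller must hold desc->lock. The new mask is applied immediately when
 * the irq can be moved from process context; otherwise it is recorded as
 * pending and picked up on the next interrupt.
 */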
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
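			/* fall through */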
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
Example #3
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
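		/* fall through */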
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
Example #4
/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	The new cpu affinity mask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		cpumask_copy(desc->affinity, cpumask);
		desc->chip->set_affinity(irq, cpumask);
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	cpumask_copy(desc->affinity, cpumask);
	desc->chip->set_affinity(irq, cpumask);
#endif
	irq_set_thread_affinity(desc, cpumask);
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
Example #5
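/*
 * Apply a deferred affinity change: called with the irq masked and
 * desc->lock held, this programs any pending mask recorded by an
 * earlier affinity request that could not be applied directly.
 */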
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be setting the RTE while an
	 * active trigger is coming in. This could cause some ioapics
	 * to malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		int ret = chip->irq_set_affinity(&desc->irq_data,
						 desc->pending_mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
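			/* fall through */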
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
		}
	}

	cpumask_clear(desc->pending_mask);
}
Example #6
/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}
Example #7
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be setting the RTE while an
	 * active trigger is coming in. This could cause some ioapics
	 * to malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
			cpumask_copy(desc->affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}
	}

	cpumask_clear(desc->pending_mask);
}
Example #8
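/*
 * Hand the new mask to the irq chip and, on success, update the cached
 * affinity and any threaded handlers. IRQ_SET_MASK_OK_NOCOPY means the
 * chip callback has already updated data->affinity itself.
 */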
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
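		/* fall through */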
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
Example #9
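/*
 * Generic version of the affinity autoselector, additionally confined
 * to the NUMA node of the interrupt where possible.
 */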
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
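		/* fall through */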
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}