Code Example #1
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
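			/* fall through - both cases also update the threaded handler's affinity */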
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
Code Example #2
File: dummychip.c Project: 19Dan01/linux
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(struct irq_data *data)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	print_irq_desc(data->irq, desc);
	ack_bad_irq(data->irq);
}
Code Example #3
File: manage.c Project: AICP/kernel_moto_shamu
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
Code Example #4
File: chip.c Project: ANFS/ANFS-kernel
/*
 * default shutdown function
 */
static void default_shutdown(struct irq_data *data)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->irq_data.chip->irq_mask(&desc->irq_data);
	desc->status |= IRQ_MASKED;
}
Code Example #5
File: chip.c Project: ANFS/ANFS-kernel
/*
 * default startup function
 */
static unsigned int default_startup(struct irq_data *data)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->irq_data.chip->irq_enable(data);
	return 0;
}
Code Example #6
File: chip.c Project: ANFS/ANFS-kernel
/*
 * default enable function
 */
static void default_enable(struct irq_data *data)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->irq_data.chip->irq_unmask(&desc->irq_data);
	desc->status &= ~IRQ_MASKED;
}
Code Example #7
File: migration.c Project: 020gzh/linux
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
Code Example #8
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in the edge-triggered case we might be
	 * writing the RTE while an active trigger is coming in,
	 * which could cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		int ret = chip->irq_set_affinity(&desc->irq_data,
						 desc->pending_mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
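			/* fall through - both cases also update the threaded handler's affinity */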
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
		}
	}

	cpumask_clear(desc->pending_mask);
}
Code Example #9
File: hyperv-iommu.c Project: Anjali05/linux
static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data;
	struct irq_desc *desc;
	int ret = 0;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	irq_data = irq_domain_get_irq_data(domain, virq);
	if (!irq_data) {
		irq_domain_free_irqs_common(domain, virq, nr_irqs);
		return -EINVAL;
	}

	irq_data->chip = &hyperv_ir_chip;

	/*
	 * If the IOMMU provided interrupt remapping, setting irq affinity
	 * would only require changing the IOMMU's IRTE. Hyper-V does not
	 * support interrupt remapping, so setting the affinity of an
	 * IO-APIC interrupt still has to update the IO-APIC registers.
	 * However, ioapic_configure_entry() ignores cfg->vector and
	 * cfg->dest_apicid when the IO-APIC's parent irq domain is not the
	 * vector domain (see ioapic_configure_entry()). To get the vector
	 * and dest_apicid written into the IO-APIC register anyway, the
	 * IO-APIC entry pointer is saved in chip_data, and
	 * hyperv_irq_remapping_activate()/hyperv_ir_set_affinity() write
	 * the vector and dest_apicid directly into the IO-APIC entry.
	 */
	irq_data->chip_data = info->ioapic_entry;

	/*
	 * Hyper-V IO-APIC irq affinity must stay within ioapic_max_cpumask
	 * because there is no irq remapping support.
	 */
	desc = irq_data_to_desc(irq_data);
	cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);

	return 0;
}
Code Example #10
File: manage.c Project: AICP/kernel_moto_shamu
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
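		/* fall through - both cases also update the threaded handler's affinity */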
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
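
Every example above recovers the owning struct irq_desc from a struct irq_data via irq_data_to_desc(). The helper's definition is not shown in any of these snippets, so the following is only a minimal sketch of the container_of() idiom such a helper is typically built on; the exact definition varies between kernel versions and should be treated as an assumption here.

/*
 * Sketch only: irq_data_to_desc() is typically a container_of() lookup.
 * Older kernels embed irq_data directly in irq_desc (as assumed below);
 * newer kernels reach the descriptor through irq_data->common and
 * irq_common_data instead, so the real definition is version-dependent.
 */
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
	return container_of(data, struct irq_desc, irq_data);
}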