Example No. 1
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}
Example No. 2
static bool suspend_device_irq(struct irq_desc *desc, int irq)
{
	if (!desc->action || desc->no_suspend_depth)
		return false;

	if (irqd_is_wakeup_set(&desc->irq_data)) {
		irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
		/*
		 * We return true here to force the caller to issue
		 * synchronize_irq(). We need to make sure that the
		 * IRQD_WAKEUP_ARMED is visible before we return from
		 * suspend_device_irqs().
		 */
		return true;
	}

	desc->istate |= IRQS_SUSPENDED;
	__disable_irq(desc, irq);

	/*
	 * Hardware which has no wakeup source configuration facility
	 * requires that the non-wakeup interrupts are masked at the
	 * chip level. The chip implementation indicates that with
	 * IRQCHIP_MASK_ON_SUSPEND.
	 */
	if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
		mask_irq(desc);
	return true;
}
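The comment above promises that a true return forces the caller to issue synchronize_irq(); a minimal sketch of that caller, modeled on the kernel's suspend_device_irqs() (exact form varies by kernel version):

void suspend_device_irqs(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc) {
		unsigned long flags;
		bool sync;

		raw_spin_lock_irqsave(&desc->lock, flags);
		sync = suspend_device_irq(desc, irq);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* honor the synchronize_irq() contract from the comment above */
		if (sync)
			synchronize_irq(irq);
	}
}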
Example No. 3
int zw_irq_set_irq_wake(unsigned int irq, unsigned int on)
{
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
    int ret = 0;

    if (!desc)
        return -EINVAL;

    if (on) {
        if (desc->zw_wake_depth != 0) {
            if (set_irq_wake_real(irq, on) == 0) {
                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                desc->wake_depth = desc->zw_wake_depth;
                desc->zw_wake_depth = 0;
            }
        }
    } else {
        if ((desc->zw_wake_depth == 0) && (desc->wake_depth > 0)) {
            if (set_irq_wake_real(irq, on) == 0) {
                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                desc->zw_wake_depth = desc->wake_depth;
                /*
                 * Park wake_depth at a large sentinel: ordinary
                 * irq_set_irq_wake() decrements can then never reach
                 * zero while the real depth is stashed in zw_wake_depth.
                 */
                desc->wake_depth = 0x7FFFFFFFU;
            }
        }
    }
    irq_put_desc_busunlock(desc, flags);
    return ret;
}
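A hypothetical caller for the vendor-specific function above; the zw_suspend_prepare()/zw_resume_complete() names and the zw_irq variable are illustrative, not from the source:

static unsigned int zw_irq;	/* illustrative: set up elsewhere */

static int zw_suspend_prepare(void)
{
    /* stash wake_depth in zw_wake_depth and disarm the wakeup source */
    return zw_irq_set_irq_wake(zw_irq, 0);
}

static void zw_resume_complete(void)
{
    /* restore the stashed wake_depth and rearm */
    zw_irq_set_irq_wake(zw_irq, 1);
}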
Example No. 4
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;
	irqreturn_t ret;
#ifdef CONFIG_SLP_WAKEUP_COUNT
	unsigned long irqflags;
#endif

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);
#ifdef CONFIG_SLP_WAKEUP_COUNT
	spin_lock_irqsave(&wakeup_status_lock, irqflags);	/* protects wakeup_state */
	if (wakeup_state) {
		raw_spin_lock(&desc->lock);
		desc->hit_in_sleep++;
		raw_spin_unlock(&desc->lock);
		wakeup_state = 0;
	}
	spin_unlock_irqrestore(&wakeup_status_lock, irqflags);
#endif

	ret = handle_irq_event_percpu(desc, action);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	return ret;
}
Example No. 5
/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
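As the kernel-doc notes, this entry point serves demultiplexing handlers; a hypothetical demux sketch (the my_* names and MY_STATUS_REG are illustrative, not from the source):

struct my_demux_chip {
	void __iomem *base;
	struct irq_domain *domain;
	unsigned int nr_irqs;
};

#define MY_STATUS_REG	0x04	/* illustrative register offset */

static irqreturn_t my_demux_handler(int irq, void *data)
{
	struct my_demux_chip *chip = data;
	unsigned long pending = readl(chip->base + MY_STATUS_REG);
	int hwirq;

	/* fire the untracked flow for every pending child line */
	for_each_set_bit(hwirq, &pending, chip->nr_irqs) {
		unsigned int virq = irq_find_mapping(chip->domain, hwirq);

		handle_untracked_irq(irq_to_desc(virq));
	}
	return IRQ_HANDLED;
}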
Example No. 6
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
Example No. 7
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
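A hypothetical usage sketch for handle_nested_irq(): a slow-bus expander driver whose parent interrupt is requested with request_threaded_irq() dispatches children from the thread function (my_* names are illustrative, not from the source):

struct my_expander {
	struct irq_domain *domain;
	unsigned int nr_lines;
};

/* illustrative helper: reads the pending mask over I2C/SPI, may sleep */
static unsigned long my_expander_read_pending(struct my_expander *chip);

static irqreturn_t my_expander_thread_fn(int irq, void *data)
{
	struct my_expander *chip = data;
	unsigned long pending = my_expander_read_pending(chip);
	int bit;

	/* safe here: we run in thread context, so nested handlers may sleep */
	for_each_set_bit(bit, &pending, chip->nr_lines)
		handle_nested_irq(irq_find_mapping(chip->domain, bit));

	return IRQ_HANDLED;
}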
Example No. 8
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
Example No. 9
/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
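Drivers normally reach irq_set_irq_wake() through thin wrappers; the kernel's enable_irq_wake()/disable_irq_wake() helpers from include/linux/interrupt.h are essentially:

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}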
Example No. 10
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
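A sketch of the unlocked entry point that takes desc->lock and delegates here, modeled on the kernel's __irq_set_affinity() (exact form varies by version):

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}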
Example No. 11
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
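A simplified sketch of how alloc_desc() is consumed, modeled on the kernel's alloc_descs(); managed-affinity handling and sparse-irq locking are elided and vary by kernel version:

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, 0, affinity, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
	}
	return start;

err:
	/* unwind the descriptors allocated so far */
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}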
Example No. 12
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
#ifdef CONFIG_SMP
	INIT_LIST_HEAD(&desc->affinity_notify);
#endif
}
Example No. 13
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

    if (!desc)
        return;
    irq_settings_clr_and_set(desc, clr, set);

    irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
               IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
    if (irq_settings_has_no_balance_set(desc))
        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
    if (irq_settings_is_per_cpu(desc))
        irqd_set(&desc->irq_data, IRQD_PER_CPU);
    if (irq_settings_can_move_pcntxt(desc))
        irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);

    irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

    irq_put_desc_unlock(desc, flags);
}
Example No. 14
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
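Drivers usually reach irq_modify_status() through convenience wrappers; the kernel's irq_set_status_flags()/irq_clear_status_flags() helpers from include/linux/irq.h are essentially:

static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}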
Example No. 15
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;
	irqreturn_t ret;

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	ret = handle_irq_event_percpu(desc, action);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	return ret;
}
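A sketch of a flow handler that consumes handle_irq_event(), modeled on the kernel's handle_level_irq(); stats accounting is elided and details vary by version. Note that desc->lock is held on entry to handle_irq_event() and reacquired before it returns, exactly as the example above expects:

void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/* if disabled or no action installed, keep it masked and pending */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}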
Example No. 16
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}
Example No. 17
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	int mask_this_irq = 0;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		mask_this_irq = 1;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	if (unlikely(mask_this_irq)) {
		chip_bus_lock(desc);
		mask_irq(desc);
		chip_bus_sync_unlock(desc);
	}
}
Example No. 18
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
Example No. 19
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}
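A sketch of how irq_state_set_masked() is consumed, modeled on the kernel's mask_irq() (exact form varies by version); irq_state_set_disabled() is consumed analogously by the kernel's irq_disable():

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}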
Example No. 20
static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}
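A sketch of the consumer, modeled on the kernel's __irq_startup() from v4.13+ (exact form varies by version):

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}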
Example No. 21
// ARM10C 20141004
// irq: 0, desc: kmem_cache#28-o0, node: 0, owner: null
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	// desc->irq_data.irq: (kmem_cache#28-o0)->irq_data.irq, irq: 0
	desc->irq_data.irq = irq;
	// desc->irq_data.irq: (kmem_cache#28-o0)->irq_data.irq: 0

	// desc->irq_data.chip: (kmem_cache#28-o0)->irq_data.chip
	desc->irq_data.chip = &no_irq_chip;
	// desc->irq_data.chip: (kmem_cache#28-o0)->irq_data.chip: &no_irq_chip

	// desc->irq_data.chip_data: (kmem_cache#28-o0)->irq_data.chip_data
	desc->irq_data.chip_data = NULL;
	// desc->irq_data.chip_data: (kmem_cache#28-o0)->irq_data.chip_data: NULL

	// desc->irq_data.handler_data: (kmem_cache#28-o0)->irq_data.handler_data
	desc->irq_data.handler_data = NULL;
	// desc->irq_data.handler_data: (kmem_cache#28-o0)->irq_data.handler_data: NULL

	// desc->irq_data.msi_desc: (kmem_cache#28-o0)->irq_data.msi_desc
	desc->irq_data.msi_desc = NULL;
	// desc->irq_data.msi_desc: (kmem_cache#28-o0)->irq_data.msi_desc: NULL

	// desc: kmem_cache#28-o0, 0xFFFFFFFF, _IRQ_DEFAULT_INIT_FLAGS: 0xc00
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);

	// what irq_settings_clr_and_set did:
	// desc->status_use_accessors: (kmem_cache#28-o0)->status_use_accessors: 0xc00

	// &desc->irq_data: &(kmem_cache#28-o0)->irq_data, IRQD_IRQ_DISABLED: 0x10000
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);

	// what irqd_set did:
	// d->state_use_accessors: (&(kmem_cache#28-o0)->irq_data)->state_use_accessors: 0x10000

	// desc->handle_irq: (kmem_cache#28-o0)->handle_irq
	desc->handle_irq = handle_bad_irq;
	// desc->handle_irq: (kmem_cache#28-o0)->handle_irq: handle_bad_irq

	// desc->depth: (kmem_cache#28-o0)->depth
	desc->depth = 1;
	// desc->depth: (kmem_cache#28-o0)->depth: 1

	// desc->irq_count: (kmem_cache#28-o0)->irq_count
	desc->irq_count = 0;
	// desc->irq_count: (kmem_cache#28-o0)->irq_count: 0

	// desc->irqs_unhandled: (kmem_cache#28-o0)->irqs_unhandled
	desc->irqs_unhandled = 0;
	// desc->irqs_unhandled: (kmem_cache#28-o0)->irqs_unhandled: 0

	// desc->name: (kmem_cache#28-o0)->name
	desc->name = NULL;
	// desc->name: (kmem_cache#28-o0)->name: NULL

	// desc->owner: (kmem_cache#28-o0)->owner, owner: null
	desc->owner = owner;
	// desc->owner: (kmem_cache#28-o0)->owner: null

	for_each_possible_cpu(cpu)
	// for ((cpu) = -1; (cpu) = cpumask_next((cpu), (cpu_possible_mask)), (cpu) < nr_cpu_ids; )
		// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs, cpu: 0
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
		// [pcp0] (kmem_cache#28-o0)->kstat_irqs: 0
		// repeated for cpu 1 .. 3

	// desc: kmem_cache#28-o0, node: 0
	desc_smp_init(desc, node);

	// what desc_smp_init did:
	// desc->irq_data.node: (kmem_cache#28-o0)->irq_data.node: 0
	// desc->irq_data.affinity: (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0xF
}