static irqreturn_t hi6402_btnup_eco_handler(int irq, void *data)
{
	struct hi6402_mbhc_platform_data *pdata =
			(struct hi6402_mbhc_platform_data *)data;

	BUG_ON(NULL == pdata);

	pr_err("----cg---- %s\n", __FUNCTION__);

	mask_irq(pdata->irq[HI6402_IRQ_BTNUP_ECO], true);
	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_ECO], false);

	if (!check_headset_pluged_in(pdata))
		return IRQ_HANDLED;

	wake_lock_timeout(&pdata->wake_lock, 100);

	if (HISI_JACK_INVERT == pdata->hs_status) {
		pr_err("%s: further detect\n", __FUNCTION__);
		/* further detect */
		queue_delayed_work(pdata->headset_plug_in_delay_wq,
				&pdata->headset_plug_in_delay_work,
				msecs_to_jiffies(50));
	} else if (0 == pdata->btn_report) {
		return IRQ_HANDLED;
	} else {
		queue_delayed_work(pdata->headset_btn_up_delay_wq,
				&pdata->headset_btn_up_delay_work,
				msecs_to_jiffies(50));
	}

	return IRQ_HANDLED;
}
static irqreturn_t hi6402_btndown_handler(int irq, void *data)
{
	struct hi6402_mbhc_platform_data *pdata =
			(struct hi6402_mbhc_platform_data *)data;

	BUG_ON(NULL == pdata);

	pr_err("----cg---- %s\n", __FUNCTION__);

	if (!check_headset_pluged_in(pdata))
		return IRQ_HANDLED;

	/* mask btn down interrupt */
	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_COMP1], true);
	/* unmask btn up interrupt */
	mask_irq(pdata->irq[HI6402_IRQ_BTNUP_COMP1], false);

	wake_lock_timeout(&pdata->wake_lock, 50);

	queue_delayed_work(pdata->headset_btn_down_delay_wq,
				&pdata->headset_btn_down_delay_work,
				msecs_to_jiffies(30));

	return IRQ_HANDLED;
}
void hi6402_plug_in_detect(struct hi6402_mbhc_platform_data *pdata)
{
	if (!check_headset_pluged_in(pdata))
		return;

	wake_lock(&pdata->wake_lock);
	mutex_lock(&pdata->plug_mutex);

	mutex_lock(&pdata->status_mutex);
	/* TODO: btn_report supports 4-pole headsets only for now */
	pdata->hs_status = HISI_JACK_HEADSET;
	pdata->btn_report = SND_JACK_HEADSET;
	mutex_unlock(&pdata->status_mutex);

	hi6402_jack_report(pdata);

	/* todo */
	hi6402_reg_clr_bit(pdata->p_irq, HI6402_MBHC_VREF_REG, 7);
	hi6402_reg_set_bit(pdata->p_irq, HI6402_MICBIAS_ECO_REG, 0);

	/* unmask btn down irq */
	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_COMP1], false);
	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_ECO], false);

	mutex_unlock(&pdata->plug_mutex);
	wake_unlock(&pdata->wake_lock);
}
void hi6402_plug_out_workfunc(struct work_struct *work)
{
	struct hi6402_mbhc_platform_data *pdata = container_of(work,
			struct hi6402_mbhc_platform_data,
			headset_plug_out_delay_work.work);

	pr_info("%s : hs pluged out\n", __FUNCTION__);

	BUG_ON(NULL == pdata);

	wake_lock(&pdata->wake_lock);
	mutex_lock(&pdata->plug_mutex);

	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_COMP1], true);
	mask_irq(pdata->irq[HI6402_IRQ_BTNUP_COMP1], true);
	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_ECO], true);
	/* todo */
	hi6402_reg_clr_bit(pdata->p_irq, HI6402_MICBIAS_ECO_REG, 0);
	hi6402_reg_set_bit(pdata->p_irq, HI6402_MBHC_VREF_REG, 7);

	mutex_lock(&pdata->status_mutex);
	pdata->hs_status = HISI_JACK_NONE;
	pdata->btn_report = 0;
	mutex_unlock(&pdata->status_mutex);

	hi6402_jack_report(pdata);

	mutex_unlock(&pdata->plug_mutex);
	wake_unlock(&pdata->wake_lock);

	/* unmask plugin interrupt */
	mask_irq(pdata->irq[HI6402_IRQ_PLUGIN], false);
}
Example #5
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
    struct irqaction *action;
    irqreturn_t action_ret;

    raw_spin_lock(&desc->lock);

    if (unlikely(desc->status & IRQ_INPROGRESS))
        goto out;

    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If it's disabled or no action is available,
     * then mask it and get out of here:
     */
    action = desc->action;
    if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
        desc->status |= IRQ_PENDING;
        mask_irq(desc);
        goto out;
    }

    if (desc->status & IRQ_ONESHOT)
        mask_irq(desc);

    desc->status |= IRQ_INPROGRESS;
    desc->status &= ~IRQ_PENDING;
    raw_spin_unlock(&desc->lock);

    action_ret = handle_IRQ_event(irq, action);
    if (!noirqdebug)
        note_interrupt(irq, desc, action_ret);

    raw_spin_lock(&desc->lock);
    desc->status &= ~IRQ_INPROGRESS;
#ifdef CONFIG_IPIPE
    if (!(desc->status & IRQ_MASKED))
        desc->irq_data.chip->irq_unmask(&desc->irq_data);
out:
#else
out:
    desc->irq_data.chip->irq_eoi(&desc->irq_data);
#endif

    raw_spin_unlock(&desc->lock);
}
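
The comment above spells out the chip-side contract for this flow: the core issues a single ->irq_eoi() once the interrupt has been serviced, and falls back on ->irq_mask()/->irq_unmask() for the disabled, pending, and oneshot paths. Below is a minimal sketch of such a transparent controller; the MYCHIP_* register offsets and the mychip_write() accessor are hypothetical stand-ins, not taken from any example on this page.

#include <linux/irq.h>

/* hypothetical register offsets for this sketch */
#define MYCHIP_EOI	0x00
#define MYCHIP_MASK_SET	0x04
#define MYCHIP_MASK_CLR	0x08

/* hypothetical MMIO accessor: write val to the controller register at reg */
static void mychip_write(unsigned int reg, unsigned long val)
{
	/* stub for the sketch; a real driver would do an MMIO write here */
	(void)reg;
	(void)val;
}

static void mychip_irq_eoi(struct irq_data *d)
{
	/* one end-of-interrupt write; no ack or mask in the fast path */
	mychip_write(MYCHIP_EOI, d->hwirq);
}

static void mychip_irq_mask(struct irq_data *d)
{
	mychip_write(MYCHIP_MASK_SET, 1UL << d->hwirq);
}

static void mychip_irq_unmask(struct irq_data *d)
{
	mychip_write(MYCHIP_MASK_CLR, 1UL << d->hwirq);
}

static struct irq_chip mychip_irq_chip = {
	.name		= "mychip",
	.irq_eoi	= mychip_irq_eoi,
	.irq_mask	= mychip_irq_mask,
	.irq_unmask	= mychip_irq_unmask,
};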
Example #6
static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}
Example #7
static bool suspend_device_irq(struct irq_desc *desc, int irq)
{
	if (!desc->action || desc->no_suspend_depth)
		return false;

	if (irqd_is_wakeup_set(&desc->irq_data)) {
		irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
		/*
		 * We return true here to force the caller to issue
		 * synchronize_irq(). We need to make sure that the
		 * IRQD_WAKEUP_ARMED is visible before we return from
		 * suspend_device_irqs().
		 */
		return true;
	}

	desc->istate |= IRQS_SUSPENDED;
	__disable_irq(desc, irq);

	/*
	 * Hardware which has no wakeup source configuration facility
	 * requires that the non wakeup interrupts are masked at the
	 * chip level. The chip implementation indicates that with
	 * IRQCHIP_MASK_ON_SUSPEND.
	 */
	if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
		mask_irq(desc);
	return true;
}
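
Chips with no wakeup-source configuration of their own declare the flag named in the comment, so the core masks their non-wakeup interrupts during suspend. A hedged sketch, reusing the hypothetical mychip_* callbacks from the earlier sketch:

static struct irq_chip mychip_irq_chip = {
	.name		= "mychip",
	.irq_mask	= mychip_irq_mask,	/* hypothetical callbacks */
	.irq_unmask	= mychip_irq_unmask,
	/* no wakeup configuration in hardware: let the core mask
	 * non-wakeup interrupts at the chip level on suspend */
	.flags		= IRQCHIP_MASK_ON_SUSPEND,
};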
Example #8
static inline void device_interrupt(int irq, int ack, struct pt_regs * regs)
{
	struct irqaction * action;

	if ((unsigned) irq > NR_IRQS) {
		printk("device_interrupt: unexpected interrupt %d\n", irq);
		return;
	}

	kstat.interrupts[irq]++;
	action = irq_action[irq];
	/*
	 * For normal interrupts, we mask it out, and then ACK it.
	 * This way another (more timing-critical) interrupt can
	 * come through while we're doing this one.
	 *
	 * Note! An irq without a handler gets masked and acked, but
	 * never unmasked. The autoirq stuff depends on this (it looks
	 * at the masks before and after doing the probing).
	 */
	mask_irq(ack);
	ack_irq(ack);
	if (!action)
		return;
	if (action->flags & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	do {
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	unmask_irq(ack);
}
Example #9
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk("Trying to free IRQ%d\n",irq);
		return;
	}
	if (IS_RESERVED_IRQ(irq)) {
		printk("Trying to free reserved IRQ %d\n", irq);
		return;
	}
	for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		save_flags(flags);
		cli();
		*p = action->next;
		if (!irq_action[irq])
			mask_irq(irq);
		restore_flags(flags);
		kfree(action);
		return;
	}
	printk("Trying to free free IRQ%d\n",irq);
}
Example #10
File: irq.c Project: nhanh0/hah
void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs)
{
	unsigned long bit;
	int irq;
	int cpu = smp_processor_id();

#ifdef DEBUG_IRQ
	if (mask != (1L << MAX_CPU_IRQ))
	    printk("do_irq_mask %08lx %p %p\n", mask, region, regs);
#endif

	for(bit=(1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) {
		int irq_num;
		if(!(bit&mask))
			continue;

		irq_num = region->data.irqbase + irq;

		++kstat.irqs[cpu][IRQ_FROM_REGION(CPU_IRQ_REGION) | irq];
		if (IRQ_REGION(irq_num) != CPU_IRQ_REGION)
		    ++kstat.irqs[cpu][irq_num];

		mask_irq(irq_num);
		do_irq(&region->action[irq], irq_num, regs);
		unmask_irq(irq_num);
	}
}
Example #11
void hi6402es_sound_triger_workfunc(struct work_struct *work)
{
	struct hi6402es_mbhc_platform_data *pdata = container_of(work,
			struct hi6402es_mbhc_platform_data,
			headset_sound_triger_delay_work.work);

	BUG_ON(NULL == pdata);

	wake_lock(&pdata->wake_lock);

	/* clr VAD INTR */
	hi6402es_irq_write(pdata->p_irq, HI6402ES_VAD_INT_SET, 0);
	hi6402es_irq_write(pdata->p_irq, HI6402ES_REG_IRQ_1, 1<<HI6402ES_VAD_BIT);

	mask_irq(pdata->irq[HI6402ES_IRQ_SOUND_TRIGER], false);

	hi6402es_soc_jack_report(SND_JACK_BTN_5, SND_JACK_BTN_5);

	hi6402es_soc_jack_report(0, SND_JACK_BTN_5);

	pr_info("%s(%u): sound_triger = 0x%x\n",
			__FUNCTION__, __LINE__, SND_JACK_BTN_5);

	wake_unlock(&pdata->wake_lock);

	return;
}
Example #12
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
    raw_spin_lock(&desc->lock);

    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

    /*
     * If we're currently running this IRQ, or it's disabled,
     * we shouldn't process the IRQ. Mark it pending, handle
     * the necessary masking and go out
     */
    if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
                 !desc->action)) {
        desc->status |= (IRQ_PENDING | IRQ_MASKED);
        mask_ack_irq(desc, irq);
        goto out_unlock;
    }
    kstat_incr_irqs_this_cpu(irq, desc);

    /* Start handling the irq */
    if (desc->chip->ack)
        desc->chip->ack(irq);

    /* Mark the IRQ currently in progress.*/
    desc->status |= IRQ_INPROGRESS;

    do {
        struct irqaction *action = desc->action;
        irqreturn_t action_ret;

        if (unlikely(!action)) {
            mask_irq(desc, irq);
            goto out_unlock;
        }

        /*
         * When another irq arrived while we were handling
         * one, we could have masked the irq.
         * Re-enable it if it was not disabled in the meantime.
         */
        if (unlikely((desc->status &
                      (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                     (IRQ_PENDING | IRQ_MASKED))) {
            unmask_irq(desc, irq);
        }

        desc->status &= ~IRQ_PENDING;
        raw_spin_unlock(&desc->lock);
        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
            note_interrupt(irq, desc, action_ret);
        raw_spin_lock(&desc->lock);

    } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

    desc->status &= ~IRQ_INPROGRESS;
out_unlock:
    raw_spin_unlock(&desc->lock);
}
Example #13
/*
 * do_IRQ handles IRQ's that have been installed without the
 * SA_INTERRUPT flag: it uses the full signal-handling return
 * and runs with other interrupts enabled. All relatively slow
 * IRQ's should use this format: notably the keyboard/timer
 * routines.
 */
static void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;

	cpu = smp_processor_id();
	irq_enter(cpu);
	kstat.irqs[cpu][irq]++;

	mask_irq(irq);  
	action = *(irq + irq_action);
	if (action) {
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		action = *(irq + irq_action);
		do_random = 0;
		do {
			do_random |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();
	} else {
		printk("do_IRQ: Unregistered IRQ (0x%X) occured\n", irq);
	}
	unmask_irq(irq);
	irq_exit(cpu);

	/* unmasking and bottom half handling is done magically for us. */
}
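
For context, a sketch of how handlers end up on irq_action in this 2.x-era API: a timing-critical handler passes SA_INTERRUPT and runs with interrupts disabled, while a slow handler passes 0 and gets the __sti()/__cli() treatment shown above. The irq numbers and handler names here are hypothetical, not from the snippet.

static void fast_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* registered with SA_INTERRUPT: runs with interrupts disabled */
}

static void slow_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* registered without SA_INTERRUPT: other interrupts stay enabled */
}

static int example_init(void)
{
	int ret;

	ret = request_irq(4, fast_isr, SA_INTERRUPT, "fastdev", NULL);
	if (ret)
		return ret;
	return request_irq(5, slow_isr, 0, "slowdev", NULL);
}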
Example #14
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
bool
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	bool handled = false;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		if (!irq_settings_is_level(desc))
			desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

	handled = true;

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return handled;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
Example #15
void disable_irq(unsigned int irq_nr)
{
	unsigned long flags;

	save_and_cli(flags);
	mask_irq(irq_nr);
	restore_flags(flags);
}
Example #16
// Stop watching an IRQ that was watched
// with watch_irq.
void release_irq(u_char irq_number, u_long key)
{
	if((irq_keys[irq_number]-1) == key && irq_number != 0)
	{
		mask_irq(irq_number); // mask the IRQ
		disable_gate(irq_number+0x40); // and disable the interrupt
		irq_keys[irq_number]--;
	}
}
Example #17
void irq_dispatch(regs r, uint32 number)
{
    mask_irq(number);
    if (irq_task_map[number]) {
        if (irq_task_map[number]->flags == tSLEEP_IRQ) {
            preempt(irq_task_map[number], ERR_NONE);
        }
    }
}
Example #18
void mask_irq_count(int irq_nr) 
{
	unsigned long flags;
	int pil = irq_to_pil(irq_nr);
	
	save_and_cli(flags);
	if (!--pil_in_use[pil])
		mask_irq(irq_nr);
	restore_flags(flags);
}
Example #19
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (irq_settings_disable_unlazy(desc)) {
		mask_irq(desc);
	}
}
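
The opt-out described in the comment lives on the driver side: marking the line IRQ_DISABLE_UNLAZY makes disable_irq[_nosync] mask it at the chip immediately instead of lazily. A minimal sketch, assuming a hypothetical device and handler:

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	/* hypothetical handler for a device that cannot stop
	 * interrupting at the device level */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(unsigned int irq, void *dev)
{
	/* disable the lazy approach for this line before requesting it */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return request_irq(irq, mydev_isr, 0, "mydev", dev);
}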
Example #20
void
disable_irq(unsigned int irq_nr)
{
	unsigned long flags;
	
	local_save_flags(flags);
	local_irq_disable();
	mask_irq(irq_nr);
	local_irq_restore(flags);
}
Example #21
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}
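
A chip that can mask and ack in one register access collapses the two-step fallback above into a single ->irq_mask_ack() callback, which mask_ack_irq() prefers when present. A sketch, again with the hypothetical mychip register names and accessor from the earlier sketch:

static void mychip_irq_mask_ack(struct irq_data *d)
{
	/* one write masks the line and clears the latched edge;
	 * MYCHIP_MASK_ACK and mychip_write() are hypothetical */
	mychip_write(MYCHIP_MASK_ACK, 1UL << d->hwirq);
}

static struct irq_chip mychip_irq_chip = {
	.name		= "mychip",
	.irq_mask_ack	= mychip_irq_mask_ack,
	.irq_mask	= mychip_irq_mask,	/* hypothetical */
	.irq_unmask	= mychip_irq_unmask,
};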
Example #22
void unregister_irq_handler(int irq, irq_t isr)
{
	DBG_ASSERT(irq < IDT_IRQS);

	const int intno = irq + IDT_EXCEPTIONS;

	if (handler[irq] == isr) {
		mask_irq(irq);
		handler[irq] = (irq_t)0;
		clear_idt_entry(intno);
	}
}
Example #23
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
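
On the requesting side a driver never sees this flow; it asks for an edge-triggered line and the handler above does the ack, mask, and unmask bookkeeping. A minimal sketch with hypothetical names:

static irqreturn_t mydev_edge_isr(int irq, void *dev_id)
{
	/* the flow handler has already acked the edge; just service it */
	return IRQ_HANDLED;
}

static int mydev_request_edge_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, mydev_edge_isr, IRQF_TRIGGER_RISING,
			   "mydev", dev);
}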
Example #24
void hi6402_btnup_workfunc(struct work_struct *work)
{
	struct hi6402_mbhc_platform_data *pdata = container_of(work,
			struct hi6402_mbhc_platform_data,
			headset_btn_up_delay_work.work);

	BUG_ON(NULL == pdata);

	pr_info("%s(%u) : btn up !\n", __FUNCTION__, __LINE__);

	pdata->btn_report = 0;

	hi6402_soc_jack_report(pdata->btn_report, HI6402_BTN_MASK);

	/* mask btn up interrupt */
	mask_irq(pdata->irq[HI6402_IRQ_BTNUP_COMP1], true);
	/* unmask btn down interrupt */
	mask_irq(pdata->irq[HI6402_IRQ_BTNDOWN_COMP1], false);

	return;
}
Example #25
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
bool
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	bool handled = false;

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);
		handled = true;

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
	return handled;
}
Example #26
/**
 *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *	stacked on transparent controllers
 *
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_fasteoi_irq(), but for use with hierarchy where
 *	the irq_chip also needs to have its ->irq_ack() function
 *	called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
Example #27
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
    raw_spin_lock(&desc->lock);

    if (unlikely(desc->istate & IRQS_INPROGRESS))
        if (!irq_check_poll(desc))
            goto out;

    desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If it's disabled or no action is available,
     * then mask it and get out of here:
     */
    if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
        irq_compat_set_pending(desc);
        desc->istate |= IRQS_PENDING;
        mask_irq(desc);
        goto out;
    }

    if (desc->istate & IRQS_ONESHOT)
        mask_irq(desc);

    preflow_handler(desc);
    handle_irq_event(desc);

out_eoi:
    desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
    raw_spin_unlock(&desc->lock);
    return;
out:
    if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
        goto out_eoi;
    goto out_unlock;
}
Example #28
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
    raw_spin_lock(&desc->lock);

    desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    /*
     * If we're currently running this IRQ, or it's disabled,
     * we shouldn't process the IRQ. Mark it pending, handle
     * the necessary masking and go out
     */
    if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
                  !desc->action))) {
        if (!irq_check_poll(desc)) {
            irq_compat_set_pending(desc);
            desc->istate |= IRQS_PENDING;
            mask_ack_irq(desc);
            goto out_unlock;
        }
    }
    kstat_incr_irqs_this_cpu(irq, desc);

    /* Start handling the irq */
    desc->irq_data.chip->irq_ack(&desc->irq_data);

    do {
        if (unlikely(!desc->action)) {
            mask_irq(desc);
            goto out_unlock;
        }

        /*
         * When another irq arrived while we were handling
         * one, we could have masked the irq.
         * Re-enable it if it was not disabled in the meantime.
         */
        if (unlikely(desc->istate & IRQS_PENDING)) {
            if (!(desc->istate & IRQS_DISABLED) &&
                    (desc->istate & IRQS_MASKED))
                unmask_irq(desc);
        }

        handle_irq_event(desc);

    } while ((desc->istate & IRQS_PENDING) &&
             !(desc->istate & IRQS_DISABLED));

out_unlock:
    raw_spin_unlock(&desc->lock);
}
Example #29
void disable_pic(void)
{
	int i;
	int configPR;

	for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) {
		mask_irq(i);	/* mask the irq just in case  */
	}
	/* Priority level 0 */
	PNX8550_GIC_PRIMASK_0 = PNX8550_GIC_PRIMASK_1 = 0;

	/* Set int vector table address */
	PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
}
Example #30
int irq_handler( int irq )
{
	acquire_spinlock( &irq_lock );
	mask_irq( irq );

	dmesg("%!IRQ %i occurred on %i\n", irq, CPUID );

	if ( handlers[irq] != NULL )
	{
		queue( handlers[irq] );
	}

	ack_irq( irq );
	release_spinlock( &irq_lock );
	return 0;
}