static irqreturn_t
nj_u_interrupt(int intno, void *dev_id, struct pt_regs *regs)
{
	struct IsdnCardState *cs = dev_id;
	u8 val, sval;

	spin_lock(&cs->lock);
	if (!((sval = bytein(cs->hw.njet.base + NETJET_IRQSTAT1)) &
	      NETJET_ISACIRQ)) {
		val = NETjet_ReadIC(cs, ICC_ISTA);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "tiger: i1 %x %x", sval, val);
		if (val) {
			icc_interrupt(cs, val);
			NETjet_WriteIC(cs, ICC_MASK, 0xFF);
			NETjet_WriteIC(cs, ICC_MASK, 0x0);
		}
	}
	/* start new code 13/07/00 GE */
	/* set bits in sval to indicate which page is free */
	if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
		/* the 2nd write page is free */
		sval = 0x08;
	else	/* the 1st write page is free */
		sval = 0x04;
	if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
		/* the 2nd read page is free */
		sval = sval | 0x02;
	else	/* the 1st read page is free */
		sval = sval | 0x01;
	if (sval != cs->hw.njet.last_is0) /* we have a DMA interrupt */
	{
		cs->hw.njet.irqstat0 = sval;
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
			/* we have a read dma int */
			read_tiger(cs);
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
			/* we have a write dma int */
			write_tiger(cs);
		/* end new code 13/07/00 GE */
	}
/*	if (!testcnt--) {
		cs->hw.njet.dmactrl = 0;
		byteout(cs->hw.njet.base + NETJET_DMACTRL,
			cs->hw.njet.dmactrl);
		byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0);
	}
*/
	spin_unlock(&cs->lock);
	return IRQ_HANDLED;
}
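/*
 * NETspider-U interrupt handler: service the ICC status, then decode
 * which Tiger DMA read/write buffer pages are free and run the
 * corresponding read_tiger()/write_tiger() processing.  Re-entry of the
 * DMA path is guarded with the FLG_LOCK_ATOMIC bit in cs->HW_Flags.
 */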
static irqreturn_t
netjet_u_interrupt(int intno, void *dev_id, struct pt_regs *regs)
{
	struct IsdnCardState *cs = dev_id;
	u_char val, sval;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	if (!((sval = bytein(cs->hw.njet.base + NETJET_IRQSTAT1)) &
	      NETJET_ISACIRQ)) {
		val = NETjet_ReadIC(cs, ICC_ISTA);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "tiger: i1 %x %x", sval, val);
		if (val) {
			icc_interrupt(cs, val);
			NETjet_WriteIC(cs, ICC_MASK, 0xFF);
			NETjet_WriteIC(cs, ICC_MASK, 0x0);
		}
	}
	/* start new code 13/07/00 GE */
	/* set bits in sval to indicate which page is free */
	if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
		/* the 2nd write page is free */
		sval = 0x08;
	else	/* the 1st write page is free */
		sval = 0x04;
	if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
		/* the 2nd read page is free */
		sval = sval | 0x02;
	else	/* the 1st read page is free */
		sval = sval | 0x01;
	if (sval != cs->hw.njet.last_is0) /* we have a DMA interrupt */
	{
		if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			spin_unlock_irqrestore(&cs->lock, flags);
			return IRQ_HANDLED;
		}
		cs->hw.njet.irqstat0 = sval;
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
			/* we have a read dma int */
			read_tiger(cs);
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
			/* we have a write dma int */
			write_tiger(cs);
		/* end new code 13/07/00 GE */
		test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
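/*
 * Basic card init for the NETspider-U: bring up the Tiger DMA engine
 * and the ICC, then unmask all ICC interrupts.
 */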
static void
nj_u_init(struct IsdnCardState *cs)
{
	inittiger(cs);
	initicc(cs);
	/* Reenable all IRQ */
	NETjet_WriteIC(cs, ICC_MASK, 0);
}
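/*
 * NetJet-S interrupt handler: same structure as the NETspider-U one,
 * but the D-channel controller is an ISAC, and IRQSTAT0 is read and
 * acknowledged explicitly so a purely shared interrupt can be reported
 * as IRQ_NONE.
 */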
static irqreturn_t
netjet_s_interrupt(int intno, void *dev_id, struct pt_regs *regs)
{
	struct IsdnCardState *cs = dev_id;
	u_char val, s1val, s0val;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	s1val = bytein(cs->hw.njet.base + NETJET_IRQSTAT1);
	if (!(s1val & NETJET_ISACIRQ)) {
		val = NETjet_ReadIC(cs, ISAC_ISTA);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "tiger: i1 %x %x", s1val, val);
		if (val) {
			isac_interrupt(cs, val);
			NETjet_WriteIC(cs, ISAC_MASK, 0xFF);
			NETjet_WriteIC(cs, ISAC_MASK, 0x0);
		}
		s1val = 1;
	} else
		s1val = 0;
	/*
	 * Reading/writing stat0 is better because of the lower IRQ rate.
	 * Note the IRQ stays asserted for 125 us if a condition matches;
	 * that is long on a modern CPU, so the IRQ is re-entered
	 * all the time.
	 */
	s0val = bytein(cs->hw.njet.base + NETJET_IRQSTAT0);
	if ((s0val | s1val) == 0) { // shared IRQ
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
	if (s0val)
		byteout(cs->hw.njet.base + NETJET_IRQSTAT0, s0val);
	/* start new code 13/07/00 GE */
	/* set bits in s0val to indicate which page is free */
	if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
		/* the 2nd write page is free */
		s0val = 0x08;
	else	/* the 1st write page is free */
		s0val = 0x04;
	if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
		/* the 2nd read page is free */
		s0val |= 0x02;
	else	/* the 1st read page is free */
		s0val |= 0x01;
	if (s0val != cs->hw.njet.last_is0) /* we have a DMA interrupt */
	{
		if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			printk(KERN_WARNING "nj LOCK_ATOMIC s0val %x->%x\n",
			       cs->hw.njet.last_is0, s0val);
			spin_unlock_irqrestore(&cs->lock, flags);
			return IRQ_HANDLED;
		}
		cs->hw.njet.irqstat0 = s0val;
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
			/* we have a read dma int */
			read_tiger(cs);
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
			/* we have a write dma int */
			write_tiger(cs);
		/* end new code 13/07/00 GE */
		test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
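/*
 * Older variant of the NETspider-U handler: returns void, checks for a
 * spurious interrupt, and masks local interrupts with save_flags()/cli()
 * around the DMA page bookkeeping instead of taking cs->lock.
 */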
static void
netjet_u_interrupt(int intno, void *dev_id, struct pt_regs *regs)
{
	struct IsdnCardState *cs = dev_id;
	u_char val, sval;
	long flags;

	if (!cs) {
		printk(KERN_WARNING "NETspider-U: Spurious interrupt!\n");
		return;
	}
	if (!((sval = bytein(cs->hw.njet.base + NETJET_IRQSTAT1)) &
	      NETJET_ISACIRQ)) {
		val = NETjet_ReadIC(cs, ICC_ISTA);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "tiger: i1 %x %x", sval, val);
		if (val) {
			icc_interrupt(cs, val);
			NETjet_WriteIC(cs, ICC_MASK, 0xFF);
			NETjet_WriteIC(cs, ICC_MASK, 0x0);
		}
	}
	save_flags(flags);
	cli();
	/* start new code 13/07/00 GE */
	/* set bits in sval to indicate which page is free */
	if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
		/* the 2nd write page is free */
		sval = 0x08;
	else	/* the 1st write page is free */
		sval = 0x04;
	if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
		/* the 2nd read page is free */
		sval = sval | 0x02;
	else	/* the 1st read page is free */
		sval = sval | 0x01;
	if (sval != cs->hw.njet.last_is0) /* we have a DMA interrupt */
	{
		if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			restore_flags(flags);
			return;
		}
		cs->hw.njet.irqstat0 = sval;
		restore_flags(flags);
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
			/* we have a read dma int */
			read_tiger(cs);
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
			/* we have a write dma int */
			write_tiger(cs);
		/* end new code 13/07/00 GE */
		test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	} else
		restore_flags(flags);
/*	if (!testcnt--) {
		cs->hw.njet.dmactrl = 0;
		byteout(cs->hw.njet.base + NETJET_DMACTRL,
			cs->hw.njet.dmactrl);
		byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0);
	}
*/
}