Example No. 1
File: iwch_ev.c Project: 710leo/LVS
void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
{
	struct iwch_dev *rnicp;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	struct iwch_cq *chp;
	struct iwch_qp *qhp;
	u32 cqid = RSPQ_CQID(rsp_msg);

	rnicp = (struct iwch_dev *) rdev_p->ulp;
	spin_lock(&rnicp->lock);
	chp = get_chp(rnicp, cqid);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
	if (!chp || !qhp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
		       cqid, CQE_QPID(rsp_msg->cqe),
		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
		       CQE_WRID_LOW(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		goto out;
	}
	iwch_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&rnicp->lock);

	/*
	 * 1) completion of our sending a TERMINATE.
	 * 2) incoming TERMINATE message.
	 */
	if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
		if (SQ_TYPE(rsp_msg->cqe)) {
			PDBG("%s QPID 0x%x ep %p disconnecting\n",
			     __func__, qhp->wq.qpid, qhp->ep);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		} else {
			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
			     qhp->wq.qpid);
			post_qp_event(rnicp, chp, rsp_msg,
				      IB_EVENT_QP_REQ_ERR, 0);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		}
		goto done;
	}

	/* Bad incoming Read request */
	if (SQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	/* Bad incoming write */
	if (RQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	switch (CQE_STATUS(rsp_msg->cqe)) {

	/* Completion Events */
	case TPT_ERR_SUCCESS:

		/*
		 * Confirm the destination entry if this is a RECV completion.
		 */
		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
			dst_confirm(qhp->ep->dst);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		break;

	case TPT_ERR_STAG:
	case TPT_ERR_PDID:
	case TPT_ERR_QPID:
	case TPT_ERR_ACCESS:
	case TPT_ERR_WRAP:
	case TPT_ERR_BOUND:
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
		break;

	/* Device Fatal Errors */
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
		break;

	/* QP Fatal Errors */
	case TPT_ERR_OUT_OF_RQE:
	case TPT_ERR_PBL_ADDR_BOUND:
	case TPT_ERR_CRC:
	case TPT_ERR_MARKER:
	case TPT_ERR_PDU_LEN_ERR:
	case TPT_ERR_DDP_VERSION:
	case TPT_ERR_RDMA_VERSION:
	case TPT_ERR_OPCODE:
	case TPT_ERR_DDP_QUEUE_NUM:
	case TPT_ERR_MSN:
	case TPT_ERR_TBIT:
	case TPT_ERR_MO:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_RQE_ADDR_BOUND:
	case TPT_ERR_IRD_OVERFLOW:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
	        wake_up(&chp->wait);
	iwch_qp_rem_ref(&qhp->ibqp);
out:
	dev_kfree_skb_irq(skb);
}
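
A note on the pattern above: both the CQ and the QP are looked up and pinned while rnicp->lock is held, the lock is dropped before any event is posted, and the final atomic_dec_and_test() wakes whoever is waiting for the CQ to quiesce. The following is a minimal userspace sketch of that last-put-wakes-waiter idiom using C11 atomics and pthreads; all names are illustrative, and nothing here is taken from the driver beyond the shape of the logic.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative analogue of chp->refcnt / chp->wait. */
struct obj {
	atomic_int refcnt;
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

static void obj_put(struct obj *o)
{
	/* Like atomic_dec_and_test(): true only for the final put. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		pthread_mutex_lock(&o->lock);
		pthread_cond_broadcast(&o->wait);	/* wake_up() analogue */
		pthread_mutex_unlock(&o->lock);
	}
}

static void obj_wait_unused(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	while (atomic_load(&o->refcnt) != 0)
		pthread_cond_wait(&o->wait, &o->lock);
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct obj o = { 1, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

	obj_put(&o);		/* drop the only reference */
	obj_wait_unused(&o);	/* returns at once: refcnt already hit zero */
	puts("object quiesced");
	return 0;
}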
Example No. 2
static void elp_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr)
{
	int len;
	int dlen;
	int icount = 0;
	struct net_device *dev;
	elp_device *adapter;
	int timeout;

	dev = dev_id;
	adapter = (elp_device *) dev->priv;
	
	spin_lock(&adapter->lock);

	do {
		/*
		 * has a DMA transfer finished?
		 */
		if (inb_status(dev->base_addr) & DONE) {
			if (!adapter->dmaing) {
				printk("%s: phantom DMA completed\n", dev->name);
			}
			if (elp_debug >= 3) {
				printk("%s: %s DMA complete, status %02x\n", dev->name, adapter->current_dma.direction ? "tx" : "rx", inb_status(dev->base_addr));
			}

			outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
			if (adapter->current_dma.direction) {
				dev_kfree_skb_irq(adapter->current_dma.skb);
			} else {
				struct sk_buff *skb = adapter->current_dma.skb;
				if (skb) {
					if (adapter->current_dma.target) {
						/* have already done the skb_put() */
						memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
					}
					skb->protocol = eth_type_trans(skb,dev);
					adapter->stats.rx_bytes += skb->len;
					netif_rx(skb);
					dev->last_rx = jiffies;
				}
			}
			adapter->dmaing = 0;
			if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
				int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
				adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
				if (elp_debug >= 2)
					printk("%s: receiving backlogged packet (%d)\n", dev->name, t);
				receive_packet(dev, t);
			} else {
				adapter->busy = 0;
			}
		} else {
			/* has one timed out? */
			check_3c505_dma(dev);
		}

		/*
		 * receive a PCB from the adapter
		 */
		timeout = jiffies + 3*HZ/100;
		while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
			if (receive_pcb(dev, &adapter->irx_pcb)) {
				switch (adapter->irx_pcb.command) 
				{
				case 0:
					break;
					/*
					 * received a packet - this must be handled fast
					 */
				case 0xff:
				case CMD_RECEIVE_PACKET_COMPLETE:
					/* if the device isn't open, don't pass packets up the stack */
					if (!netif_running(dev))
						break;
					len = adapter->irx_pcb.data.rcv_resp.pkt_len;
					dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
					if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
						printk(KERN_ERR "%s: interrupt - packet not received correctly\n", dev->name);
					} else {
						if (elp_debug >= 3) {
							printk("%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen);
						}
						if (adapter->irx_pcb.command == 0xff) {
							if (elp_debug >= 2)
								printk("%s: adding packet to backlog (len = %d)\n", dev->name, dlen);
							adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
							adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
						} else {
							receive_packet(dev, dlen);
						}
						if (elp_debug >= 3)
							printk("%s: packet received\n", dev->name);
					}
					break;

					/*
					 * 82586 configured correctly
					 */
				case CMD_CONFIGURE_82586_RESPONSE:
					adapter->got[CMD_CONFIGURE_82586] = 1;
					if (elp_debug >= 3)
						printk("%s: interrupt - configure response received\n", dev->name);
					break;

					/*
					 * Adapter memory configuration
					 */
				case CMD_CONFIGURE_ADAPTER_RESPONSE:
					adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
					if (elp_debug >= 3)
						printk("%s: Adapter memory configuration %s.\n", dev->name,
						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
					break;

					/*
					 * Multicast list loading
					 */
				case CMD_LOAD_MULTICAST_RESPONSE:
					adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
					if (elp_debug >= 3)
						printk("%s: Multicast address list loading %s.\n", dev->name,
						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
					break;

					/*
					 * Station address setting
					 */
				case CMD_SET_ADDRESS_RESPONSE:
					adapter->got[CMD_SET_STATION_ADDRESS] = 1;
					if (elp_debug >= 3)
						printk("%s: Ethernet address setting %s.\n", dev->name,
						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
					break;


					/*
					 * received board statistics
					 */
				case CMD_NETWORK_STATISTICS_RESPONSE:
					adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
					adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
					adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
					adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
					adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
					adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
					adapter->got[CMD_NETWORK_STATISTICS] = 1;
					if (elp_debug >= 3)
						printk("%s: interrupt - statistics response received\n", dev->name);
					break;

					/*
					 * sent a packet
					 */
				case CMD_TRANSMIT_PACKET_COMPLETE:
					if (elp_debug >= 3)
						printk("%s: interrupt - packet sent\n", dev->name);
					if (!netif_running(dev))
						break;
					switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
					case 0xffff:
						adapter->stats.tx_aborted_errors++;
						printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
						break;
					case 0xfffe:
						adapter->stats.tx_fifo_errors++;
						printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
						break;
					}
					netif_wake_queue(dev);
					break;

					/*
					 * some unknown PCB
					 */
				default:
					printk(KERN_DEBUG "%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command);
					break;
				}
			} else {
				printk("%s: failed to read PCB on interrupt\n", dev->name);
				adapter_reset(dev);
			}
		}

	} while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));

	prime_rx(dev);

	/*
	 * indicate no longer in interrupt routine
	 */
	spin_unlock(&adapter->lock);
}
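
The do/while at the bottom keeps servicing the board while ACRF or DONE stays asserted, but caps itself via icount++ < 5 so a wedged status bit cannot trap the CPU in the handler forever. A standalone model of that bounded drain loop; the "device" and its event counts are invented for illustration:

#include <stdio.h>

static int pending = 12;		/* events the fake device has queued */

static int status(void) { return pending > 0; }		/* inb_status() analogue */
static void service_one_pass(void) { pending -= 3; }	/* handle a batch */

int main(void)
{
	int icount = 0;

	do {
		service_one_pass();
	} while (icount++ < 5 && status());	/* bounded, like the driver */

	printf("passes=%d pending=%d\n", icount, pending);
	return 0;
}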
Example No. 3
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}
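
The cleanup walk starts at dirty_tx and stops at the first descriptor the hardware still owns (BD_ENET_TX_READY set), following the WRAP bit back to the ring base instead of incrementing past the end. A compilable sketch of that traversal; the flag values and ring length are invented, since real buffer-descriptor layouts are hardware-specific:

#include <stdio.h>

#define RING_LEN   8
#define DESC_READY 0x8000	/* illustrative: still owned by hardware */
#define DESC_WRAP  0x2000	/* illustrative: last descriptor in the ring */

struct desc { unsigned short sc; };

/* Reclaim completed descriptors starting at *dirty: stop at the first
 * one still owned by hardware, wrap on the WRAP bit, and report how
 * many slots were freed (where the driver frees skbs and unmaps DMA). */
static int reclaim(struct desc ring[RING_LEN], int *dirty)
{
	int freed = 0;

	while (!(ring[*dirty].sc & DESC_READY)) {
		/* ... dev_kfree_skb_irq()/dma_unmap_single() would go here ... */
		freed++;
		if (ring[*dirty].sc & DESC_WRAP)
			*dirty = 0;
		else
			(*dirty)++;
		if (freed == RING_LEN)	/* whole ring drained */
			break;
	}
	return freed;
}

int main(void)
{
	struct desc ring[RING_LEN] = { { 0 } };
	int dirty = 0;

	ring[RING_LEN - 1].sc = DESC_WRAP;	/* mark the ring end */
	ring[3].sc |= DESC_READY;		/* slot 3 still in flight */
	printf("freed=%d dirty=%d\n", reclaim(ring, &dirty), dirty);
	return 0;
}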
Example No. 4
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct sonic_local *lp = netdev_priv(dev);
    int status;

    if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
        return IRQ_NONE;

    do {
        if (status & SONIC_INT_PKTRX) {
            if (sonic_debug > 2)
                printk("%s: packet rx\n", dev->name);
            sonic_rx(dev);	/* got packet(s) */
            SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
        }

        if (status & SONIC_INT_TXDN) {
            int entry = lp->cur_tx;
            int td_status;
            int freed_some = 0;

            /* At this point, cur_tx is the index of a TD that is one of:
             *   unallocated/freed                          (status set   & tx_skb[entry] clear)
             *   allocated and sent                         (status set   & tx_skb[entry] set  )
             *   allocated and not yet sent                 (status clear & tx_skb[entry] set  )
             *   still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
             */

            if (sonic_debug > 2)
                printk("%s: tx done\n", dev->name);

            while (lp->tx_skb[entry] != NULL) {
                if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
                    break;

                if (td_status & 0x0001) {
                    lp->stats.tx_packets++;
                    lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
                } else {
                    lp->stats.tx_errors++;
                    if (td_status & 0x0642)
                        lp->stats.tx_aborted_errors++;
                    if (td_status & 0x0180)
                        lp->stats.tx_carrier_errors++;
                    if (td_status & 0x0020)
                        lp->stats.tx_window_errors++;
                    if (td_status & 0x0004)
                        lp->stats.tx_fifo_errors++;
                }

                /* We must free the original skb */
                dev_kfree_skb_irq(lp->tx_skb[entry]);
                lp->tx_skb[entry] = NULL;
                /* and unmap DMA buffer */
                dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
                lp->tx_laddr[entry] = (dma_addr_t)0;
                freed_some = 1;

                if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
                    entry = (entry + 1) & SONIC_TDS_MASK;
                    break;
                }
                entry = (entry + 1) & SONIC_TDS_MASK;
            }

            if (freed_some || lp->tx_skb[entry] == NULL)
                netif_wake_queue(dev);  /* The ring is no longer full */
            lp->cur_tx = entry;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
        }

        /*
         * check error conditions
         */
        if (status & SONIC_INT_RFO) {
            if (sonic_debug > 1)
                printk("%s: rx fifo overrun\n", dev->name);
            lp->stats.rx_fifo_errors++;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
        }
        if (status & SONIC_INT_RDE) {
            if (sonic_debug > 1)
                printk("%s: rx descriptors exhausted\n", dev->name);
            lp->stats.rx_dropped++;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
        }
        if (status & SONIC_INT_RBAE) {
            if (sonic_debug > 1)
                printk("%s: rx buffer area exceeded\n", dev->name);
            lp->stats.rx_dropped++;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
        }

        /* counter overruns; all counters are 16bit wide */
        if (status & SONIC_INT_FAE) {
            lp->stats.rx_frame_errors += 65536;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
        }
        if (status & SONIC_INT_CRC) {
            lp->stats.rx_crc_errors += 65536;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
        }
        if (status & SONIC_INT_MP) {
            lp->stats.rx_missed_errors += 65536;
            SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
        }

        /* transmit error */
        if (status & SONIC_INT_TXER) {
            if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
                printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
            SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
        }

        /* bus retry */
        if (status & SONIC_INT_BR) {
            printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
                   dev->name);
            /* ... to help debug DMA problems causing endless interrupts. */
            /* Bounce the eth interface to turn on the interrupt again. */
            SONIC_WRITE(SONIC_IMR, 0);
            SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
        }

        /* load CAM done */
        if (status & SONIC_INT_LCD)
            SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
    } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
    return IRQ_HANDLED;
}
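
The "+= 65536" lines are worth a second look: the SONIC's tally counters are only 16 bits wide, so each overflow interrupt accounts for one full wrap and the software statistic absorbs 65536 at a time, while the low 16 bits still live in the hardware register. A toy model of that bookkeeping, where only the counter width is taken from the driver:

#include <stdio.h>

static unsigned long total;		/* e.g. lp->stats.rx_crc_errors */
static unsigned short hw_tally;		/* model of the 16-bit register */

static void hw_count_event(void)
{
	if (++hw_tally == 0)	/* wrapped from 0xffff to 0 */
		total += 65536;	/* what the ISR does on the overflow irq */
}

int main(void)
{
	for (unsigned long i = 0; i < 70000; i++)
		hw_count_event();
	/* full total = software accumulator + current register value */
	printf("events=%lu\n", total + hw_tally);	/* prints 70000 */
	return 0;
}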
Example No. 5
static
int receive_dmsg(struct IsdnCardState *cs)
{
	struct sk_buff *skb;
	int idx;
	int rcnt, z1, z2;
	u_char stat, cip, f1, f2;
	int chksum;
	int count=5;
	u_char *ptr;

	if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
		debugl1(cs, "rec_dmsg blocked");
		return(1);
	}
	SelFiFo(cs, 4 | HFCD_REC);
	cip = HFCD_FIFO | HFCD_F1 | HFCD_REC;
	WaitNoBusy(cs);
	f1 = cs->readisac(cs, cip) & 0xf;
	cip = HFCD_FIFO | HFCD_F2 | HFCD_REC;
	WaitNoBusy(cs);
	f2 = cs->readisac(cs, cip) & 0xf;
	while ((f1 != f2) && count--) {
		z1 = ReadZReg(cs, HFCD_FIFO | HFCD_Z1 | HFCD_REC);
		z2 = ReadZReg(cs, HFCD_FIFO | HFCD_Z2 | HFCD_REC);
		rcnt = z1 - z2;
		if (rcnt < 0)
			rcnt += cs->hw.hfcD.dfifosize;
		rcnt++;
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "hfcd recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
				f1, f2, z1, z2, rcnt);
		idx = 0;
		cip = HFCD_FIFO | HFCD_FIFO_OUT | HFCD_REC;
		if (rcnt > MAX_DFRAME_LEN + 3) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "empty_fifo d: incoming packet too large");
			while (idx < rcnt) {
				if (!(WaitNoBusy(cs)))
					break;
				ReadReg(cs, HFCD_DATA_NODEB, cip);
				idx++;
			}
		} else if (rcnt < 4) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "empty_fifo d: incoming packet too small");
			while ((idx++ < rcnt) && WaitNoBusy(cs))
				ReadReg(cs, HFCD_DATA_NODEB, cip);
		} else if ((skb = dev_alloc_skb(rcnt - 3))) {
			ptr = skb_put(skb, rcnt - 3);
			while (idx < (rcnt - 3)) {
				if (!(WaitNoBusy(cs)))
					break;
				*ptr = ReadReg(cs, HFCD_DATA_NODEB, cip);
				idx++;
				ptr++;
			}
			if (idx != (rcnt - 3)) {
				debugl1(cs, "RFIFO D BUSY error");
				printk(KERN_WARNING "HFC DFIFO channel BUSY Error\n");
				dev_kfree_skb_irq(skb);
				skb = NULL;
#ifdef ERROR_STATISTIC
				cs->err_rx++;
#endif
			} else {
				WaitNoBusy(cs);
				chksum = (ReadReg(cs, HFCD_DATA, cip) << 8);
				WaitNoBusy(cs);
				chksum += ReadReg(cs, HFCD_DATA, cip);
				WaitNoBusy(cs);
				stat = ReadReg(cs, HFCD_DATA, cip);
				if (cs->debug & L1_DEB_ISAC)
					debugl1(cs, "empty_dfifo chksum %x stat %x",
						chksum, stat);
				if (stat) {
					debugl1(cs, "FIFO CRC error");
					dev_kfree_skb_irq(skb);
					skb = NULL;
#ifdef ERROR_STATISTIC
					cs->err_crc++;
#endif
				} else {
					skb_queue_tail(&cs->rq, skb);
					schedule_event(cs, D_RCVBUFREADY);
				}
			}
		} else
			printk(KERN_WARNING "HFC: D receive out of memory\n");
		WaitForBusy(cs);
		cip = HFCD_FIFO | HFCD_F2_INC | HFCD_REC;
		WaitNoBusy(cs);
		stat = ReadReg(cs, HFCD_DATA, cip);
		WaitForBusy(cs);
		cip = HFCD_FIFO | HFCD_F2 | HFCD_REC;
		WaitNoBusy(cs);
		f2 = cs->readisac(cs, cip) & 0xf;
	}
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	return(1);
} 
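
The byte count in the loop above comes from the HFC's two FIFO pointers: Z1 (write) minus Z2 (read), corrected for wraparound over the FIFO size, then incremented by one because the pointer difference alone is exclusive. A standalone version of that computation, with an assumed FIFO size:

#include <stdio.h>

#define DFIFO_SIZE 512	/* assumed; the driver uses cs->hw.hfcD.dfifosize */

static int fifo_count(int z1, int z2)
{
	int rcnt = z1 - z2;

	if (rcnt < 0)
		rcnt += DFIFO_SIZE;	/* write pointer wrapped past the end */
	return rcnt + 1;
}

int main(void)
{
	printf("%d\n", fifo_count(100, 80));	/* 21: no wrap */
	printf("%d\n", fifo_count(10, 500));	/* 23: wrapped */
	return 0;
}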
Example No. 6
File: diva.c Project: nhanh0/hah
static inline void
Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
{
	u_char r;
	struct BCState *bcs = cs->bcs + hscx;
	struct sk_buff *skb;
	int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags)? 64: 32;
	int count;

	if (!test_bit(BC_FLG_INIT, &bcs->Flag))
		return;

	if (val & 0x80) {	/* RME */
		r = MemReadHSCX(cs, hscx, HSCX_RSTA);
		if ((r & 0xf0) != 0xa0) {
			if (!(r & 0x80))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX invalid frame");
			if ((r & 0x40) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX RDO mode=%d",
						bcs->mode);
			if (!(r & 0x20))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX CRC error");
			MemWriteHSCXCMDR(cs, hscx, 0x80);
		} else {
			count = MemReadHSCX(cs, hscx, HSCX_RBCL) & (
				test_bit(HW_IPAC, &cs->HW_Flags)? 0x3f: 0x1f);
			if (count == 0)
				count = fifo_size;
			Memhscx_empty_fifo(bcs, count);
			if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "HX Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "HSCX: receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.hscx.rcvidx = 0;
		hscx_sched_event(bcs, B_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		Memhscx_empty_fifo(bcs, fifo_size);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(fifo_size)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.hscx.rcvidx = 0;
			hscx_sched_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & 0x10) {	/* XPR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				Memhscx_fill_fifo(bcs);
				return;
			} else {
				if (bcs->st->lli.l1writewakeup &&
					(PACKET_NOACK != bcs->tx_skb->pkt_type))
					bcs->st->lli.l1writewakeup(bcs->st, bcs->hw.hscx.count);
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hscx.count = 0; 
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.hscx.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			Memhscx_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			hscx_sched_event(bcs, B_XMTBUFREADY);
		}
	}
}
Example No. 7
static void mc32_tx_ring(struct net_device *dev) 
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))  
	{   
		u16 t; 

		t=next_tx(lp->tx_ring_tail); 
		np=lp->tx_ring[t].p; 

		if(!(np->status & (1<<7))) 
		{
			/* Not COMPLETED */ 
			break; 
		} 
		lp->net_stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			lp->net_stats.tx_errors++;   

			switch(np->status&0x0F)
			{
				case 1:
					lp->net_stats.tx_aborted_errors++;
					break; /* Max collisions */ 
				case 2:
					lp->net_stats.tx_fifo_errors++;
					break;
				case 3:
					lp->net_stats.tx_carrier_errors++;
					break;
				case 4:
					lp->net_stats.tx_window_errors++;
					break;  /* CTS Lost */ 
				case 5:
					lp->net_stats.tx_aborted_errors++;
					break; /* Transmit timeout */ 
			}
		}
		/* Packets are sent in order - this is
		    basically a FIFO queue of buffers matching
		    the card ring */
		lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t; 
	}

} 
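
The comment at the top is the classic circular-queue ambiguity: if head == tail is reserved to mean "empty", at most TX_RING_LEN - 1 entries may ever be outstanding, otherwise a completely full ring would look identical to an empty one. A compilable illustration of that invariant (slot counts invented):

#include <stdio.h>

#define TX_RING_LEN 8

static int head, tail, free_slots = TX_RING_LEN - 1;	/* lp->tx_count analogue */

static int ring_post(void)		/* producer: queue one frame */
{
	if (free_slots == 0)
		return -1;		/* "full" while head != tail */
	head = (head + 1) % TX_RING_LEN;
	free_slots--;
	return 0;
}

static void ring_reap(void)		/* consumer: complete one frame */
{
	if (tail != head) {		/* head == tail means empty */
		tail = (tail + 1) % TX_RING_LEN;
		free_slots++;
	}
}

int main(void)
{
	int posted = 0;

	while (ring_post() == 0)
		posted++;
	printf("posted %d of %d slots\n", posted, TX_RING_LEN);	/* 7 of 8 */
	ring_reap();
	printf("free after one completion: %d\n", free_slots);	/* 1 */
	return 0;
}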
Example No. 8
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
	}
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
				      struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast +=  skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
		if (likely(rtl_action_proc(hw, skb, false))) {
			struct sk_buff *uskb = NULL;
			u8 *pdata;

			uskb = dev_alloc_skb(skb->len + 128);
			if (uskb) {	/* drop packet on allocation failure */
				memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
				       sizeof(rx_status));
				pdata = (u8 *)skb_put(uskb, skb->len);
				memcpy(pdata, skb->data, skb->len);
				ieee80211_rx_irqsafe(hw, uskb);
			}
			dev_kfree_skb_any(skb);
		} else {
			dev_kfree_skb_any(skb);
		}
	}
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *_skb;
	struct sk_buff_head rx_queue;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	skb_queue_head_init(&rx_queue);
	if (rtlusb->usb_rx_segregate_hdl)
		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
	WARN_ON(skb_queue_empty(&rx_queue));
	while (!skb_queue_empty(&rx_queue)) {
		_skb = skb_dequeue(&rx_queue);
		_rtl_usb_rx_process_agg(hw, _skb);
		ieee80211_rx_irqsafe(hw, _skb);
	}
}

static void _rtl_rx_completed(struct urb *_urb)
{
	struct sk_buff *skb = (struct sk_buff *)_urb->context;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		/* If this code were moved to work queue, would CPU
		 * utilization be improved?  NOTE: We shall allocate another skb
		 * and reuse the original one.
		 */
		skb_put(skb, _urb->actual_length);

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			struct sk_buff *_skb;
			_rtl_usb_rx_process_noagg(hw, skb);
			_skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
			if (IS_ERR(_skb)) {
				err = PTR_ERR(_skb);
				RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
					 "Can't allocate skb for bulk IN!\n");
				return;
			}
			skb = _skb;
		} else{
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			pr_err("rx agg not supported\n");
		}
		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	dev_kfree_skb_irq(skb);
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
	struct urb *urb;
	struct sk_buff *skb;
	int err;
	int i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	WARN_ON(0 == rtlusb->rx_urb_num);
	/* 1600 == 1514 + max WLAN header + rtk info */
	WARN_ON(rtlusb->rx_max_size < 1600);

	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to alloc URB!!\n");
			goto err_out;
		}

		skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (IS_ERR(skb)) {
			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
				 "Failed to prep_rx_urb!!\n");
			err = PTR_ERR(skb);
			goto err_out;
		}

		usb_anchor_urb(urb, &rtlusb->rx_submitted);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err)
			goto err_out;
		usb_free_urb(urb);
	}
	return 0;

err_out:
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
	return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
	int err;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (!err) {
		rtl_init_rx_config(hw);

		/* Enable software */
		SET_USB_START(rtlusb);
		/* should after adapter start and interrupt enable. */
		set_hal_start(rtlhal);

		/* Start bulk IN */
		err = _rtl_usb_receive(hw);
	}

	return err;
}
/**
 *
 *
 */

/*=======================  tx =========================================*/
static void rtl_usb_cleanup(struct ieee80211_hw *hw)
{
	u32 i;
	struct sk_buff *_skb;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct ieee80211_tx_info *txinfo;

	SET_USB_STOP(rtlusb);

	/* clean up rx stuff. */
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	/* clean up tx stuff */
	for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
		while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
			rtlusb->usb_tx_cleanup(hw, _skb);
			txinfo = IEEE80211_SKB_CB(_skb);
			ieee80211_tx_info_clear_status(txinfo);
			txinfo->flags |= IEEE80211_TX_STAT_ACK;
			ieee80211_tx_status_irqsafe(hw, _skb);
		}
		usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
	}
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
Example No. 9
static void
W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
{
	u_char val;
	u_char r;
	struct BCState *bcs;
	struct sk_buff *skb;
	int count;

	bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs+1);
	val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR);
	debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val);

	if (!test_bit(BC_FLG_INIT, &bcs->Flag)) {
		debugl1(cs, "W6692B not INIT yet");
		return;
	}
	if (val & W_B_EXI_RME) {	/* RME */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B STAR %x", r);
			if ((r & W_B_STAR_RDOV) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B RDOV mode=%d",
						bcs->mode);
			if (r & W_B_STAR_CRCE)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B CRC error");
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
		} else {
			count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1);
			if (count == 0)
				count = W_B_FIFO_THRESH;
			W6692B_empty_fifo(bcs, count);
			if ((count = bcs->hw.w6692.rcvidx) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "W6692 Bchan Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "W6692: Bchan receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.w6692.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & W_B_EXI_RMR) {	/* RMR */
		W6692B_empty_fifo(bcs, W_B_FIFO_THRESH);
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & W_B_STAR_RDOV) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B RDOV(RMR) mode=%d",bcs->mode);
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
			if (bcs->mode != L1_MODE_TRANS)
				bcs->hw.w6692.rcvidx = 0;
		}
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.w6692.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & W_B_EXI_XDUN) {	/* XDUN */
		cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692 B EXIR %x Lost TX", val);
		if (bcs->mode == 1)
			W6692B_fill_fifo(bcs);
		else {
			/*
			 * Here we lost a TX interrupt, so
			 * restart transmitting the whole frame.
			 */
			if (bcs->tx_skb) {
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
		}
		return;
	}
	if (val & W_B_EXI_XFR) {	/* XFR */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & W_B_STAR_XDOW) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B STAR %x XDOW", r);
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
			if (bcs->tx_skb && (bcs->mode != 1)) {
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
		}
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				W6692B_fill_fifo(bcs);
				return;
			} else {
				if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
					(PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long	flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.w6692.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.w6692.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.w6692.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			W6692B_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}
Example No. 10
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
			pr_err("[%s] rmnet_recv() cannot allocate skb\n",
			       dev->name);
			/* out of memory, reschedule a later attempt */
			smd_net_data_tasklet.data = (unsigned long)dev;
			tasklet_schedule(&smd_net_data_tasklet);
			break;
		} else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);
			if (smd_read(p->ch, ptr, sz) != sz) {
				pr_err("[%s] rmnet_recv() smd lied about avail?!",
					dev->name);
				ptr = 0;
				dev_kfree_skb_irq(skb);
			} else {
				/* Handle Rx frame format */
				spin_lock_irqsave(&p->lock, flags);
				opmode = p->operation_mode;
				spin_unlock_irqrestore(&p->lock, flags);

				if (RMNET_IS_MODE_IP(opmode)) {
					/* Driver in IP mode */
					skb->protocol =
					  rmnet_ip_type_trans(skb, dev);
				} else {
					/* Driver in Ethernet mode */
					skb->protocol =
					  eth_type_trans(skb, dev);
				}
				if (RMNET_IS_MODE_IP(opmode) ||
				    count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
					p->wakeups_rcv +=
					rmnet_cause_wakeup(p);
#endif
					p->stats.rx_packets++;
					p->stats.rx_bytes += skb->len;
				}
				DBG1("[%s] Rx packet #%lu len=%d\n",
					dev->name, p->stats.rx_packets,
					skb->len);

				/* Deliver to network stack */
				netif_rx(skb);
			}
			continue;
		}
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("[%s] rmnet_recv() smd lied about avail?!",
				dev->name);
	}
}
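
The drain loop above only consumes a packet once smd_cur_packet_size() bytes are actually readable; otherwise it leaves the data in the channel and returns, and a short read after that check is treated as a driver error (the "smd lied about avail" message). A self-contained model of that all-or-nothing drain; the channel structure and its calls are invented stand-ins for the SMD API:

#include <stdio.h>
#include <string.h>

struct chan { const char *buf; int avail; };	/* stand-in for the smd channel */

static int cur_packet_size(struct chan *c) { return c->avail ? 16 : 0; }

static int chan_read(struct chan *c, char *dst, int sz)
{
	if (c->avail < sz)
		return -1;	/* the "lied about avail" case */
	memcpy(dst, c->buf, sz);
	c->buf += sz;
	c->avail -= sz;
	return sz;
}

int main(void)
{
	char data[40] = "0123456789abcdef0123456789abcdefXXXXXXX";
	struct chan c = { data, 36 };
	char pkt[16];
	int delivered = 0;

	for (;;) {
		int sz = cur_packet_size(&c);

		if (sz == 0 || c.avail < sz)
			break;		/* partial packet: wait for more data */
		if (chan_read(&c, pkt, sz) == sz)
			delivered++;	/* here the driver calls netif_rx() */
	}
	printf("delivered %d packets, %d bytes pending\n", delivered, c.avail);
	return 0;
}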
Example No. 11
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
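
On the wire this builds [block_len:2][ll_header][payload]: header.length covers the LL header plus payload, and the leading 16-bit block length additionally covers itself, which is why block_len ends up as skb->len + 2 after the header push. A sketch of that framing in plain C; the field sizes, the type value, and LL_HEADER_LENGTH here are illustrative, not the real ctcm definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LL_HEADER_LENGTH 6	/* assumed: sizeof(struct ll_header) below */

struct ll_header {
	uint16_t length;
	uint16_t type;
	uint16_t unused;
};

/* Mimic the two skb_push() calls in ctcm_transmit_skb(). */
static size_t build_frame(uint8_t *out, const uint8_t *payload, uint16_t plen)
{
	struct ll_header hdr = {
		.length = plen + LL_HEADER_LENGTH,
		.type   = 0x0800,	/* stand-in for skb->protocol */
		.unused = 0,
	};
	uint16_t block_len = plen + LL_HEADER_LENGTH + 2;

	memcpy(out, &block_len, 2);	/* host-endian here, for brevity */
	memcpy(out + 2, &hdr, sizeof(hdr));
	memcpy(out + 2 + sizeof(hdr), payload, plen);
	return block_len;
}

int main(void)
{
	uint8_t frame[64];
	const uint8_t payload[] = "ping";
	size_t n = build_frame(frame, payload, sizeof(payload));

	printf("frame is %zu bytes (block_len %d)\n", n,
	       frame[0] | (frame[1] << 8));
	return 0;
}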
Example No. 12
static void tms380tr_rcv_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned char *ReceiveDataPtr;
	struct sk_buff *skb;
	unsigned int Length, Length2;
	RPL *rpl;
	RPL *SaveHead;
	dma_addr_t dmabuf;

	for(;;)
	{
		rpl = tp->RplHead;
		if(rpl->Status & RX_VALID)
			break;		/* RPL still owned by the adapter */

		/* Advance the head pointer to the next RPL */
		SaveHead = tp->RplHead;
		tp->RplHead = rpl->NextRPLPtr;

		/* Get the frame length (byte-swapped for the host CPU) */
		Length = be16_to_cpu(rpl->FrameSize);

		/* Frame valid and contained in a single receive buffer? */
		if((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
			== VALID_SINGLE_BUFFER_FRAME)
		{
			ReceiveDataPtr = rpl->MData;

			/* Re-read the frame size to catch a delayed update */
			Length2 = be16_to_cpu(rpl->FrameSize);

			if(Length == 0 || Length != Length2)
			{
				tp->RplHead = SaveHead;
				break;	/* frame size not settled yet; retry later */
			}
			tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length);

			if(tms380tr_debug > 3)
				printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n",
					dev->name, Length, Length);

			/* Pick up the skb posted with this RPL */
			skb = rpl->Skb;
			if(rpl->SkbStat == SKB_UNAVAILABLE)
			{
				/* No skb was posted: try to allocate one now */
				skb = dev_alloc_skb(tp->MaxPacketSize);
				if(skb == NULL)
				{
					/* Still no memory: skb stays NULL and
					 * the frame is dropped below.
					 */
				}
				else
				{
					skb_put(skb, tp->MaxPacketSize);
					rpl->SkbStat 	= SKB_DATA_COPY;
					ReceiveDataPtr 	= rpl->MData;
				}
			}

			if(skb && (rpl->SkbStat == SKB_DATA_COPY
				|| rpl->SkbStat == SKB_DMA_DIRECT))
			{
				if(rpl->SkbStat == SKB_DATA_COPY)
					skb_copy_to_linear_data(skb, ReceiveDataPtr,
						       Length);

				/* Deliver the frame; this RPL gets a fresh skb below */
				rpl->Skb = NULL;
				skb_trim(skb,Length);
				skb->protocol = tr_type_trans(skb,dev);
				netif_rx(skb);
			}
		}
		else	/* Invalid frame */
		{
			if(rpl->Skb != NULL)
				dev_kfree_skb_irq(rpl->Skb);

			if(rpl->Status & RX_START_FRAME)
				/* Frame start bit set -> receive error */
				tp->MacStat.rx_errors++;
		}
		if (rpl->DMABuff)
			dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
		rpl->DMABuff = 0;

		/* Allocate a fresh skb for this RPL */
		rpl->Skb = dev_alloc_skb(tp->MaxPacketSize);
		if(rpl->Skb == NULL)
		{
			rpl->SkbStat = SKB_UNAVAILABLE;
			rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
			rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
		}
		else	/* skb != NULL */
		{
			rpl->Skb->dev = dev;
			skb_put(rpl->Skb, tp->MaxPacketSize);

			/* Map the skb data for DMA; fall back to the local buffer if unreachable */
			dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
			{
				rpl->SkbStat = SKB_DATA_COPY;
				rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
				rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
			}
			else
			{
				/* DMA directly into the skb data */
				rpl->SkbStat = SKB_DMA_DIRECT;
				rpl->FragList[0].DataAddr = htonl(dmabuf);
				rpl->MData = rpl->Skb->data;
				rpl->DMABuff = dmabuf;
			}
		}

		rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
		rpl->FrameSize = 0;

		/* Pass the last RPL back to the adapter */
		tp->RplTail->FrameSize = 0;

		/* Mark the RPL valid again and request a frame interrupt */
		tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ);

		/* Advance the tail pointer */
		tp->RplTail = tp->RplTail->NextRPLPtr;

		/* Tell the adapter that a valid RPL is available again */
		tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
	}

	return;
}
Example No. 13
static void tms380tr_tx_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned char HighByte, HighAc, LowAc;
	TPL *tpl;

	for(;;)
	{
		tpl = tp->TplBusy;
		if(!tpl->BusyFlag || (tpl->Status
			& (TX_VALID | TX_FRAME_COMPLETE))
			!= TX_FRAME_COMPLETE)
		{
			break;
		}

		/* Advance the busy pointer to the next TPL */
		tp->TplBusy = tpl->NextTPLPtr;

		/* Evaluate the transmit status of directed frames */
		if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0)
		{
			HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status);
			HighAc   = GET_FRAME_STATUS_HIGH_AC(HighByte);
			LowAc    = GET_FRAME_STATUS_LOW_AC(HighByte);

			if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED))
			{
				printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n",
					dev->name,
					*(unsigned long *)&tpl->MData[2+2]);
			}
			else
			{
				if(tms380tr_debug > 3)
					printk(KERN_DEBUG "%s: Directed frame tx'd\n", 
						dev->name);
			}
		}
		else
		{
			if(!DIRECTED_FRAME(tpl))
			{
				if(tms380tr_debug > 3)
					printk(KERN_DEBUG "%s: Broadcast frame tx'd\n",
						dev->name);
			}
		}

		tp->MacStat.tx_packets++;
		if (tpl->DMABuff)
			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_irq(tpl->Skb);
		tpl->BusyFlag = 0;	/* mark the TPL free */
	}

	if(!tp->TplFree->NextTPLPtr->BusyFlag)
		netif_wake_queue(dev);
	return;
}
Example No. 14
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;
	np = dev->priv;
	spin_lock(&np->lock);

	do {
		int intr_status = readw(ioaddr + IntrStatus);
		writew(intr_status & (IntrRxDone | IntrRxDMADone | IntrPCIErr |
			IntrDrvRqst | IntrTxDone | IntrTxDMADone | StatsMax | 
			LinkChange), ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone|IntrRxDMADone))
			netdev_rx(dev);

		if (intr_status & IntrTxDone) {
			int boguscnt = 32;
			int tx_status = readw(ioaddr + TxStatus);
			while (tx_status & 0x80) {
				if (debug > 4)
					printk("%s: Transmit status is %2.2x.\n",
						   dev->name, tx_status);
				if (tx_status & 0x1e) {
					np->stats.tx_errors++;
					if (tx_status & 0x10)  np->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
					if (tx_status & 0x08)  np->stats.collisions16++;
#else
					if (tx_status & 0x08)  np->stats.collisions++;
#endif
					if (tx_status & 0x04)  np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)  np->stats.tx_window_errors++;
					/* This reset has not been verified!. */
					if (tx_status & 0x10) {			/* Reset the Tx. */
						writew(0x001c, ioaddr + ASICCtrl + 2);
#if 0					/* Do we need to reset the Tx pointer here? */
						writel(np->tx_ring_dma
							+ np->dirty_tx*sizeof(*np->tx_ring),
							dev->base_addr + TxListPtr);
#endif
					}
					if (tx_status & 0x1e) 		/* Restart the Tx. */
						writew(TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				writew(0, ioaddr + TxStatus);
				tx_status = readb(ioaddr + TxStatus);
				if (--boguscnt < 0)
					break;
			}
		}
		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if ( ! (np->tx_ring[entry].status & 0x00010000))
				break;
			skb = np->tx_skbuff[entry];
			/* Free the original skb. */
			pci_unmap_single(np->pci_dev, 
				np->tx_ring[entry].frag[0].addr, 
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			np->tx_skbuff[entry] = 0;
		}
		if (np->tx_full
			&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear tbusy. */
			np->tx_full = 0;
			netif_wake_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
		if (--boguscnt < 0) {
			get_stats(dev);
			if (debug > 1) 
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x / 0x%4.4x.\n",
				   dev->name, intr_status, readw(ioaddr + IntrClear));
			/* Re-enable us in 3.2msec. */
			writew(0, ioaddr + IntrEnable);
			writew(1000, ioaddr + DownCounter);
			writew(IntrDrvRqst, ioaddr + IntrEnable);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, readw(ioaddr + IntrStatus));

	spin_unlock(&np->lock);
}
Example No. 15
struct sk_buff *acd_venet_drop_rx(acd_venet_rx_info_t *info)
{
    dev_kfree_skb_irq(info->avri_skb);
    info->avri_skb = NULL;
    return NULL;
}
Example No. 16
static irqreturn_t
W6692_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState	*cs = dev_id;
	u_char			val, exval, v1;
	struct sk_buff		*skb;
	u_int			count;
	u_long			flags;
	int			icnt = 5;

	spin_lock_irqsave(&cs->lock, flags);
	val = cs->readW6692(cs, W_ISTA);
	if (!val) {
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
      StartW6692:
	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "W6692 ISTA %x", val);

	if (val & W_INT_D_RME) {	/* RME */
		exval = cs->readW6692(cs, W_D_RSTA);
		if (exval & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) {
			if (exval & W_D_RSTA_RDOV)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 RDOV");
			if (exval & W_D_RSTA_CRCE)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 D-channel CRC error");
			if (exval & W_D_RSTA_RMB)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 D-channel ABORT");
			cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST);
		} else {
			count = cs->readW6692(cs, W_D_RBCL) & (W_D_FIFO_THRESH - 1);
			if (count == 0)
				count = W_D_FIFO_THRESH;
			W6692_empty_fifo(cs, count);
			if ((count = cs->rcvidx) > 0) {
				cs->rcvidx = 0;
				if (!(skb = alloc_skb(count, GFP_ATOMIC)))
					printk(KERN_WARNING "HiSax: D receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), cs->rcvbuf, count);
					skb_queue_tail(&cs->rq, skb);
				}
			}
		}
		cs->rcvidx = 0;
		schedule_event(cs, D_RCVBUFREADY);
	}
	if (val & W_INT_D_RMR) {	/* RMR */
		W6692_empty_fifo(cs, W_D_FIFO_THRESH);
	}
	if (val & W_INT_D_XFR) {	/* XFR */
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		if (cs->tx_skb) {
			if (cs->tx_skb->len) {
				W6692_fill_fifo(cs);
				goto afterXFR;
			} else {
				dev_kfree_skb_irq(cs->tx_skb);
				cs->tx_cnt = 0;
				cs->tx_skb = NULL;
			}
		}
		if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
			cs->tx_cnt = 0;
			W6692_fill_fifo(cs);
		} else
			schedule_event(cs, D_XMTBUFREADY);
	}
      afterXFR:
	if (val & (W_INT_XINT0 | W_INT_XINT1)) {	/* XINT0/1 - never */
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "W6692 spurious XINT!");
	}
	if (val & W_INT_D_EXI) {	/* EXI */
		exval = cs->readW6692(cs, W_D_EXIR);
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692 D_EXIR %02x", exval);
		if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) {	/* Transmit underrun/collision */
			debugl1(cs, "W6692 D-chan underrun/collision");
			printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL\n");
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				schedule_event(cs, D_CLEARBUSY);
			if (cs->tx_skb) {	/* Restart frame */
				skb_push(cs->tx_skb, cs->tx_cnt);
				cs->tx_cnt = 0;
				W6692_fill_fifo(cs);
			} else {
				printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL no skb\n");
				debugl1(cs, "W6692 XDUN/XCOL no skb");
				cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST);
			}
		}
		if (exval & W_D_EXI_RDOV) {	/* RDOV */
			debugl1(cs, "W6692 D-channel RDOV");
			printk(KERN_WARNING "HiSax: W6692 D-RDOV\n");
			cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST);
		}
		if (exval & W_D_EXI_TIN2) {	/* TIN2 - never */
			debugl1(cs, "W6692 spurious TIN2 interrupt");
		}
		if (exval & W_D_EXI_MOC) {	/* MOC - not supported */
			debugl1(cs, "W6692 spurious MOC interrupt");
			v1 = cs->readW6692(cs, W_MOSR);
			debugl1(cs, "W6692 MOSR %02x", v1);
		}
		if (exval & W_D_EXI_ISC) {	/* ISC - Level1 change */
			v1 = cs->readW6692(cs, W_CIR);
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "W6692 ISC CIR=0x%02X", v1);
			if (v1 & W_CIR_ICC) {
				cs->dc.w6692.ph_state = v1 & W_CIR_COD_MASK;
				if (cs->debug & L1_DEB_ISAC)
					debugl1(cs, "ph_state_change %x", cs->dc.w6692.ph_state);
				schedule_event(cs, D_L1STATECHANGE);
			}
			if (v1 & W_CIR_SCC) {
				v1 = cs->readW6692(cs, W_SQR);
				debugl1(cs, "W6692 SCC SQR=0x%02X", v1);
			}
		}
		if (exval & W_D_EXI_WEXP) {
			debugl1(cs, "W6692 spurious WEXP interrupt!");
		}
		if (exval & W_D_EXI_TEXP) {
			debugl1(cs, "W6692 spurious TEXP interrupt!");
		}
	}
	if (val & W_INT_B1_EXI) {
		debugl1(cs, "W6692 B channel 1 interrupt");
		W6692B_interrupt(cs, 0);
	}
	if (val & W_INT_B2_EXI) {
		debugl1(cs, "W6692 B channel 2 interrupt");
		W6692B_interrupt(cs, 1);
	}
	val = cs->readW6692(cs, W_ISTA);
	if (val && icnt) {
		icnt--;
		goto StartW6692;
	}
	if (!icnt) {
		printk(KERN_WARNING "W6692 IRQ LOOP\n");
		cs->writeW6692(cs, W_IMASK, 0xff);
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
Example No. 17
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
						(sz > (dev->mtu + ETH_HLEN))) {
			pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
				sz, RMNET_IS_MODE_IP(opmode) ?
					dev->mtu : (dev->mtu + ETH_HLEN));
			ptr = 0;
		} else {
			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
			if (skb == NULL) {
				pr_err("rmnet_recv() cannot allocate skb\n");
			} else {
				skb->dev = dev;
				skb_reserve(skb, NET_IP_ALIGN);
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					pr_err("rmnet_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					/* Handle Rx frame format */
					spin_lock_irqsave(&p->lock, flags);
					opmode = p->operation_mode;
					spin_unlock_irqrestore(&p->lock, flags);

					if (RMNET_IS_MODE_IP(opmode)) {
						/* Driver in IP mode */
						skb->protocol =
						  rmnet_ip_type_trans(skb, dev);
					} else {
						/* Driver in Ethernet mode */
						skb->protocol =
						  eth_type_trans(skb, dev);
					}
					if (RMNET_IS_MODE_IP(opmode) ||
					    count_this_packet(ptr, skb->len)) {
#if 0
						p->wakeups_rcv +=
							rmnet_cause_wakeup(p);
#endif
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					netif_rx(skb);
				}
				continue;
			}
		}
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}
Example No. 18
void
icc_interrupt(struct IsdnCardState *cs, u_char val)
{
	u_char exval, v1;
	struct sk_buff *skb;
	unsigned int count;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "ICC interrupt %x", val);
	if (val & 0x80) {	/* RME */
		exval = cs->readisac(cs, ICC_RSTA);
		if ((exval & 0x70) != 0x20) {
			if (exval & 0x40) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "ICC RDO");
#ifdef ERROR_STATISTIC
				cs->err_rx++;
#endif
			}
			if (!(exval & 0x20)) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "ICC CRC error");
#ifdef ERROR_STATISTIC
				cs->err_crc++;
#endif
			}
			cs->writeisac(cs, ICC_CMDR, 0x80);
		} else {
			count = cs->readisac(cs, ICC_RBCL) & 0x1f;
			if (count == 0)
				count = 32;
			icc_empty_fifo(cs, count);
			if ((count = cs->rcvidx) > 0) {
				cs->rcvidx = 0;
				if (!(skb = alloc_skb(count, GFP_ATOMIC)))
					printk(KERN_WARNING "HiSax: D receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), cs->rcvbuf, count);
					skb_queue_tail(&cs->rq, skb);
				}
			}
		}
		cs->rcvidx = 0;
		schedule_event(cs, D_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		icc_empty_fifo(cs, 32);
	}
	if (val & 0x20) {	/* RSC */
		/* never */
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "ICC RSC interrupt");
	}
	if (val & 0x10) {	/* XPR */
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		if (cs->tx_skb) {
			if (cs->tx_skb->len) {
				icc_fill_fifo(cs);
				goto afterXPR;
			} else {
				dev_kfree_skb_irq(cs->tx_skb);
				cs->tx_cnt = 0;
				cs->tx_skb = NULL;
			}
		}
		if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
			cs->tx_cnt = 0;
			icc_fill_fifo(cs);
		} else
			schedule_event(cs, D_XMTBUFREADY);
	}
      afterXPR:
	if (val & 0x04) {	/* CISQ */
		exval = cs->readisac(cs, ICC_CIR0);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ICC CIR0 %02X", exval );
		if (exval & 2) {
			cs->dc.icc.ph_state = (exval >> 2) & 0xf;
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "ph_state change %x", cs->dc.icc.ph_state);
			schedule_event(cs, D_L1STATECHANGE);
		}
		/* ... */
	}
}
Example No. 19
void Amd7930_interrupt(struct IsdnCardState *cs, BYTE irflags)
{
	BYTE dsr1, dsr2, lsr;
        WORD der;

 while (irflags)
 {

        dsr1 = rByteAMD(cs, 0x02);
        der  = rWordAMD(cs, 0x03);
        dsr2 = rByteAMD(cs, 0x07);
        lsr  = rByteAMD(cs, 0xA1);

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "Amd7930: interrupt: flags: 0x%02X, DSR1: 0x%02X, DSR2: 0x%02X, LSR: 0x%02X, DER=0x%04X", irflags, dsr1, dsr2, lsr, der);

        /* D error -> read DER and DSR2 bit 2 */
	if (der || (dsr2 & 4)) {

                if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "Amd7930: interrupt: D error DER=0x%04X", der);

                /* RX, TX abort if collision detected */
                if (der & 2) {
                        wByteAMD(cs, 0x21, 0xC2);
                        wByteAMD(cs, 0x21, 0x02);
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				schedule_event(cs, D_CLEARBUSY);
                        /* restart frame */
                        if (cs->tx_skb) {
				skb_push(cs->tx_skb, cs->tx_cnt);
				cs->tx_cnt = 0;
                                cs->dc.amd7930.tx_xmtlen = 0;
				Amd7930_fill_Dfifo(cs);
			} else {
				printk(KERN_WARNING "HiSax: Amd7930 D-Collision, no skb\n");
				debugl1(cs, "Amd7930: interrupt: D-Collision, no skb");
			}
                }
                /* remove damaged data from fifo */
		Amd7930_empty_Dfifo(cs, 1);

		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
                /* restart TX-Frame */
                if (cs->tx_skb) {
			skb_push(cs->tx_skb, cs->tx_cnt);
			cs->tx_cnt = 0;
                        cs->dc.amd7930.tx_xmtlen = 0;
			Amd7930_fill_Dfifo(cs);
		}
	}

        /* D TX FIFO empty -> fill */
	if (irflags & 1) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "Amd7930: interrupt: clear Timer and fill D-TX-FIFO if data");

		/* AMD interrupts off */
                AmdIrqOff(cs);

                if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		if (cs->tx_skb) {
			if (cs->tx_skb->len)
				Amd7930_fill_Dfifo(cs);
		}
		/* AMD interrupts on */
                AmdIrqOn(cs);
	}


        /* D RX FIFO full or tiny packet in Fifo -> empty */
	if ((irflags & 2) || (dsr1 & 2)) {
                if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "Amd7930: interrupt: empty D-FIFO");
                Amd7930_empty_Dfifo(cs, 0);
	}


        /* D-Frame transmit complete */
	if (dsr1 & 64) {
		if (cs->debug & L1_DEB_ISAC) {
			debugl1(cs, "Amd7930: interrupt: transmit packet ready");
        	}
		/* AMD interrupts off */
                AmdIrqOff(cs);

                if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);

                if (cs->tx_skb) {
        		if (cs->debug & L1_DEB_ISAC)
	        		debugl1(cs, "Amd7930: interrupt: TX-Packet ready, freeing skb");
                        dev_kfree_skb_irq(cs->tx_skb);
			cs->tx_cnt = 0;
                        cs->dc.amd7930.tx_xmtlen=0;
			cs->tx_skb = NULL;
                }
                if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
        		if (cs->debug & L1_DEB_ISAC)
	        		debugl1(cs, "Amd7930: interrupt: TX-Packet ready, next packet dequeued");
	        	cs->tx_cnt = 0;
                        cs->dc.amd7930.tx_xmtlen=0;
			Amd7930_fill_Dfifo(cs);
		}
                else
			schedule_event(cs, D_XMTBUFREADY);
		/* AMD interrupts on */
                AmdIrqOn(cs);
        }

	/* LIU status interrupt -> read LSR, check statechanges */
	if (lsr & 0x38) {
                /* AMD interrupts off */
                AmdIrqOff(cs);

		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "Amd: interrupt: LSR=0x%02X, LIU is in state %d", lsr, ((lsr & 0x7) +2));

		cs->dc.amd7930.ph_state = (lsr & 0x7) + 2;

		schedule_event(cs, D_L1STATECHANGE);
		/* AMD interrupts on */
                AmdIrqOn(cs);
	}

	/* Read the interrupt register again; if a new flag is set, restart the handler. */
	irflags = rByteAMD(cs, 0x00);
	}

}
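Amd7930_interrupt() re-reads the interrupt register at the bottom of its loop so that events raised while one flag was being serviced are not lost. The loop shape, reduced to its essentials (read_ir() and handle() are illustrative placeholders for rByteAMD() and the flag dispatch):

/* "Re-read until quiet" loop shape; read_ir() and handle() are
 * illustrative stand-ins for rByteAMD(cs, 0x00) and the flag cases. */
static void irq_drain(unsigned char (*read_ir)(void),
		      void (*handle)(unsigned char flags))
{
	unsigned char flags = read_ir();

	while (flags) {			/* events may arrive while we work */
		handle(flags);
		flags = read_ir();	/* anything new set? go again */
	}
}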
Esempio n. 20
0
/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static void
scc_enet_interrupt(void *dev_id, struct pt_regs *regs)
{
	struct	net_device *dev = dev_id;
	volatile struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	int_events;
	int	must_restart;

	cep = (struct scc_enet_private *)dev->priv;

	/* Get the interrupt events that caused us to be here.
	*/
	int_events = cep->sccp->scc_scce;
	cep->sccp->scc_scce = int_events;
	must_restart = 0;

	/* Handle receive event in its own function.
	*/
	if (int_events & SCCE_ENET_RXF)
		scc_enet_rx(dev_id);

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so the debug code stays in until I get it figured
	 * out.  It appears that if TXE is set, then TXB is not set.
	 * However, if carrier sense is lost during frame transmission,
	 * the TXE bit is set, "and continues the buffer transmission
	 * normally."  I don't know if "normally" implies TXB is set when
	 * the buffer descriptor is closed... trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
	*/
	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
	    spin_lock(&cep->lock);
	    bdp = cep->dirty_tx;
	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
		    break;

		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
			cep->stats.tx_heartbeat_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
			cep->stats.tx_window_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
			cep->stats.tx_aborted_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
			cep->stats.tx_fifo_errors++;
		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
			cep->stats.tx_carrier_errors++;


		/* No heartbeat or Lost carrier are not really bad errors.
		 * The others require a restart transmit command.
		 */
		if (bdp->cbd_sc &
		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
			must_restart = 1;
			cep->stats.tx_errors++;
		}

		cep->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			cep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		*/
		dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		*/
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = cep->tx_bd_base;
		else
			bdp++;

		/* I don't know if we can be held off from processing these
		 * interrupts for more than one frame time.  I really hope
		 * not.  In such a case, we would now want to check the
		 * currently available BD (cur_tx) and determine if any
		 * buffers between the dirty_tx and cur_tx have also been
		 * sent.  We would want to process anything in between that
		 * does not have BD_ENET_TX_READY set.
		 */

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (cep->tx_full) {
			cep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}

		cep->dirty_tx = (cbd_t *)bdp;
	    }

	    if (must_restart) {
		volatile cpm8xx_t *cp;

		/* Some transmit errors cause the transmitter to shut
		 * down.  We now issue a restart transmit.  Since the
		 * errors close the BD and update the pointers, the restart
		 * _should_ pick up without having to reset any of our
		 * pointers either.
		 */
		cp = cpmp;
		cp->cp_cpcr =
		    mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG;
		while (cp->cp_cpcr & CPM_CR_FLG);
	    }
	    spin_unlock(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.  This "can't happen" because the receive interrupt
	 * is tossing previous frames.
	 */
	if (int_events & SCCE_ENET_BSY) {
		cep->stats.rx_dropped++;
		printk("CPM ENET: BSY can't happen.\n");
	}

	return;
}
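The TXB/TXE path above is a classic transmit-ring reaper: walk from dirty_tx while descriptors are no longer hardware-owned, classify the error bits, free each completed skb with dev_kfree_skb_irq(), and reopen the queue once a slot frees up. A minimal sketch over a plain array ring; the ring struct and its ownership bit are assumptions for illustration only:

/* Sketch of the dirty_tx reaper above on a plain array ring; the
 * struct layout and the READY bit are illustrative assumptions. */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define RING_SIZE 16

struct tx_ring {
	struct sk_buff *skb[RING_SIZE];
	unsigned int flags[RING_SIZE];	/* bit 0: still owned by HW */
	unsigned int dirty;		/* next descriptor to reap */
};

static void tx_reap(struct net_device *dev, struct tx_ring *r)
{
	while (r->skb[r->dirty % RING_SIZE] &&
	       !(r->flags[r->dirty % RING_SIZE] & 1)) {
		unsigned int i = r->dirty % RING_SIZE;

		dev_kfree_skb_irq(r->skb[i]);	/* sent: release to stack */
		r->skb[i] = NULL;
		r->dirty++;
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);	/* a slot is free again */
	}
}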
Esempio n. 21
0
/* packet receiver */
static void rx(struct net_device *dev, int bufnum,
	       struct archdr *pkthdr, int length)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	struct archdr *pkt = pkthdr;
	struct arc_rfc1201 *soft = &pkthdr->soft.rfc1201;
	int saddr = pkt->hard.source, ofs;
	struct Incoming *in = &lp->rfc1201.incoming[saddr];

	BUGMSG(D_DURING, "it's an RFC1201 packet (length=%d)\n", length);

	if (length >= MinTU)
		ofs = 512 - length;
	else
		ofs = 256 - length;

	if (soft->split_flag == 0xFF) {		/* Exception Packet */
		if (length >= 4 + RFC1201_HDR_SIZE)
			BUGMSG(D_DURING, "compensating for exception packet\n");
		else {
			BUGMSG(D_EXTRA, "short RFC1201 exception packet from %02Xh",
			       saddr);
			return;
		}

		/* skip over 4-byte junkola */
		length -= 4;
		ofs += 4;
		lp->hw.copy_from_card(dev, bufnum, 512 - length,
				      soft, sizeof(pkt->soft));
	}
	if (!soft->split_flag) {	/* not split */
		BUGMSG(D_RX, "incoming is not split (splitflag=%d)\n",
		       soft->split_flag);

		if (in->skb) {	/* already assembling one! */
			BUGMSG(D_EXTRA, "aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n",
			 in->sequence, soft->split_flag, soft->sequence);
			lp->rfc1201.aborted_seq = soft->sequence;
			dev_kfree_skb_irq(in->skb);
			lp->stats.rx_errors++;
			lp->stats.rx_missed_errors++;
			in->skb = NULL;
		}
		in->sequence = soft->sequence;

		skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
		if (skb == NULL) {
			BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
			lp->stats.rx_dropped++;
			return;
		}
		skb_put(skb, length + ARC_HDR_SIZE);
		skb->dev = dev;

		pkt = (struct archdr *) skb->data;
		soft = &pkt->soft.rfc1201;

		/* up to sizeof(pkt->soft) has already been copied from the card */
		memcpy(pkt, pkthdr, sizeof(struct archdr));
		if (length > sizeof(pkt->soft))
			lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
				       pkt->soft.raw + sizeof(pkt->soft),
					      length - sizeof(pkt->soft));

		/*
		 * ARP packets have problems when sent from some DOS systems: the
		 * source address is always 0!  So we take the hardware source addr
		 * (which is impossible to fumble) and insert it ourselves.
		 */
		if (soft->proto == ARC_P_ARP) {
			struct arphdr *arp = (struct arphdr *) soft->payload;

			/* make sure addresses are the right length */
			if (arp->ar_hln == 1 && arp->ar_pln == 4) {
				uint8_t *cptr = (uint8_t *) arp + sizeof(struct arphdr);

				if (!*cptr) {	/* is saddr = 00? */
					BUGMSG(D_EXTRA,
					       "ARP source address was 00h, set to %02Xh.\n",
					       saddr);
					lp->stats.rx_crc_errors++;
					*cptr = saddr;
				} else {
					BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n",
					       *cptr);
				}
			} else {
				BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n",
				       arp->ar_hln, arp->ar_pln);
				lp->stats.rx_errors++;
				lp->stats.rx_crc_errors++;
			}
		}
		BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");

		skb->protocol = type_trans(skb, dev);
		netif_rx(skb);
	} else {		/* split packet */
		/*
		 * NOTE: MSDOS ARP packet correction should only need to apply to
		 * unsplit packets, since ARP packets are so short.
		 *
		 * My interpretation of the RFC1201 document is that if a packet is
		 * received out of order, the entire assembly process should be
		 * aborted.
		 *
		 * The RFC also mentions "it is possible for successfully received
		 * packets to be retransmitted." As of 0.40 all previously received
		 * packets are allowed, not just the most recent one.
		 *
		 * We allow multiple assembly processes, one for each ARCnet card
		 * possible on the network.  Seems rather like a waste of memory,
		 * but there's no other way to be reliable.
		 */

		BUGMSG(D_RX, "packet is split (splitflag=%d, seq=%d)\n",
		       soft->split_flag, in->sequence);

		if (in->skb && in->sequence != soft->sequence) {
			BUGMSG(D_EXTRA, "wrong seq number (saddr=%d, expected=%d, seq=%d, splitflag=%d)\n",
			       saddr, in->sequence, soft->sequence,
			       soft->split_flag);
			dev_kfree_skb_irq(in->skb);
			in->skb = NULL;
			lp->stats.rx_errors++;
			lp->stats.rx_missed_errors++;
			in->lastpacket = in->numpackets = 0;
		}
		if (soft->split_flag & 1) {	/* first packet in split */
			BUGMSG(D_RX, "brand new splitpacket (splitflag=%d)\n",
			       soft->split_flag);
			if (in->skb) {	/* already assembling one! */
				BUGMSG(D_EXTRA, "aborting previous (seq=%d) assembly "
				       "(splitflag=%d, seq=%d)\n",
				       in->sequence, soft->split_flag,
				       soft->sequence);
				lp->stats.rx_errors++;
				lp->stats.rx_missed_errors++;
				dev_kfree_skb_irq(in->skb);
			}
			in->sequence = soft->sequence;
			in->numpackets = ((unsigned) soft->split_flag >> 1) + 2;
			in->lastpacket = 1;

			if (in->numpackets > 16) {
				BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
				       soft->split_flag);
				lp->rfc1201.aborted_seq = soft->sequence;
				lp->stats.rx_errors++;
				lp->stats.rx_length_errors++;
				return;
			}
			in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
						  GFP_ATOMIC);
			if (skb == NULL) {
				BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n");
				lp->rfc1201.aborted_seq = soft->sequence;
				lp->stats.rx_dropped++;
				return;
			}
			skb->dev = dev;
			pkt = (struct archdr *) skb->data;
			soft = &pkt->soft.rfc1201;

			memcpy(pkt, pkthdr, ARC_HDR_SIZE + RFC1201_HDR_SIZE);
			skb_put(skb, ARC_HDR_SIZE + RFC1201_HDR_SIZE);

			soft->split_flag = 0;	/* end result won't be split */
		} else {	/* not first packet */
Esempio n. 22
0
static void send_queued_packets(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char fc;
	int queue;
	struct s_smt_fp_txd *txd;	
	dma_addr_t dma_address;
	unsigned long Flags;

	int frame_status;	

	pr_debug("send queued packets\n");
	for (;;) {
		
		skb = skb_dequeue(&bp->SendSkbQueue);

		if (!skb) {
			pr_debug("queue empty\n");
			return;
		}		

		spin_lock_irqsave(&bp->DriverLock, Flags);
		fc = skb->data[0];
		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
		

		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
			
			if (!smc->ess.sync_bw_available)
				fc &= ~FC_SYNC_BIT; 

			else {	

				if (smc->mib.fddiESSSynchTxMode) {
					
					fc |= FC_SYNC_BIT;
				}
			}
		}
#endif				
		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);

		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
			

			if ((frame_status & RING_DOWN) != 0) {
				
				pr_debug("Tx attempt while ring down.\n");
			} else if ((frame_status & OUT_OF_TXD) != 0) {
				pr_debug("%s: out of TXDs.\n", bp->dev->name);
			} else {
				pr_debug("%s: out of transmit resources",
					bp->dev->name);
			}

			
			
			skb_queue_head(&bp->SendSkbQueue, skb);
			spin_unlock_irqrestore(&bp->DriverLock, Flags);
			return;	

		}		

		bp->QueueSkb++;	

		
		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);

		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);

		dma_address = pci_map_single(&bp->pdev, skb->data,
					     skb->len, PCI_DMA_TODEVICE);
		if (frame_status & LAN_TX) {
			txd->txd_os.skb = skb;			
			txd->txd_os.dma_addr = dma_address;	
		}
		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
                      frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);

		if (!(frame_status & LAN_TX)) {		
			pci_unmap_single(&bp->pdev, dma_address,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
		}
		spin_unlock_irqrestore(&bp->DriverLock, Flags);
	}			

	return;			

}				
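Note the ownership split in send_queued_packets(): once the hardware accepts the buffer (LAN_TX), the descriptor owns the skb until TX completion; on resource exhaustion the skb is pushed back with skb_queue_head() so ordering survives, and only a locally rejected packet is freed immediately. A hedged sketch of that split, with hw_take() as a hypothetical stand-in for the hwm_tx_init()/hwm_tx_frag() pair:

/* Ownership split sketched from the loop above; hw_take() is a
 * hypothetical stand-in for the driver's descriptor setup. */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void tx_drain(struct sk_buff_head *q,
		     int (*hw_take)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		int rc = hw_take(skb);

		if (rc == -EBUSY) {		/* out of descriptors */
			skb_queue_head(q, skb);	/* retry later, keep order */
			return;
		}
		if (rc < 0)
			dev_kfree_skb_irq(skb);	/* HW never owned it */
		/* rc == 0: descriptor owns the skb until TX completion */
	}
}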
Esempio n. 23
0
static struct sk_buff
*hfc_empty_fifo(struct BCState *bcs, int count)
{
	u_char *ptr;
	struct sk_buff *skb;
	struct IsdnCardState *cs = bcs->cs;
	int idx;
	int chksum;
	u_char stat, cip;
	
	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "hfc_empty_fifo");
	idx = 0;
	if (count > HSCX_BUFMAX + 3) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hfc_empty_fifo: incoming packet too large");
		cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
		while (idx++ < count) {
			WaitNoBusy(cs);
			ReadReg(cs, HFCD_DATA_NODEB, cip);
		}
		skb = NULL;
	} else if (count < 4) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hfc_empty_fifo: incoming packet too small");
		cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
#ifdef ERROR_STATISTIC
		bcs->err_inv++;
#endif
		while ((idx++ < count) && WaitNoBusy(cs))
			ReadReg(cs, HFCD_DATA_NODEB, cip);
		skb = NULL;
	} else if (!(skb = dev_alloc_skb(count - 3)))
		printk(KERN_WARNING "HFC: receive out of memory\n");
	else {
		ptr = skb_put(skb, count - 3);
		idx = 0;
		cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
		while (idx < (count - 3)) {
			if (!WaitNoBusy(cs))
				break;
			*ptr = ReadReg(cs,  HFCD_DATA_NODEB, cip);
			ptr++;
			idx++;
		}
		if (idx != count - 3) {
			debugl1(cs, "RFIFO BUSY error");
			printk(KERN_WARNING "HFC FIFO channel %d BUSY Error\n", bcs->channel);
			dev_kfree_skb_irq(skb);
			skb = NULL;
		} else {
			WaitNoBusy(cs);
			chksum = (ReadReg(cs, HFCD_DATA, cip) << 8);
			WaitNoBusy(cs);
			chksum += ReadReg(cs, HFCD_DATA, cip);
			WaitNoBusy(cs);
			stat = ReadReg(cs, HFCD_DATA, cip);
			if (cs->debug & L1_DEB_HSCX)
				debugl1(cs, "hfc_empty_fifo %d chksum %x stat %x",
					bcs->channel, chksum, stat);
			if (stat) {
				debugl1(cs, "FIFO CRC error");
				dev_kfree_skb_irq(skb);
				skb = NULL;
#ifdef ERROR_STATISTIC
				bcs->err_crc++;
#endif
			}
		}
	}
	WaitForBusy(cs);
	WaitNoBusy(cs);
	stat = ReadReg(cs, HFCD_DATA, HFCB_FIFO | HFCB_F2_INC |
		HFCB_REC | HFCB_CHANNEL(bcs->channel));
	WaitForBusy(cs);
	return (skb);
}
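hfc_empty_fifo() checks the frame length before allocating: runt and giant frames are drained from the FIFO byte by byte and dropped, and an skb that was already filled is freed again if the trailing status byte reports a CRC error. The same bounds-check-then-allocate shape, sketched with an illustrative read_byte() in place of the WaitNoBusy()/ReadReg() pair:

/* Bounds-check-then-allocate shape of the function above; read_byte()
 * and the size limits are illustrative, the 3-byte trailer (checksum
 * high/low + status) follows the code above. */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *fifo_read(int count, int max,
				 unsigned char (*read_byte)(void))
{
	struct sk_buff *skb;
	int i;

	if (count < 4 || count > max + 3) {	/* runt/giant: drain and drop */
		for (i = 0; i < count; i++)
			read_byte();
		return NULL;
	}
	skb = dev_alloc_skb(count - 3);		/* payload minus the trailer */
	if (!skb)
		return NULL;
	for (i = 0; i < count - 3; i++)
		*(unsigned char *)skb_put(skb, 1) = read_byte();
	read_byte();				/* checksum high byte */
	read_byte();				/* checksum low byte */
	if (read_byte()) {			/* non-zero STAT: CRC error */
		dev_kfree_skb_irq(skb);
		return NULL;
	}
	return skb;
}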
Esempio n. 24
0
/*
 * Description: s_nsBulkOutIoCompleteWrite
 *     1a) Indicate to the protocol the status of the write.
 *     1b) Return ownership of the packet to the protocol.
 *
 *     2)  If any more packets are queue for sending, send another packet
 *         to USBD.
 *         If the attempt to send the packet to the driver fails,
 *         return ownership of the packet to the protocol and
 *         try another packet (until one succeeds).
 *
 * Parameters:
 *  In:
 *      pdoUsbDevObj  - pointer to the USB device object which
 *                      completed the irp
 *      pIrp          - the irp which was completed by the
 *                      device object
 *      pContext      - the context given to IoSetCompletionRoutine
 *                      before calling IoCallDriver on the irp
 *                      The pContext is a pointer to the USB device object.
 *  Out:
 *      none
 *
 * Return Value: STATUS_MORE_PROCESSING_REQUIRED - allows the completion routine
 *               (IofCompleteRequest) to stop working on the irp.
 *
 */
static
void
s_nsBulkOutIoCompleteWrite(
     struct urb *urb
    )
{
    PSDevice            pDevice;
    int status;
    CONTEXT_TYPE        ContextType;
    unsigned long               ulBufLen;
    PUSB_SEND_CONTEXT   pContext;


    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
    //
    // The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
    //
    pContext = (PUSB_SEND_CONTEXT) urb->context;
    ASSERT( NULL != pContext );

    pDevice = pContext->pDevice;
    ContextType = pContext->Type;
    ulBufLen = pContext->uBufLen;

    if (!netif_device_present(pDevice->dev))
	    return;

   //
    // Perform various IRP, URB, and buffer 'sanity checks'
    //

    status = urb->status;
    //we should have failed, succeeded, or cancelled, but NOT be pending
    STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkOutStat, status);

    if(status == STATUS_SUCCESS) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
        pDevice->ulBulkOutBytesWrite += ulBufLen;
        pDevice->ulBulkOutContCRCError = 0;
	pDevice->nTxDataTimeCout = 0;

    } else {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
        pDevice->ulBulkOutError++;
    }

//    pDevice->ulCheckForHangCount = 0;
//    pDevice->pPendingBulkOutContext = NULL;

    if ( CONTEXT_DATA_PACKET == ContextType ) {
        // Indicate to the protocol the status of the sent packet and return
        // ownership of the packet.
	    if (pContext->pPacket != NULL) {
	        dev_kfree_skb_irq(pContext->pPacket);
	        pContext->pPacket = NULL;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"tx  %d bytes\n",(int)ulBufLen);
	    }

        pDevice->dev->trans_start = jiffies;


        if (status == STATUS_SUCCESS) {
            pDevice->packetsSent++;
        }
        else {
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send USB error! [%08xh]\n", status);
            pDevice->packetsSentDropped++;
        }

    }
    if (pDevice->bLinkPass == TRUE) {
        if (netif_queue_stopped(pDevice->dev))
            netif_wake_queue(pDevice->dev);
    }
    pContext->bBoolInUse = FALSE;

    return;
}
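A USB bulk-out completion runs in interrupt context, which is why the handler above releases the sent packet with dev_kfree_skb_irq() before accounting the result and waking the queue. A generic sketch of that callback shape; struct tx_ctx and the use of the generic net_device stats are assumptions standing in for PUSB_SEND_CONTEXT and the driver's private counters:

/* Generic shape of the bulk-out completion above; struct tx_ctx and
 * the stats fields used are illustrative assumptions. */
#include <linux/usb.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct tx_ctx {
	struct net_device *dev;
	struct sk_buff *skb;		/* packet the URB was carrying */
};

static void tx_complete(struct urb *urb)
{
	struct tx_ctx *ctx = urb->context;

	if (ctx->skb) {
		dev_kfree_skb_irq(ctx->skb);	/* completion runs atomically */
		ctx->skb = NULL;
	}
	if (urb->status)
		ctx->dev->stats.tx_dropped++;	/* the transfer failed */
	else
		ctx->dev->stats.tx_packets++;
	if (netif_queue_stopped(ctx->dev))
		netif_wake_queue(ctx->dev);	/* room for the next frame */
}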
Esempio n. 25
0
void
hfc2bds0_interrupt(struct IsdnCardState *cs, u_char val)
{
       	u_char exval;
       	struct BCState *bcs;
	int count=15;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "HFCD irq %x %s", val,
			test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
			"locked" : "unlocked");
	val &= cs->hw.hfcD.int_m1;
	if (val & 0x40) { /* TE state machine irq */
		exval = cs->readisac(cs, HFCD_STATES) & 0xf;
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcd.ph_state,
				exval);
		cs->dc.hfcd.ph_state = exval;
		schedule_event(cs, D_L1STATECHANGE);
		val &= ~0x40;
	}
	while (val) {
		if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			cs->hw.hfcD.int_s1 |= val;
			return;
		}
		if (cs->hw.hfcD.int_s1 & 0x18) {
			exval = val;
			val =  cs->hw.hfcD.int_s1;
			cs->hw.hfcD.int_s1 = exval;
		}	
		if (val & 0x08) {
			if (!(bcs=Sel_BCS(cs, 0))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x08 IRQ");
			} else 
				main_rec_2bds0(bcs);
		}
		if (val & 0x10) {
			if (!(bcs=Sel_BCS(cs, 1))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x10 IRQ");
			} else 
				main_rec_2bds0(bcs);
		}
		if (val & 0x01) {
			if (!(bcs=Sel_BCS(cs, 0))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x01 IRQ");
			} else {
				if (bcs->tx_skb) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfc_fill_fifo(bcs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else
						debugl1(cs,"fill_data %d blocked", bcs->channel);
				} else {
					if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
						if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
							hfc_fill_fifo(bcs);
							test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
						} else
							debugl1(cs,"fill_data %d blocked", bcs->channel);
					} else {
						schedule_event(bcs, B_XMTBUFREADY);
					}
				}
			}
		}
		if (val & 0x02) {
			if (!(bcs=Sel_BCS(cs, 1))) {
				if (cs->debug)
					debugl1(cs, "hfcd spurious 0x02 IRQ");
			} else {
				if (bcs->tx_skb) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfc_fill_fifo(bcs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else
						debugl1(cs,"fill_data %d blocked", bcs->channel);
				} else {
					if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
						if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
							hfc_fill_fifo(bcs);
							test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
						} else
							debugl1(cs,"fill_data %d blocked", bcs->channel);
					} else {
						schedule_event(bcs, B_XMTBUFREADY);
					}
				}
			}
		}
		if (val & 0x20) {	/* receive dframe */
			receive_dmsg(cs);
		}
		if (val & 0x04) {	/* dframe transmitted */
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				schedule_event(cs, D_CLEARBUSY);
			if (cs->tx_skb) {
				if (cs->tx_skb->len) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfc_fill_dfifo(cs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else {
						debugl1(cs, "hfc_fill_dfifo irq blocked");
					}
					goto afterXPR;
				} else {
					dev_kfree_skb_irq(cs->tx_skb);
					cs->tx_cnt = 0;
					cs->tx_skb = NULL;
				}
			}
			if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
				cs->tx_cnt = 0;
				if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
					hfc_fill_dfifo(cs);
					test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
				} else {
					debugl1(cs, "hfc_fill_dfifo irq blocked");
				}
			} else
				schedule_event(cs, D_XMTBUFREADY);
		}
      afterXPR:
		if (cs->hw.hfcD.int_s1 && count--) {
			val = cs->hw.hfcD.int_s1;
			cs->hw.hfcD.int_s1 = 0;
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "HFCD irq %x loop %d", val, 15-count);
		} else
			val = 0;
	}
}
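Every FIFO fill in hfc2bds0_interrupt() is guarded by test_and_set_bit(FLG_LOCK_ATOMIC, ...): if another path already holds the chip, the work is deferred rather than re-entered. The guard reduced to a helper (LOCK_BIT and the fill() callback are illustrative):

/* Sketch of the FLG_LOCK_ATOMIC guard used above; LOCK_BIT and
 * fill() are illustrative stand-ins. */
#include <linux/bitops.h>

#define LOCK_BIT 0

static int try_fill(unsigned long *flags, void (*fill)(void))
{
	if (test_and_set_bit(LOCK_BIT, flags))
		return 0;		/* someone holds the chip: defer */
	fill();				/* exclusive access to the FIFO */
	clear_bit(LOCK_BIT, flags);
	return 1;
}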
Esempio n. 26
0
static inline void
jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
{
	u_char r;
	struct BCState *bcs = cs->bcs + jade;
	struct sk_buff *skb;
	int fifo_size = 32;
	int count;
	int i_jade = (int) jade; /* To satisfy the compiler */
	
	if (!test_bit(BC_FLG_INIT, &bcs->Flag))
		return;

	if (val & 0x80) {	/* RME */
		r = READJADE(cs, i_jade, jade_HDLC_RSTA);
		if ((r & 0xf0) != 0xa0) {
			if (!(r & 0x80))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %s invalid frame", (jade ? "B":"A"));
			if ((r & 0x40) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %c RDO mode=%d", 'A'+jade, bcs->mode);
			if (!(r & 0x20))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %c CRC error", 'A'+jade);
			WriteJADECMDR(cs, jade, jade_HDLC_RCMD, jadeRCMD_RMC);
		} else {
			count = READJADE(cs, i_jade, jade_HDLC_RBCL) & 0x1F;
			if (count == 0)
				count = fifo_size;
			jade_empty_fifo(bcs, count);
			if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "HX Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "JADE %s receive out of memory\n", (jade ? "B":"A"));
				else {
					memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.hscx.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		jade_empty_fifo(bcs, fifo_size);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(fifo_size)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.hscx.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & 0x10) {	/* XPR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				jade_fill_fifo(bcs);
				return;
			} else {
				if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
					(PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long	flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.hscx.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hscx.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.hscx.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			jade_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}
Esempio n. 27
0
static void
hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
{
	u_char r;
	struct BCState *bcs = cs->bcs + hscx;
	struct sk_buff *skb;
	int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags)? 64: 32;
	int count;

	if (!test_bit(BC_FLG_INIT, &bcs->Flag))
		return;

	if (val & 0x80) {	/* RME */
		r = READHSCX(cs, hscx, HSCX_RSTA);
		if ((r & 0xf0) != 0xa0) {
			if (!(r & 0x80)) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX invalid frame");
#ifdef ERROR_STATISTIC
				bcs->err_inv++;
#endif
			}
			if ((r & 0x40) && bcs->mode) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX RDO mode=%d",
						bcs->mode);
#ifdef ERROR_STATISTIC
				bcs->err_rdo++;
#endif
			}
			if (!(r & 0x20)) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX CRC error");
#ifdef ERROR_STATISTIC
				bcs->err_crc++;
#endif
			}
			WriteHSCXCMDR(cs, hscx, 0x80);
		} else {
			count = READHSCX(cs, hscx, HSCX_RBCL) & (
				test_bit(HW_IPAC, &cs->HW_Flags)? 0x3f: 0x1f);
			if (count == 0)
				count = fifo_size;
			hscx_empty_fifo(bcs, count);
			if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "HX Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "HSCX: receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.hscx.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		hscx_empty_fifo(bcs, fifo_size);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(fifo_size)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.hscx.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & 0x10) {	/* XPR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				hscx_fill_fifo(bcs);
				return;
			} else {
				if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
					(PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long	flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.hscx.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hscx.count = 0; 
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.hscx.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			hscx_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}
Esempio n. 28
0
static void pxa250_irda_txdma_irq(int ch, void *id, struct pt_regs *regs)
{
   struct net_device *dev=id;
   struct pxa250_irda *si=dev->priv;
   struct sk_buff *skb = si->txskb;
   u_int dcsr;


   __ECHO_IN;
   DBG_IRQ("transmit\n"); 
   
     
   /* 
    * Make sure that the irq is ours.
    */

   if ( ch != si->txdma_ch )
      /*just*/ return;


   /*
    * Check status 
    */
   dcsr = DCSR(ch);

   DBG("DCSR=%x",dcsr);

   if (dcsr &  DCSR_STOPSTATE )
   {
      DBG("Chanel %d in stop state\n",ch);
   }

   if (dcsr &  DCSR_BUSERR )
   {
      DBG("PXA IrDA: bus error interrupt on channel %d\n", ch);
      DCSR(ch) |= DCSR_BUSERR;
      si->txskb = NULL;
   }

   if (dcsr &  DCSR_ENDINTR )
   {
      DBG("PXA IrDA: Normal end of dma channel %d\n", ch);
      DCSR(ch) |= DCSR_ENDINTR;
      si->txskb = NULL;
   }

   /*
    * Account and free the packet.
    */
   if (skb)
   {
      si->stats.tx_packets ++;
      si->stats.tx_bytes += skb->len;
      dev_kfree_skb_irq(skb);
   }

	/* Disable the transmitter and enable the receiver */

	if (si->newspeed) {
	   pxa250_irda_set_speed(dev, si->newspeed);
	   si->newspeed = 0;
	}

	while (ICSR1 & ICSR1_TBY)
	   udelay(1);
	
     	ICCR0 &= ~ICCR0_TXE;

	
	enable_irq(si->fir_irq);

  	ICCR0 |= ICCR0_RXE; 

	/*
	 * Make sure that the TX queue is available for sending
	 * (for retries).  TX has priority over RX at all times.
	 */
	netif_wake_queue(dev);
	
	__ECHO_OUT;
}
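The DMA TX-done handler accounts the packet and frees it with dev_kfree_skb_irq() only after taking the skb pointer out of the channel's hands, so a racing restart cannot free it twice. The account-and-free step as a small sketch; the stats pointer stands in for si->stats:

/* Account-and-free step of the DMA TX-done path above; the stats
 * argument is an illustrative stand-in for the driver's counters. */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void tx_dma_done(struct net_device_stats *stats,
			struct sk_buff **txskb)
{
	struct sk_buff *skb = *txskb;

	*txskb = NULL;			/* the channel no longer owns it */
	if (skb) {
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);	/* we are in the DMA IRQ handler */
	}
}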
Esempio n. 29
0
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
//	unsigned long flags;
//   int max_package_size;
    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM

        if (RMNET_IS_MODE_IP(opmode) ? (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN)) :
                (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {

#endif

            pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
                   sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            ptr = 0;
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("rmnet_recv() cannot allocate skb\n");
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("rmnet_recv() smd lied about avail?!");
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */
                    //spin_lock_irqsave(&p->lock, flags);
                    //opmode = p->operation_mode;
                    //spin_unlock_irqrestore(&p->lock, flags);

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    netif_rx(skb);
                }
                continue;
            }
        }
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("rmnet_recv() smd lied about avail?!");
    }
}

//ZTE_RIL_RJG_20101103 end

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
               skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data xmited, safe to release skb */
    dev_kfree_skb_irq(skb);
    return 0;
}
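_rmnet_xmit() funnels the error path through the single xmit_out label, so the skb is freed exactly once whether or not smd_write() accepted the whole frame. The single-exit shape, sketched with a hypothetical write_fn in place of smd_write():

/* Single-exit free shape of _rmnet_xmit(); write_fn is an
 * illustrative stand-in for smd_write(). */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static int xmit_once(struct sk_buff *skb,
		     int (*write_fn)(const void *buf, int len))
{
	int ret = 0;

	if (write_fn(skb->data, skb->len) != skb->len) {
		ret = -EIO;
		goto out;	/* error path joins the common exit */
	}
	/* success-only accounting would go here */
out:
	dev_kfree_skb_irq(skb);	/* freed exactly once on every path */
	return ret;
}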

static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    unsigned long flags;

    /* xmit and enable the flow only once even if
       multiple tasklets were scheduled by smd_net_notify */
    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
        skb = p->skb;
        p->skb = NULL;
        spin_unlock_irqrestore(&p->lock, flags);
        _rmnet_xmit(skb, dev);
        netif_wake_queue(dev);
    } else
        spin_unlock_irqrestore(&p->lock, flags);
}

static void msm_rmnet_unload_modem(void *pil)
{
    if (pil)
        pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
    void *pil;
    int rc;
    struct rmnet_private *p = netdev_priv(dev);

    pil = pil_get("modem");
    if (IS_ERR(pil))
        pr_err("%s: modem load failed\n", __func__);
    else if (msm_rmnet_modem_wait) {
        rc = wait_for_completion_interruptible_timeout(
                 &p->complete,
                 msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
        if (!rc)
            rc = -ETIMEDOUT;
        if (rc < 0) {
            pr_err("%s: wait for rmnet port failed %d\n",
                   __func__, rc);
            msm_rmnet_unload_modem(pil);
            pil = ERR_PTR(rc);
        }
    }

    return pil;
}
Esempio n. 30
0
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev);
			
			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}
		
               /* Acknowledge the interrupt sources we handle here ASAP
                  the poll function does Rx and RxNoBuf acking */
		
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else 
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */
		
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					dev->name, tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}