/**
 * ks8695_init_partial_multicast - Init the mcast addr registers
 * @ksp: The device to initialise
 * @ndev: The network device whose multicast list is to be programmed
 *
 * This routine is a helper for ks8695_set_multicast - it writes
 * the additional-address registers in the KS8695 ethernet device
 * and cleans up any others left behind.
 */
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
			      struct net_device *ndev)
{
	u32 low, high;
	int i;
	struct netdev_hw_addr *ha;

	i = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		/* Ran out of space in chip? The caller is expected to
		 * have checked the list fits; this is a driver bug if hit. */
		BUG_ON(i == KS8695_NR_ADDRESSES);

		/* Pack the 6-byte MAC into the chip's low/high register
		 * layout: bytes 2-5 in AAL, bytes 0-1 in AAH. */
		low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
		      (ha->addr[4] << 8) | (ha->addr[5]);
		high = (ha->addr[0] << 8) | (ha->addr[1]);

		ks8695_writereg(ksp, KS8695_AAL_(i), low);
		/* AAH_E enables this additional-address slot */
		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
		i++;
	}

	/* Clear the remaining Additional Station Addresses so stale
	 * entries from a previous list do not keep matching. */
	for (; i < KS8695_NR_ADDRESSES; i++) {
		ks8695_writereg(ksp, KS8695_AAL_(i), 0);
		ks8695_writereg(ksp, KS8695_AAH_(i), 0);
	}
}
/**
 * ks8695_rx_irq - Receive IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Process the RX ring, passing any received packets up to the
 * host.  If we received anything other than errors, we then
 * refill the ring.
 *
 * Walks the ring from @next_rx_desc_read until a descriptor still
 * owned by the hardware (RDES_OWN set) or an unpopulated slot is
 * found, then records where to resume, refills the ring and kicks
 * the RX DMA engine.  Always returns IRQ_HANDLED.
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int last_rx_processed = -1;	/* -1 => no descriptor handled yet */

	buff_n = ksp->next_rx_desc_read;
	do {
		if (ksp->rx_buffers[buff_n].skb &&
		    !(ksp->rx_ring[buff_n].status &
		      cpu_to_le32(RDES_OWN))) {
			/* Barrier between the OWN-bit check above and the
			 * re-read of the descriptor status below, so we do
			 * not see descriptor contents from before the DMA
			 * engine released it. */
			rmb();
			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
			/* Found an SKB which we own, this means we
			 * received a packet */
			if ((flags & (RDES_FS | RDES_LS)) !=
			    (RDES_FS | RDES_LS)) {
				/* This packet is not the first and
				 * the last segment.  Therefore it is
				 * a "spanning" packet and we can't
				 * handle it */
				goto rx_failure;
			}

			if (flags & (RDES_ES | RDES_RE)) {
				/* It's an error packet: bump the per-cause
				 * counters.  NOTE(review): RDES_RF is
				 * counted as a length error here alongside
				 * RDES_TL — looks intentional (runt/too-long
				 * both framed as length errors), but confirm
				 * against the KS8695 datasheet. */
				ndev->stats.rx_errors++;
				if (flags & RDES_TL)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_RF)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_CE)
					ndev->stats.rx_crc_errors++;
				if (flags & RDES_RE)
					ndev->stats.rx_missed_errors++;
				goto rx_failure;
			}

			/* Frame length lives in the low bits of status */
			pktlen = flags & RDES_FLEN;
			pktlen -= 4; /* Drop the CRC */

			/* Retrieve the sk_buff */
			skb = ksp->rx_buffers[buff_n].skb;

			/* Clear it from the ring */
			ksp->rx_buffers[buff_n].skb = NULL;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap the SKB before the CPU touches its data */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);

			/* Relinquish the SKB to the network layer */
			skb_put(skb, pktlen);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);

			/* Record stats */
			ndev->last_rx = jiffies;
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pktlen;
			goto rx_finished;

rx_failure:
			/* This ring entry is an error, but we can
			 * re-use the skb */
			/* Give the ring entry back to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
			/* And note this as processed so we can start
			 * from here next time */
			last_rx_processed = buff_n;
		} else {
			/* Ran out of things to process, stop now */
			break;
		}
		/* Ring is a power of two; mask wraps the index */
		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	} while (buff_n != ksp->next_rx_desc_read);

	/* And note which RX descriptor we last did anything with */
	if (likely(last_rx_processed != -1))
		ksp->next_rx_desc_read =
			(last_rx_processed + 1) & MAX_RX_DESC_MASK;

	/* And refill the buffers */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine, in case it became suspended */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return IRQ_HANDLED;
}