static void update_pcap_next_dump(struct ctx *ctx, unsigned long snaplen, int *fd, int sock, bool is_v3) { if (!dump_to_pcap(ctx)) return; if (ctx->dump_mode == DUMP_INTERVAL_SIZE) { interval += snaplen; if (interval > ctx->dump_interval) { next_dump = true; interval = 0; } } if (sighup) { if (ctx->verbose) printf("SIGHUP received, prematurely rotating pcap\n"); sighup = 0; next_dump = true; reset_interval(ctx); } if (next_dump) { *fd = next_multi_pcap_file(ctx, *fd); next_dump = false; if (update_rx_stats(ctx, sock, is_v3)) return; if (ctx->verbose && ctx->print_mode == PRINT_NONE) printf(".(+%"PRIu64"/-%"PRIu64")", ctx->pkts_recvd_last - ctx->pkts_drops_last, ctx->pkts_drops_last); } }
static int au1k_irda_rx(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); volatile struct ring_dest *prxd; struct sk_buff *skb; struct db_dest *pDB; u32 flags, count; prxd = aup->rx_ring[aup->rx_head]; flags = prxd->flags; while (!(flags & AU_OWN)) { pDB = aup->rx_db_inuse[aup->rx_head]; count = (prxd->count_1 << 8) | prxd->count_0; if (!(flags & IR_RX_ERROR)) { /* good frame */ update_rx_stats(dev, flags, count); skb = alloc_skb(count + 1, GFP_ATOMIC); if (skb == NULL) { dev->stats.rx_dropped++; continue; } skb_reserve(skb, 1); if (aup->speed == 4000000) skb_put(skb, count); else skb_put(skb, count - 2); skb_copy_to_linear_data(skb, (void *)pDB->vaddr, count - 2); skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); prxd->count_0 = 0; prxd->count_1 = 0; } prxd->flags |= AU_OWN; aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1); irda_write(aup, IR_RING_PROMPT, 0); /* next descriptor */ prxd = aup->rx_ring[aup->rx_head]; flags = prxd->flags; } return 0; }
/*
 * Print the final receive statistics summary on exit: packets seen,
 * packets that passed/failed the filter, and the resulting drop rate.
 * Silently returns if the kernel stats cannot be refreshed.
 */
static void dump_rx_stats(struct ctx *ctx, int sock, bool is_v3)
{
	uint64_t incoming, unread, passed;

	if (update_rx_stats(ctx, sock, is_v3))
		return;

	/* TPACKET_V3 tracks frames seen separately from frames received. */
	incoming = is_v3 ? ctx->pkts_seen : ctx->pkts_recvd;
	unread   = is_v3 ? ctx->pkts_recvd - ctx->pkts_seen : 0;
	passed   = ctx->pkts_recvd - ctx->pkts_drops;

	printf("\r%12"PRIu64" packets incoming (%"PRIu64" unread on exit)\n",
	       incoming, unread);
	printf("\r%12"PRIu64" packets passed filter\n", passed);
	printf("\r%12"PRIu64" packets failed filter (out of space)\n",
	       ctx->pkts_drops);
	if (ctx->pkts_recvd > 0)
		printf("\r%12.4lf%% packet droprate\n",
		       (1.0 * ctx->pkts_drops / ctx->pkts_recvd) * 100.0);
}
/* * Au1000 receive routine. */ static int au1000_rx(struct net_device *dev) { struct au1000_private *aup = (struct au1000_private *) dev->priv; struct sk_buff *skb; volatile rx_dma_t *prxd; u32 buff_stat, status; db_dest_t *pDB; if (au1000_debug > 4) printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; while (buff_stat & RX_T_DONE) { status = prxd->status; pDB = aup->rx_db_inuse[aup->rx_head]; update_rx_stats(dev, status); if (!(status & RX_ERROR)) { /* good frame */ skb = dev_alloc_skb((status & RX_FRAME_LEN_MASK) + 2); if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); aup->stats.rx_dropped++; continue; } skb->dev = dev; skb_reserve(skb, 2); /* 16 byte IP header align */ eth_copy_and_sum(skb, (unsigned char *)pDB->vaddr, status & RX_FRAME_LEN_MASK, 0); skb_put(skb, status & RX_FRAME_LEN_MASK); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* pass the packet to upper layers */ } else { if (au1000_debug > 4) { if (status & RX_MISSED_FRAME) printk("rx miss\n"); if (status & RX_WDOG_TIMER) printk("rx wdog\n"); if (status & RX_RUNT) printk("rx runt\n"); if (status & RX_OVERLEN) printk("rx overlen\n"); if (status & RX_COLL) printk("rx coll\n"); if (status & RX_MII_ERROR) printk("rx mii error\n"); if (status & RX_CRC_ERROR) printk("rx crc error\n"); if (status & RX_LEN_ERROR) printk("rx len error\n"); if (status & RX_U_CNTRL_FRAME) printk("rx u control frame\n"); if (status & RX_MISSED_FRAME) printk("rx miss\n"); } } prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); au_sync(); /* next descriptor */ prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; dev->last_rx = jiffies; } return 0; }
/* If this function is called without the NONBLOCK flag from a reactor
 * handler running in coprocess context, the call to flux_sleep_on()
 * will allow the reactor to run until a message matching 'match' arrives.
 * The flux_sleep_on() call will then resume, and the next call to recv()
 * will return the matching message. If not running in coprocess context,
 * flux_sleep_on() will fail with EINVAL. In that case, the do loop
 * reading messages and comparing them to match criteria may have to read
 * a few non-matching messages before finding a match. On return, those
 * non-matching messages have to be requeued in the handle, hence the
 * defer_*() helper calls.
 *
 * Returns the matching message (ownership transfers to the caller), or
 * NULL with errno set: EWOULDBLOCK when non-blocking and no match is
 * available yet, or the failing call's errno on a fatal error.
 */
flux_msg_t *flux_recv (flux_t h, struct flux_match match, int flags)
{
    zlist_t *l = NULL;          /* deferred (non-matching) messages */
    flux_msg_t *msg = NULL;
    int saved_errno;

    /* Handle-level flags combine with per-call flags. */
    flags |= h->flags;
    if (!(flags & FLUX_O_NONBLOCK) && (flags & FLUX_O_COPROC)
                                   && flux_sleep_on (h, match) < 0) {
        /* EINVAL means "not in coprocess context": fall back to the
         * read-and-defer loop below rather than failing. */
        if (errno != EINVAL)
            goto fatal;
        errno = 0;
    }
    do {
        if (!(msg = flux_recv_any (h, flags))) {
            if (errno != EAGAIN && errno != EWOULDBLOCK)
                goto fatal;
            /* Would block: put any deferred messages back first so
             * nothing is lost, then report EWOULDBLOCK. */
            if (defer_requeue (&l, h) < 0)
                goto fatal;
            defer_destroy (&l);
            errno = EWOULDBLOCK;
            return NULL;
        }
        if (!flux_msg_cmp (msg, match)) {
            /* Non-matching message: stash it for requeue and keep reading. */
            if (defer_enqueue (&l, msg) < 0)
                goto fatal;
            msg = NULL;
        }
    } while (!msg);
    update_rx_stats (h, msg);
    if ((flags & FLUX_O_TRACE))
        flux_msg_fprint (stderr, msg);
    /* Requeue deferred messages before returning the match. */
    if (defer_requeue (&l, h) < 0)
        goto fatal;
    defer_destroy (&l);
#if HAVE_CALIPER
    /* Caliper profiling annotations around the received message. */
    cali_begin_int (h->prof.msg_match_type, match.typemask);
    cali_begin_int (h->prof.msg_match_tag, match.matchtag);
    cali_begin_string (h->prof.msg_match_glob,
                       match.topic_glob ? match.topic_glob : "NONE");
    char *sender = NULL;
    flux_msg_get_route_first (msg, &sender);
    if (sender)
        cali_begin_string (h->prof.msg_sender, sender);
    profiling_msg_snapshot (h, msg, flags, "recv");
    if (sender)
        cali_end (h->prof.msg_sender);
    cali_end (h->prof.msg_match_type);
    cali_end (h->prof.msg_match_tag);
    cali_end (h->prof.msg_match_glob);
    free (sender);
#endif
    return msg;
fatal:
    /* Preserve the failing errno across FLUX_FATAL and cleanup. */
    saved_errno = errno;
    FLUX_FATAL (h);
    if (msg)
        flux_msg_destroy (msg);
    defer_destroy (&l);
    errno = saved_errno;
    return NULL;
}
static int au1000_rx(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); struct sk_buff *skb; volatile rx_dma_t *prxd; u32 buff_stat, status; db_dest_t *pDB; u32 frmlen; if (au1000_debug > 5) printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; while (buff_stat & RX_T_DONE) { status = prxd->status; pDB = aup->rx_db_inuse[aup->rx_head]; update_rx_stats(dev, status); if (!(status & RX_ERROR)) { frmlen = (status & RX_FRAME_LEN_MASK); frmlen -= 4; skb = dev_alloc_skb(frmlen + 2); if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; continue; } skb_reserve(skb, 2); skb_copy_to_linear_data(skb, (unsigned char *)pDB->vaddr, frmlen); skb_put(skb, frmlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); } else { if (au1000_debug > 4) { if (status & RX_MISSED_FRAME) printk("rx miss\n"); if (status & RX_WDOG_TIMER) printk("rx wdog\n"); if (status & RX_RUNT) printk("rx runt\n"); if (status & RX_OVERLEN) printk("rx overlen\n"); if (status & RX_COLL) printk("rx coll\n"); if (status & RX_MII_ERROR) printk("rx mii error\n"); if (status & RX_CRC_ERROR) printk("rx crc error\n"); if (status & RX_LEN_ERROR) printk("rx len error\n"); if (status & RX_U_CNTRL_FRAME) printk("rx u control frame\n"); } } prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); au_sync(); prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; } return 0; }