static void xmit_slowpath_or_die(struct ctx *ctx, unsigned int cpu, unsigned long orig_num)
{
	int ret, icmp_sock = -1;
	unsigned long num = 1, i = 0;
	struct timeval start, end, diff;
	unsigned long long tx_bytes = 0, tx_packets = 0;
	struct packet_dyn *pktd;
	struct sockaddr_ll saddr = {
		.sll_family = PF_PACKET,
		.sll_halen = ETH_ALEN,
		.sll_ifindex = device_ifindex(ctx->device),
	};

	if (ctx->num > 0)
		num = ctx->num;
	if (ctx->num == 0 && orig_num > 0)
		num = 0;

	if (ctx->smoke_test)
		icmp_sock = xmit_smoke_setup(ctx);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0 && num > 0 && plen > 0)) {
		/* Rewrite the dynamic parts of the payload (counters,
		 * randomizers, checksums), if any are configured.
		 */
		pktd = &packet_dyn[i];
		if (pktd->clen + pktd->rlen + pktd->slen) {
			apply_counter(i);
			apply_randomizer(i);
			apply_csum16(i);
		}
retry:
		ret = sendto(sock, packets[i].payload, packets[i].len, 0,
			     (struct sockaddr *) &saddr, sizeof(saddr));
		if (unlikely(ret < 0)) {
			if (errno == ENOBUFS) {
				/* TX queue full: yield and retry. */
				sched_yield();
				goto retry;
			}

			panic("Sendto error: %s!\n", strerror(errno));
		}

		tx_bytes += packets[i].len;
		tx_packets++;

		if (ctx->smoke_test) {
			ret = xmit_smoke_probe(icmp_sock, ctx);
			if (unlikely(ret < 0)) {
				printf("%sSmoke test alert:%s\n",
				       colorize_start(bold), colorize_end());
				printf(" Remote host seems to be unresponsive to ICMP probes!\n");
				printf(" Last instance was packet%lu, seed:%u, trafgen snippet:\n\n",
				       i, seed);

				dump_trafgen_snippet(packets[i].payload, packets[i].len);
				break;
			}
		}

		if (!ctx->rand) {
			i++;
			if (i >= plen)
				i = 0;
		} else
			i = rand() % plen;

		if (ctx->num > 0)
			num--;

		/* Apply the configured inter-packet gap, if any. */
		if ((ctx->gap.tv_sec | ctx->gap.tv_nsec) > 0)
			nanosleep(&ctx->gap, NULL);
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	if (ctx->smoke_test)
		close(icmp_sock);

	stats[cpu].tx_packets = tx_packets;
	stats[cpu].tx_bytes = tx_bytes;
	stats[cpu].tv_sec = diff.tv_sec;
	stats[cpu].tv_usec = diff.tv_usec;
	stats[cpu].state |= CPU_STATS_STATE_RES;
}
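/*
 * The slow path above paces itself via nanosleep() on ctx->gap, a struct
 * timespec. A minimal sketch, assuming a user-facing gap option expressed
 * in microseconds, of how such a value could be turned into that timespec;
 * the helper name is hypothetical and not part of trafgen:
 */
static inline void gap_usecs_to_timespec(unsigned long long usecs, struct timespec *ts)
{
	/* Split microseconds into whole seconds plus a nanosecond remainder,
	 * so that tv_nsec always stays below 1e9 as nanosleep() requires.
	 */
	ts->tv_sec = usecs / 1000000ULL;
	ts->tv_nsec = (usecs % 1000000ULL) * 1000ULL;
}
/* Usage, e.g.: gap_usecs_to_timespec(100, &ctx->gap); for a 100us gap. */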
static void xmit_fastpath_or_die(struct ctx *ctx, unsigned int cpu, unsigned long orig_num)
{
	int ifindex = device_ifindex(ctx->device);
	uint8_t *out = NULL;
	unsigned int it = 0;
	unsigned long num = 1, i = 0;
	size_t size = ring_size(ctx->device, ctx->reserve_size);
	struct ring tx_ring;
	struct frame_map *hdr;
	struct timeval start, end, diff;
	struct packet_dyn *pktd;
	unsigned long long tx_bytes = 0, tx_packets = 0;

	set_sock_prio(sock, 512);

	ring_tx_setup(&tx_ring, sock, size, ifindex, ctx->jumbo_support, ctx->verbose);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	if (ctx->num > 0)
		num = ctx->num;
	if (ctx->num == 0 && orig_num > 0)
		num = 0;

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0 && num > 0 && plen > 0)) {
		if (!user_may_pull_from_tx(tx_ring.frames[it].iov_base)) {
			/* Frame still owned by the kernel: flush the TX ring
			 * and try this slot again.
			 */
			int ret = pull_and_flush_tx_ring(sock);
			if (unlikely(ret < 0)) {
				/* We could hit EBADF if the socket has been
				 * closed before the timer was triggered.
				 */
				if (errno != EBADF && errno != ENOBUFS)
					panic("Flushing TX_RING failed: %s!\n",
					      strerror(errno));
			}
			continue;
		}

		hdr = tx_ring.frames[it].iov_base;
		/* Packet data starts right after the frame header. */
		out = ((uint8_t *) hdr) + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

		hdr->tp_h.tp_snaplen = packets[i].len;
		hdr->tp_h.tp_len = packets[i].len;

		pktd = &packet_dyn[i];
		if (pktd->clen + pktd->rlen + pktd->slen) {
			apply_counter(i);
			apply_randomizer(i);
			apply_csum16(i);
		}

		fmemcpy(out, packets[i].payload, packets[i].len);

		tx_bytes += packets[i].len;
		tx_packets++;

		if (!ctx->rand) {
			i++;
			if (i >= plen)
				i = 0;
		} else
			i = rand() % plen;

		/* Hand the filled frame back to the kernel. */
		kernel_may_pull_from_tx(&hdr->tp_h);

		it++;
		if (it >= tx_ring.layout.tp_frame_nr)
			it = 0;

		if (ctx->num > 0)
			num--;
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	pull_and_flush_tx_ring_wait(sock);
	destroy_tx_ring(sock, &tx_ring);

	stats[cpu].tx_packets = tx_packets;
	stats[cpu].tx_bytes = tx_bytes;
	stats[cpu].tv_sec = diff.tv_sec;
	stats[cpu].tv_usec = diff.tv_usec;
	stats[cpu].state |= CPU_STATS_STATE_RES;
}
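/*
 * A minimal sketch of how a caller might choose between the two paths; the
 * dispatch function and the ctx->qdisc_path flag are illustrative assumptions,
 * not trafgen's actual entry point. The slow path (one sendto() per packet)
 * is needed whenever packets should traverse the qdisc layer, an inter-packet
 * gap is set, or the ICMP smoke test is active; otherwise the mmap(2)'ed
 * TX_RING fast path batches packets and only drops into the kernel when the
 * ring fills up.
 */
static void xmit_dispatch_sketch(struct ctx *ctx, unsigned int cpu, unsigned long orig_num)
{
	if (ctx->qdisc_path || ctx->smoke_test ||	/* qdisc_path is an assumed flag */
	    (ctx->gap.tv_sec | ctx->gap.tv_nsec) > 0)
		xmit_slowpath_or_die(ctx, cpu, orig_num);
	else
		xmit_fastpath_or_die(ctx, cpu, orig_num);
}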