static void reset_interval(struct ctx *ctx) { if (ctx->dump_mode == DUMP_INTERVAL_TIME) { interval = ctx->dump_interval; set_itimer_interval_value(&itimer, interval, 0); setitimer(ITIMER_REAL, &itimer, NULL); } else { interval = 0; } }
/*
 * Final teardown of the TX path timer: drain the TX_RING one last time
 * (waiting for completion) and cancel the pending ITIMER_REAL.  EBADF
 * (socket already closed) and ENOBUFS are tolerated; anything else is
 * fatal.
 */
static void timer_purge(void)
{
	int ret = pull_and_flush_tx_ring_wait(tx_sock);

	if (unlikely(ret < 0) && errno != EBADF && errno != ENOBUFS)
		panic("Flushing TX_RING failed: %s!\n", strerror(errno));

	/* Zero interval and value disarm the timer completely. */
	set_itimer_interval_value(&itimer, 0, 0);
	setitimer(ITIMER_REAL, &itimer, NULL);
}
/*
 * SIGALRM handler for the kernel-pull timer: flush whatever is queued in
 * the TX_RING and re-arm the timer as a one-shot firing after 'interval'.
 */
static void timer_elapsed(int number)
{
	int ret = pull_and_flush_tx_ring(sock);

	/* The socket may already have been closed by the time the timer
	 * fires, which surfaces as EBADF; ENOBUFS is transient.  Anything
	 * else means the TX path is broken. */
	if (unlikely(ret < 0) && errno != EBADF && errno != ENOBUFS)
		panic("Flushing TX_RING failed: %s!\n", strerror(errno));

	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);
}
static int begin_multi_pcap_file(struct ctx *ctx) { int fd, ret; char fname[256]; bug_on(!__pcap_io); if (ctx->device_out[strlen(ctx->device_out) - 1] == '/') ctx->device_out[strlen(ctx->device_out) - 1] = 0; slprintf(fname, sizeof(fname), "%s/%s%lu.pcap", ctx->device_out, ctx->prefix ? : "dump-", time(0)); fd = open_or_die_m(fname, O_RDWR | O_CREAT | O_TRUNC | O_LARGEFILE, DEFFILEMODE); ret = __pcap_io->push_fhdr_pcap(fd, ctx->magic, ctx->link_type); if (ret) panic("Error writing pcap header!\n"); if (__pcap_io->prepare_access_pcap) { ret = __pcap_io->prepare_access_pcap(fd, PCAP_MODE_WR, true); if (ret) panic("Error prepare writing pcap!\n"); } if (ctx->dump_mode == DUMP_INTERVAL_TIME) { interval = ctx->dump_interval; set_itimer_interval_value(&itimer, interval, 0); setitimer(ITIMER_REAL, &itimer, NULL); } else { interval = 0; } return fd; }
/*
 * Redirect mode: pull frames from the ingress device's RX_RING and push
 * them out through the egress device's TX_RING, optionally filtered by a
 * BPF program and dissected/printed on the way.  Runs until SIGINT or
 * until frame_count_max frames have been seen.
 *
 * Uses the file-scope globals tx_sock, interval, itimer, sigint and
 * frame_count_max.
 */
static void receive_to_xmit(struct ctx *ctx)
{
	short ifflags = 0;
	uint8_t *in, *out;
	int rx_sock, ifindex_in, ifindex_out;
	unsigned int size_in, size_out, it_in = 0, it_out = 0;
	unsigned long frame_count = 0;
	struct frame_map *hdr_in, *hdr_out;
	struct ring tx_ring, rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;

	if (!strncmp(ctx->device_in, ctx->device_out, IFNAMSIZ))
		panic("Ingress/egress devices must be different!\n");
	if (!device_up_and_running(ctx->device_out))
		panic("Egress device not up and running!\n");

	rx_sock = pf_socket();
	tx_sock = pf_socket();

	fmemset(&tx_ring, 0, sizeof(tx_ring));
	fmemset(&rx_ring, 0, sizeof(rx_ring));
	fmemset(&rx_poll, 0, sizeof(rx_poll));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex_in = device_ifindex(ctx->device_in);
	ifindex_out = device_ifindex(ctx->device_out);

	size_in = ring_size(ctx->device_in, ctx->reserve_size);
	size_out = ring_size(ctx->device_out, ctx->reserve_size);

	enable_kernel_bpf_jit_compiler();

	/* The BPF filter is attached to the RX side only; frames that pass
	 * it are copied verbatim to the TX ring. */
	bpf_parse_rules(ctx->filter, &bpf_ops, ctx->link_type);
	if (ctx->dump_bpf)
		bpf_dump_all(&bpf_ops);
	bpf_attach_to_sock(rx_sock, &bpf_ops);

	/* RX ring setup: layout, creation, mmap, frame vector, bind. */
	setup_rx_ring_layout(rx_sock, &rx_ring, size_in, ctx->jumbo, false);
	create_rx_ring(rx_sock, &rx_ring, ctx->verbose);
	mmap_rx_ring(rx_sock, &rx_ring);
	alloc_rx_ring_frames(rx_sock, &rx_ring);
	bind_rx_ring(rx_sock, &rx_ring, ifindex_in);
	prepare_polling(rx_sock, &rx_poll);

	/* TX ring setup, mirroring the RX side on the egress device. */
	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size_out, ctx->jumbo);
	create_tx_ring(tx_sock, &tx_ring, ctx->verbose);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(tx_sock, &tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex_out);

	dissector_init_all(ctx->print_mode);

	if (ctx->promiscuous)
		ifflags = enter_promiscuous_mode(ctx->device_in);

	/* ctx->kpull drives the periodic kernel-pull timer (timer_elapsed). */
	if (ctx->kpull)
		interval = ctx->kpull;

	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);

	/* Rings are mapped and bound; root is no longer needed. */
	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	printf("Running! Hang up with ^C!\n\n");
	fflush(stdout);

	while (likely(sigint == 0)) {
		/* Drain every frame the kernel has handed to user space. */
		while (user_may_pull_from_rx(rx_ring.frames[it_in].iov_base)) {
			__label__ next;

			hdr_in = rx_ring.frames[it_in].iov_base;
			in = ((uint8_t *) hdr_in) + hdr_in->tp_h.tp_mac;

			frame_count++;

			/* Skip frames not matching the requested packet type
			 * (-1 means "accept all"). */
			if (ctx->packet_type != -1)
				if (ctx->packet_type != hdr_in->s_ll.sll_pkttype)
					goto next;

			hdr_out = tx_ring.frames[it_out].iov_base;
			out = ((uint8_t *) hdr_out) + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

			/* Busy-advance through TX slots until one is free for
			 * user space (or SIGINT arrives). */
			for (; !user_may_pull_from_tx(tx_ring.frames[it_out].iov_base) &&
			       likely(!sigint);) {
				if (ctx->randomize)
					next_rnd_slot(&it_out, &tx_ring);
				else {
					it_out++;
					if (it_out >= tx_ring.layout.tp_frame_nr)
						it_out = 0;
				}

				hdr_out = tx_ring.frames[it_out].iov_base;
				out = ((uint8_t *) hdr_out) + TPACKET2_HDRLEN -
				      sizeof(struct sockaddr_ll);
			}

			/* Copy header metadata and payload into the TX slot,
			 * then hand the slot back to the kernel. */
			tpacket_hdr_clone(&hdr_out->tp_h, &hdr_in->tp_h);
			fmemcpy(out, in, hdr_in->tp_h.tp_len);

			kernel_may_pull_from_tx(&hdr_out->tp_h);
			if (ctx->randomize)
				next_rnd_slot(&it_out, &tx_ring);
			else {
				it_out++;
				if (it_out >= tx_ring.layout.tp_frame_nr)
					it_out = 0;
			}

			show_frame_hdr(hdr_in, ctx->print_mode);
			dissector_entry_point(in, hdr_in->tp_h.tp_snaplen,
					      ctx->link_type, ctx->print_mode);

			/* Honor -n style frame limit by simulating SIGINT. */
			if (frame_count_max != 0) {
				if (frame_count >= frame_count_max) {
					sigint = 1;
					break;
				}
			}

		next:
			/* Return the RX slot to the kernel and advance. */
			kernel_may_pull_from_rx(&hdr_in->tp_h);

			it_in++;
			if (it_in >= rx_ring.layout.tp_frame_nr)
				it_in = 0;

			if (unlikely(sigint == 1))
				goto out;
		}

		/* Nothing pending: block until the kernel signals new frames. */
		poll(&rx_poll, 1, -1);
	}

out:
	timer_purge();
	sock_rx_net_stats(rx_sock, 0);

	bpf_release(&bpf_ops);
	dissector_cleanup_all();
	destroy_tx_ring(tx_sock, &tx_ring);
	destroy_rx_ring(rx_sock, &rx_ring);

	if (ctx->promiscuous)
		leave_promiscuous_mode(ctx->device_in, ifflags);

	close(tx_sock);
	close(rx_sock);
}
/*
 * Replay a pcap file (or stdin when device_in is "-") onto a network
 * device through a TX_RING, optionally filtering with BPF, optionally
 * in rfmon (802.11 monitor) mode.  Prints per-run statistics at the end.
 *
 * Uses the file-scope globals tx_sock, interval, itimer, sigint and
 * frame_count_max.
 */
static void pcap_to_xmit(struct ctx *ctx)
{
	/* NOTE: the label 'out' and the variable 'out' share a spelling;
	 * labels and ordinary identifiers live in separate C namespaces. */
	__label__ out;
	uint8_t *out = NULL;
	int irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	unsigned long trunced = 0;
	struct ring tx_ring;
	struct frame_map *hdr;
	struct sock_fprog bpf_ops;
	struct timeval start, end, diff;
	pcap_pkthdr_t phdr;

	if (!device_up_and_running(ctx->device_out) && !ctx->rfraw)
		panic("Device not up and running!\n");

	bug_on(!__pcap_io);

	tx_sock = pf_socket();

	/* "-" means read the capture from stdin; mmap-based pcap I/O cannot
	 * work on a pipe, so fall back to scatter/gather ops. */
	if (!strncmp("-", ctx->device_in, strlen("-"))) {
		fd = dup_or_die(fileno(stdin));
		close(fileno(stdin));
		if (ctx->pcap == PCAP_OPS_MM)
			ctx->pcap = PCAP_OPS_SG;
	} else {
		fd = open_or_die(ctx->device_in, O_RDONLY | O_LARGEFILE | O_NOATIME);
	}

	if (__pcap_io->init_once_pcap)
		__pcap_io->init_once_pcap();

	ret = __pcap_io->pull_fhdr_pcap(fd, &ctx->magic, &ctx->link_type);
	if (ret)
		panic("Error reading pcap header!\n");

	if (__pcap_io->prepare_access_pcap) {
		ret = __pcap_io->prepare_access_pcap(fd, PCAP_MODE_RD, ctx->jumbo);
		if (ret)
			panic("Error prepare reading pcap!\n");
	}

	fmemset(&tx_ring, 0, sizeof(tx_ring));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	/* rfmon: swap device_out for a monitor-mode interface; only valid
	 * for captures whose link type is 802.11. */
	if (ctx->rfraw) {
		ctx->device_trans = xstrdup(ctx->device_out);
		xfree(ctx->device_out);

		enter_rfmon_mac80211(ctx->device_trans, &ctx->device_out);
		if (ctx->link_type != LINKTYPE_IEEE802_11)
			panic("Wrong linktype of pcap!\n");
	}

	ifindex = device_ifindex(ctx->device_out);
	size = ring_size(ctx->device_out, ctx->reserve_size);

	bpf_parse_rules(ctx->filter, &bpf_ops, ctx->link_type);
	if (ctx->dump_bpf)
		bpf_dump_all(&bpf_ops);

	/* TX ring setup on the egress device. */
	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size, ctx->jumbo);
	create_tx_ring(tx_sock, &tx_ring, ctx->verbose);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(tx_sock, &tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex);

	dissector_init_all(ctx->print_mode);

	/* Optionally pin the device IRQ to the requested CPU. */
	if (ctx->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(ctx->device_out);
		device_set_irq_affinity(irq, ctx->cpu);

		if (ctx->verbose)
			printf("IRQ: %s:%d > CPU%d\n",
			       ctx->device_out, irq, ctx->cpu);
	}

	if (ctx->kpull)
		interval = ctx->kpull;

	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	printf("Running! Hang up with ^C!\n\n");
	fflush(stdout);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0)) {
		while (user_may_pull_from_tx(tx_ring.frames[it].iov_base)) {
			hdr = tx_ring.frames[it].iov_base;
			out = ((uint8_t *) hdr) + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

			/* Read packets until one passes the BPF filter (or
			 * none is configured); read_pcap <= 0 means EOF or
			 * error, which ends the replay. */
			do {
				ret = __pcap_io->read_pcap(fd, &phdr, ctx->magic, out,
							   ring_frame_size(&tx_ring));
				if (unlikely(ret <= 0))
					goto out;

				/* Packets larger than a ring frame are
				 * truncated and counted. */
				if (ring_frame_size(&tx_ring) <
				    pcap_get_length(&phdr, ctx->magic)) {
					pcap_set_length(&phdr, ctx->magic,
							ring_frame_size(&tx_ring));
					trunced++;
				}
			} while (ctx->filter &&
				 !bpf_run_filter(&bpf_ops, out,
						 pcap_get_length(&phdr, ctx->magic)));

			pcap_pkthdr_to_tpacket_hdr(&phdr, ctx->magic, &hdr->tp_h, &hdr->s_ll);

			ctx->tx_bytes += hdr->tp_h.tp_len;;
			ctx->tx_packets++;

			show_frame_hdr(hdr, ctx->print_mode);
			dissector_entry_point(out, hdr->tp_h.tp_snaplen,
					      ctx->link_type, ctx->print_mode);

			/* Hand the filled slot to the kernel and advance. */
			kernel_may_pull_from_tx(&hdr->tp_h);

			it++;
			if (it >= tx_ring.layout.tp_frame_nr)
				it = 0;

			if (unlikely(sigint == 1))
				break;

			if (frame_count_max != 0) {
				if (ctx->tx_packets >= frame_count_max) {
					sigint = 1;
					break;
				}
			}
		}
	}

out:
	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	timer_purge();

	bpf_release(&bpf_ops);

	dissector_cleanup_all();
	destroy_tx_ring(tx_sock, &tx_ring);

	if (ctx->rfraw)
		leave_rfmon_mac80211(ctx->device_trans, ctx->device_out);

	if (__pcap_io->prepare_close_pcap)
		__pcap_io->prepare_close_pcap(fd, PCAP_MODE_RD);

	/* Restore stdin if we replayed from it. */
	if (!strncmp("-", ctx->device_in, strlen("-")))
		dup2(fd, fileno(stdin));
	close(fd);

	close(tx_sock);

	fflush(stdout);
	printf("\n");
	printf("\r%12lu packets outgoing\n", ctx->tx_packets);
	printf("\r%12lu packets truncated in file\n", trunced);
	printf("\r%12lu bytes outgoing\n", ctx->tx_bytes);
	printf("\r%12lu sec, %lu usec in total\n", diff.tv_sec, diff.tv_usec);
}
/*
 * SIGALRM handler for interval-based multi-pcap dumping: flag that the
 * next dump file is due and re-arm the one-shot ITIMER_REAL for another
 * 'interval' seconds.
 */
static void timer_next_dump(int unused __maybe_unused)
{
	next_dump = true;
	set_itimer_interval_value(&itimer, interval, 0);
	setitimer(ITIMER_REAL, &itimer, NULL);
}
/*
 * Slow-path transmitter: send each packet with sendto() on the global
 * PF_PACKET socket 'sock'.  Supports randomized packet selection, an
 * inter-packet gap (usleep), and an optional ICMP smoke test.  Per-CPU
 * results are stored in stats[cpu].
 *
 * ctx->num == 0 with orig_num > 0 means "this worker got no share of the
 * total count" — num is forced to 0 so the loop never runs.
 */
static void xmit_slowpath_or_die(struct ctx *ctx, int cpu, unsigned long orig_num)
{
	int ret, icmp_sock = -1;
	unsigned long num = 1, i = 0;
	struct timeval start, end, diff;
	unsigned long long tx_bytes = 0, tx_packets = 0;
	struct packet_dyn *pktd;
	struct sockaddr_ll saddr = {
		.sll_family = PF_PACKET,
		.sll_halen = ETH_ALEN,
		.sll_ifindex = device_ifindex(ctx->device),
	};

	if (ctx->num > 0)
		num = ctx->num;
	if (ctx->num == 0 && orig_num > 0)
		num = 0;

	if (ctx->smoke_test)
		icmp_sock = xmit_smoke_setup(ctx);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0) && likely(num > 0) && likely(plen > 0)) {
		/* Apply dynamic fields (counters, randomizers, checksums)
		 * only when the packet actually has any. */
		pktd = &packet_dyn[i];
		if (pktd->clen + pktd->rlen + pktd->slen) {
			apply_counter(i);
			apply_randomizer(i);
			apply_csum16(i);
		}
retry:
		ret = sendto(sock, packets[i].payload, packets[i].len, 0,
			     (struct sockaddr *) &saddr, sizeof(saddr));
		if (unlikely(ret < 0)) {
			/* ENOBUFS: device queue full; yield and retry the
			 * same packet rather than dropping it. */
			if (errno == ENOBUFS) {
				sched_yield();
				goto retry;
			}

			panic("Sendto error: %s!\n", strerror(errno));
		}

		tx_bytes += packets[i].len;
		tx_packets++;

		/* Smoke test: probe the remote host; on failure dump the
		 * offending packet as a trafgen snippet and stop. */
		if (ctx->smoke_test) {
			ret = xmit_smoke_probe(icmp_sock, ctx);
			if (unlikely(ret < 0)) {
				printf("%sSmoke test alert:%s\n",
				       colorize_start(bold), colorize_end());
				printf(" Remote host seems to be unresponsive to ICMP probes!\n");
				printf(" Last instance was packet%lu, seed:%u, trafgen snippet:\n\n",
				       i, seed);

				dump_trafgen_snippet(packets[i].payload, packets[i].len);
				break;
			}
		}

		/* Sequential or random walk over the packet set. */
		if (!ctx->rand) {
			i++;
			if (i >= plen)
				i = 0;
		} else
			i = rand() % plen;

		if (ctx->num > 0)
			num--;

		if (ctx->gap > 0)
			usleep(ctx->gap);
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	if (ctx->smoke_test)
		close(icmp_sock);

	stats[cpu].tx_packets = tx_packets;
	stats[cpu].tx_bytes = tx_bytes;
	stats[cpu].tv_sec = diff.tv_sec;
	stats[cpu].tv_usec = diff.tv_usec;
	stats[cpu].state |= CPU_STATS_STATE_RES;
}

/*
 * Fast-path transmitter: copy packets into a mmap'ed TX_RING on the
 * global PF_PACKET socket 'sock' and let the kernel-pull timer flush
 * them.  Same counting semantics as the slow path; per-CPU results are
 * stored in stats[cpu].
 */
static void xmit_fastpath_or_die(struct ctx *ctx, int cpu, unsigned long orig_num)
{
	int ifindex = device_ifindex(ctx->device);
	uint8_t *out = NULL;
	unsigned int it = 0;
	unsigned long num = 1, i = 0, size;
	struct ring tx_ring;
	struct frame_map *hdr;
	struct timeval start, end, diff;
	struct packet_dyn *pktd;
	unsigned long long tx_bytes = 0, tx_packets = 0;

	fmemset(&tx_ring, 0, sizeof(tx_ring));

	size = ring_size(ctx->device, ctx->reserve_size);

	set_sock_prio(sock, 512);
	set_packet_loss_discard(sock);

	/* TX ring setup: layout, creation, mmap, frame vector, bind. */
	setup_tx_ring_layout(sock, &tx_ring, size, ctx->jumbo_support);
	create_tx_ring(sock, &tx_ring, ctx->verbose);
	mmap_tx_ring(sock, &tx_ring);
	alloc_tx_ring_frames(sock, &tx_ring);
	bind_tx_ring(sock, &tx_ring, ifindex);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	if (ctx->kpull)
		interval = ctx->kpull;
	if (ctx->num > 0)
		num = ctx->num;
	if (ctx->num == 0 && orig_num > 0)
		num = 0;

	/* Arm the periodic kernel-pull timer that flushes the ring. */
	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0) && likely(num > 0) && likely(plen > 0)) {
		/* Fill every currently user-owned ring slot. */
		while (user_may_pull_from_tx(tx_ring.frames[it].iov_base) &&
		       likely(num > 0)) {
			hdr = tx_ring.frames[it].iov_base;
			out = ((uint8_t *) hdr) + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

			hdr->tp_h.tp_snaplen = packets[i].len;
			hdr->tp_h.tp_len = packets[i].len;

			/* Dynamic fields, as in the slow path. */
			pktd = &packet_dyn[i];
			if (pktd->clen + pktd->rlen + pktd->slen) {
				apply_counter(i);
				apply_randomizer(i);
				apply_csum16(i);
			}

			fmemcpy(out, packets[i].payload, packets[i].len);
			tx_bytes += packets[i].len;
			tx_packets++;

			if (!ctx->rand) {
				i++;
				if (i >= plen)
					i = 0;
			} else
				i = rand() % plen;

			/* Hand the slot to the kernel and advance. */
			kernel_may_pull_from_tx(&hdr->tp_h);

			it++;
			if (it >= tx_ring.layout.tp_frame_nr)
				it = 0;

			if (ctx->num > 0)
				num--;

			if (unlikely(sigint == 1))
				break;
		}
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	timer_purge();

	destroy_tx_ring(sock, &tx_ring);

	stats[cpu].tx_packets = tx_packets;
	stats[cpu].tx_bytes = tx_bytes;
	stats[cpu].tv_sec = diff.tv_sec;
	stats[cpu].tv_usec = diff.tv_usec;
	stats[cpu].state |= CPU_STATS_STATE_RES;
}