// one sender thread
int send_run(void)
{
	log_debug("send", "thread started");
	pthread_mutex_lock(&send_mutex);
	int sock = get_socket();
	struct sockaddr_ll sockaddr;
	// get source interface index
	struct ifreq if_idx;
	memset(&if_idx, 0, sizeof(struct ifreq));
	if (strlen(zconf.iface) >= IFNAMSIZ) {
		log_error("send", "device interface name (%s) too long\n",
				zconf.iface);
		return -1;
	}
	strncpy(if_idx.ifr_name, zconf.iface, IFNAMSIZ-1);
	if (ioctl(sock, SIOCGIFINDEX, &if_idx) < 0) {
		perror("SIOCGIFINDEX");
		return -1;
	}
	int ifindex = if_idx.ifr_ifindex;
	// get source interface mac
	struct ifreq if_mac;
	memset(&if_mac, 0, sizeof(struct ifreq));
	strncpy(if_mac.ifr_name, zconf.iface, IFNAMSIZ-1);
	if (ioctl(sock, SIOCGIFHWADDR, &if_mac) < 0) {
		perror("SIOCGIFHWADDR");
		return -1;
	}
	// find source IP address associated with the dev from which we're sending.
	// while we won't use this address for sending packets, we need the address
	// to set certain socket options and it's easiest to just use the primary
	// address the OS believes is associated.
	struct ifreq if_ip;
	memset(&if_ip, 0, sizeof(struct ifreq));
	strncpy(if_ip.ifr_name, zconf.iface, IFNAMSIZ-1);
	if (ioctl(sock, SIOCGIFADDR, &if_ip) < 0) {
		perror("SIOCGIFADDR");
		return -1;
	}
	// destination address for the socket
	memset((void*) &sockaddr, 0, sizeof(struct sockaddr_ll));
	sockaddr.sll_ifindex = ifindex;
	sockaddr.sll_halen = ETH_ALEN;
	memcpy(sockaddr.sll_addr, zconf.gw_mac, ETH_ALEN);
	char buf[MAX_PACKET_SIZE];
	memset(buf, 0, MAX_PACKET_SIZE);
	zconf.probe_module->thread_initialize(buf,
			(unsigned char *)if_mac.ifr_hwaddr.sa_data,
			zconf.gw_mac, zconf.target_port);
	pthread_mutex_unlock(&send_mutex);
	// adaptive timing to hit target rate
	uint32_t count = 0;
	uint32_t last_count = count;
	double last_time = now();
	uint32_t delay = 0;
	int interval = 0;
	volatile int vi;
	if (zconf.rate > 0) {
		// estimate initial rate
		delay = 10000;
		for (vi = delay; vi--; )
			;
		delay *= 1 / (now() - last_time) / (zconf.rate / zconf.senders);
		interval = (zconf.rate / zconf.senders) / 20;
		last_time = now();
	}
	while (1) {
		// adaptive timing delay
		if (delay > 0) {
			count++;
			for (vi = delay; vi--; )
				;
			if (!interval || (count % interval == 0)) {
				double t = now();
				delay *= (double)(count - last_count)
					/ (t - last_time) / (zconf.rate / zconf.senders);
				if (delay < 1)
					delay = 1;
				last_count = count;
				last_time = t;
			}
		}
		// generate next ip from cyclic group and update global state
		// (everything locked happens here)
		pthread_mutex_lock(&send_mutex);
		if (zsend.complete) {
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		if (zsend.sent >= zconf.max_targets) {
			zsend.complete = 1;
			zsend.finish = now();
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		if (zconf.max_runtime && zconf.max_runtime <= now() - zsend.start) {
			zsend.complete = 1;
			zsend.finish = now();
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		uint32_t curr;
		// if a CIDR range was specified, draw the next address from it;
		// otherwise walk the cyclic group
		if (zconf.cidr != '\0') {
			// get next IP and convert to correct format
			// uint32_t val = zsend.first_scanned++;
			curr = cidr_get_next_ip();
			if (zsend.last_to_scan == 1) {
				zsend.complete = 1;
				zsend.finish = now();
			}
		} else {
			curr = cyclic_get_next_ip();
			if (curr == zsend.first_scanned) {
				zsend.complete = 1;
				zsend.finish = now();
			}
		}
		zsend.sent++;
		pthread_mutex_unlock(&send_mutex);
		for (int i=0; i < zconf.packet_streams; i++) {
			uint32_t src_ip = get_src_ip(curr, i);
			uint32_t validation[VALIDATE_BYTES/sizeof(uint32_t)];
			validate_gen(src_ip, curr, (uint8_t *)validation);
			zconf.probe_module->make_packet(buf, src_ip, curr, validation, i);
			if (zconf.dryrun) {
				zconf.probe_module->print_packet(stdout, buf);
			} else {
				int l = zconf.probe_module->packet_length;
				int rc = sendto(sock, buf, l, 0,
						(struct sockaddr *)&sockaddr,
						sizeof(struct sockaddr_ll));
				if (rc < 0) {
					struct in_addr addr;
					addr.s_addr = curr;
					log_debug("send", "sendto failed for %s. %s",
							inet_ntoa(addr), strerror(errno));
					pthread_mutex_lock(&send_mutex);
					zsend.sendto_failures++;
					pthread_mutex_unlock(&send_mutex);
				}
			}
		}
	}
	log_debug("send", "thread finished");
	return EXIT_SUCCESS;
}
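/*
 * Illustrative sketch, not part of the scanner: the adaptive-timing logic used
 * by the send loop above, isolated into a standalone program. It busy-waits
 * `delay` iterations per packet and periodically rescales `delay` by the ratio
 * of the measured send rate to the per-thread target rate
 * (zconf.rate / zconf.senders in the real code). The demo program, its
 * target_rate value, and its local now() helper are assumptions for the sake
 * of a self-contained example.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

static double now(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec + tv.tv_usec / 1000000.0;
}

int main(void)
{
	double target_rate = 10000.0;     // packets per second for this thread
	uint32_t delay = 10000;           // initial busy-wait iterations per packet
	uint32_t count = 0, last_count = 0;
	volatile int vi;
	double last_time = now();

	// initial estimate: time one spin of `delay` iterations, then scale
	for (vi = delay; vi--; )
		;
	delay *= 1 / (now() - last_time) / target_rate;
	int interval = target_rate / 20;  // re-calibrate about 20 times per second
	last_time = now();

	for (int pkt = 0; pkt < 100000; pkt++) {
		count++;
		for (vi = delay; vi--; )      // stand-in for sending one packet
			;
		if (!interval || (count % interval == 0)) {
			double t = now();
			// measured rate divided by target rate gives the correction factor
			delay *= (double)(count - last_count) / (t - last_time) / target_rate;
			if (delay < 1)
				delay = 1;
			last_count = count;
			last_time = t;
		}
	}
	printf("calibrated delay: %u iterations per packet\n", delay);
	return 0;
}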
// one sender thread; when built with pcap injection the raw socket is unused
#ifdef ZMAP_PCAP_INJECT
int send_run(void)
#else
int send_run(int sock)
#endif
{
	log_debug("send", "thread started");
	pthread_mutex_lock(&send_mutex);
#ifdef ZMAP_PCAP_INJECT
	/* Using pcap, mirror the linux SOCK_RAW behaviour as closely as possible */
	unsigned char mac[ETHER_ADDR_LEN];
	struct in_addr src_ip = {0};
	/* We don't need the index; we have a pcap handle to the proper interface */
	pcap_t *pc = get_pcap_t();
	get_hwaddr(mac);
	get_ipaddr(&src_ip);
#else
	//int sock = get_socket();
	struct sockaddr_ll sockaddr;
	// get source interface index
	struct ifreq if_idx;
	memset(&if_idx, 0, sizeof(struct ifreq));
	if (strlen(zconf.iface) >= IFNAMSIZ) {
		log_error("send", "device interface name (%s) too long\n",
				zconf.iface);
		return -1;
	}
	strncpy(if_idx.ifr_name, zconf.iface, IFNAMSIZ-1);
	if (ioctl(sock, SIOCGIFINDEX, &if_idx) < 0) {
		perror("SIOCGIFINDEX");
		return -1;
	}
	int ifindex = if_idx.ifr_ifindex;
	// get source interface mac
	struct ifreq if_mac;
	memset(&if_mac, 0, sizeof(struct ifreq));
	strncpy(if_mac.ifr_name, zconf.iface, IFNAMSIZ-1);
	if (ioctl(sock, SIOCGIFHWADDR, &if_mac) < 0) {
		perror("SIOCGIFHWADDR");
		return -1;
	}
	// find source IP address associated with the dev from which we're sending.
	// while we won't use this address for sending packets, we need the address
	// to set certain socket options and it's easiest to just use the primary
	// address the OS believes is associated.
	struct ifreq if_ip;
	memset(&if_ip, 0, sizeof(struct ifreq));
	strncpy(if_ip.ifr_name, zconf.iface, IFNAMSIZ-1);
	if (ioctl(sock, SIOCGIFADDR, &if_ip) < 0) {
		perror("SIOCGIFADDR");
		return -1;
	}
	// wbk TODO: gateway MAC.
	// destination address for the socket
	memset((void*) &sockaddr, 0, sizeof(struct sockaddr_ll));
	sockaddr.sll_ifindex = ifindex;
	sockaddr.sll_halen = ETH_ALEN;
	memcpy(sockaddr.sll_addr, zconf.gw_mac, ETH_ALEN);
#endif /* not ZMAP_PCAP_INJECT */
	/* may move down... TODO wbk */
	char buf[MAX_PACKET_SIZE];
	memset(buf, 0, MAX_PACKET_SIZE);
	zconf.probe_module->thread_initialize(buf,
#ifdef ZMAP_PCAP_INJECT
			mac,
#else
			(unsigned char *)if_mac.ifr_hwaddr.sa_data,
#endif
			zconf.gw_mac, zconf.target_port);
	pthread_mutex_unlock(&send_mutex);
	// adaptive timing to hit target rate
	uint32_t count = 0;
	uint32_t last_count = count;
	double last_time = now();
	uint32_t delay = 0;
	int interval = 0;
	volatile int vi;
	if (zconf.rate > 0) {
		// estimate initial rate
		delay = 10000;
		for (vi = delay; vi--; )
			;
		delay *= 1 / (now() - last_time) / (zconf.rate / zconf.senders);
		interval = (zconf.rate / zconf.senders) / 20;
		last_time = now();
	}
	while (1) {
		// adaptive timing delay
		if (delay > 0) {
			count++;
			for (vi = delay; vi--; )
				;
			if (!interval || (count % interval == 0)) {
				double t = now();
				delay *= (double)(count - last_count)
					/ (t - last_time) / (zconf.rate / zconf.senders);
				if (delay < 1)
					delay = 1;
				last_count = count;
				last_time = t;
			}
		}
		// generate next ip from cyclic group and update global state
		// (everything locked happens here)
		pthread_mutex_lock(&send_mutex);
		if (zsend.complete) {
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		if (zsend.sent >= zconf.max_targets) {
			zsend.complete = 1;
			zsend.finish = now();
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		if (zconf.max_runtime && zconf.max_runtime <= now() - zsend.start) {
			zsend.complete = 1;
			zsend.finish = now();
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		uint32_t curr = cyclic_get_next_ip();
		if (curr == zsend.first_scanned) {
			zsend.complete = 1;
			zsend.finish = now();
		}
		zsend.sent++;
		pthread_mutex_unlock(&send_mutex);
		for (int i=0; i < zconf.packet_streams; i++) {
			uint32_t src_ip = get_src_ip(curr, i);
			uint32_t validation[VALIDATE_BYTES/sizeof(uint32_t)];
			validate_gen(src_ip, curr, (uint8_t *)validation);
			zconf.probe_module->make_packet(buf, src_ip, curr, validation, i);
			if (zconf.dryrun) {
				zconf.probe_module->print_packet(stdout, buf);
			} else {
				int l = zconf.probe_module->packet_length;
#ifdef ZMAP_PCAP_INJECT
				int rc = pcap_inject(pc, buf, (size_t)l);
				if (rc == -1) {
					struct in_addr addr;
					addr.s_addr = curr;
					/* TODO: make log_debug */
					log_fatal("send", "pcap_inject() failed for %s. %s",
							inet_ntoa(addr), strerror(errno));
					pthread_mutex_lock(&send_mutex);
					zsend.sendto_failures++;
					pthread_mutex_unlock(&send_mutex);
				}
#else
				/* TODO: error handling can be shared. */
				int rc = sendto(sock,
						buf + zconf.send_ip_pkts*sizeof(struct ethhdr),
						l, 0, (struct sockaddr *)&sockaddr,
						sizeof(struct sockaddr_ll));
				if (rc < 0) {
					struct in_addr addr;
					addr.s_addr = curr;
					log_debug("send", "sendto failed for %s. %s",
							inet_ntoa(addr), strerror(errno));
					pthread_mutex_lock(&send_mutex);
					zsend.sendto_failures++;
					pthread_mutex_unlock(&send_mutex);
				}
#endif
			}
		}
	}
	log_debug("send", "thread finished");
	return EXIT_SUCCESS;
}
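/*
 * Illustrative sketch, an assumption rather than the get_pcap_t() used above:
 * one way the ZMAP_PCAP_INJECT path could obtain its pcap handle and write a
 * raw Ethernet frame with pcap_inject(). The interface name "eth0" and the
 * placeholder frame contents are invented for the example.
 */
#include <pcap/pcap.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char errbuf[PCAP_ERRBUF_SIZE];
	// open the interface for live capture/injection; the snaplen and timeout
	// values are arbitrary here since we only inject
	pcap_t *pc = pcap_open_live("eth0", 65535, 0, 1, errbuf);
	if (!pc) {
		fprintf(stderr, "pcap_open_live: %s\n", errbuf);
		return 1;
	}
	unsigned char frame[60];
	memset(frame, 0, sizeof(frame));	// placeholder Ethernet frame
	memset(frame, 0xff, 6);			// broadcast destination MAC
	if (pcap_inject(pc, frame, sizeof(frame)) == -1) {
		fprintf(stderr, "pcap_inject: %s\n", pcap_geterr(pc));
	}
	pcap_close(pc);
	return 0;
}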
// one sender thread
int send_run(int sock)
{
	log_trace("send", "send thread started");
	pthread_mutex_lock(&send_mutex);
	// Allocate a buffer to hold the outgoing packet
	char buf[MAX_PACKET_SIZE];
	memset(buf, 0, MAX_PACKET_SIZE);
	// OS-specific per-thread init
	if (send_run_init(sock)) {
		return -1;
	}
	// Get the source hardware address and give it to the probe module
	if (get_iface_hw_addr(zconf.iface, zconf.hw_mac)) {
		log_fatal("send", "could not retrieve hardware address for "
				"interface: %s", zconf.iface);
		return -1;
	}
	char mac_buf[(ETHER_ADDR_LEN * 2) + (ETHER_ADDR_LEN - 1) + 1];
	char *p = mac_buf;
	for (int i=0; i < ETHER_ADDR_LEN; i++) {
		if (i == ETHER_ADDR_LEN-1) {
			snprintf(p, 3, "%.2x", zconf.hw_mac[i]);
			p += 2;
		} else {
			snprintf(p, 4, "%.2x:", zconf.hw_mac[i]);
			p += 3;
		}
	}
	log_debug("send", "source MAC address %s", mac_buf);
	zconf.probe_module->thread_initialize(buf, zconf.hw_mac, zconf.gw_mac,
			zconf.target_port);
	pthread_mutex_unlock(&send_mutex);
	// adaptive timing to hit target rate
	uint32_t count = 0;
	uint32_t last_count = count;
	double last_time = now();
	uint32_t delay = 0;
	int interval = 0;
	volatile int vi;
	if (zconf.rate > 0) {
		// estimate initial rate
		delay = 10000;
		for (vi = delay; vi--; )
			;
		delay *= 1 / (now() - last_time) / (zconf.rate / zconf.senders);
		interval = (zconf.rate / zconf.senders) / 20;
		last_time = now();
	}
	while (1) {
		// adaptive timing delay
		if (delay > 0) {
			count++;
			for (vi = delay; vi--; )
				;
			if (!interval || (count % interval == 0)) {
				double t = now();
				delay *= (double)(count - last_count)
					/ (t - last_time) / (zconf.rate / zconf.senders);
				if (delay < 1)
					delay = 1;
				last_count = count;
				last_time = t;
			}
		}
		// generate next ip from cyclic group and update global state
		// (everything locked happens here)
		pthread_mutex_lock(&send_mutex);
		if (zsend.complete) {
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		if (zsend.sent >= zconf.max_targets) {
			zsend.complete = 1;
			zsend.finish = now();
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		if (zconf.max_runtime && zconf.max_runtime <= now() - zsend.start) {
			zsend.complete = 1;
			zsend.finish = now();
			pthread_mutex_unlock(&send_mutex);
			break;
		}
		// c: cyclic-group iterator for this thread (assumed to be set up
		// elsewhere in this revision, e.g. during send initialization)
		uint32_t curr = cyclic_get_next_ip(c);
		if (curr == zsend.first_scanned) {
			zsend.complete = 1;
			zsend.finish = now();
		}
		zsend.sent++;
		pthread_mutex_unlock(&send_mutex);
		for (int i=0; i < zconf.packet_streams; i++) {
			uint32_t src_ip = get_src_ip(curr, i);
			uint32_t validation[VALIDATE_BYTES/sizeof(uint32_t)];
			validate_gen(src_ip, curr, (uint8_t *)validation);
			zconf.probe_module->make_packet(buf, src_ip, curr, validation, i);
			if (zconf.dryrun) {
				zconf.probe_module->print_packet(stdout, buf);
			} else {
				int length = zconf.probe_module->packet_length;
				void *contents = buf + zconf.send_ip_pkts*sizeof(struct ether_header);
				int rc = send_packet(sock, contents, length);
				if (rc < 0) {
					struct in_addr addr;
					addr.s_addr = curr;
					log_debug("send", "send_packet failed for %s. %s",
							inet_ntoa(addr), strerror(errno));
					pthread_mutex_lock(&send_mutex);
					zsend.sendto_failures++;
					pthread_mutex_unlock(&send_mutex);
				}
			}
		}
	}
	cyclic_free(c);
	log_debug("send", "thread finished");
	return EXIT_SUCCESS;
}
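/*
 * Illustrative sketch, an assumption only: the refactored version above hides
 * the OS-specific transmit path behind send_run_init()/send_packet(). On
 * Linux those helpers could look roughly like the pair below, caching the
 * link-layer destination (the gateway MAC) once per thread the way the
 * earlier versions build their sockaddr_ll. The _sketch names, and the exact
 * parameters, are hypothetical; the real helpers may differ.
 */
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <netpacket/packet.h>
#include <string.h>

static struct sockaddr_ll sockaddr;	// per-thread link-layer destination

// resolve the interface index and fill in the gateway MAC as the
// destination for every subsequent sendto()
int send_run_init_sketch(int sock, const char *iface,
		const unsigned char *gw_mac)
{
	struct ifreq if_idx;
	memset(&if_idx, 0, sizeof(if_idx));
	strncpy(if_idx.ifr_name, iface, IFNAMSIZ - 1);
	if (ioctl(sock, SIOCGIFINDEX, &if_idx) < 0) {
		return -1;
	}
	memset(&sockaddr, 0, sizeof(sockaddr));
	sockaddr.sll_ifindex = if_idx.ifr_ifindex;
	sockaddr.sll_halen = ETHER_ADDR_LEN;
	memcpy(sockaddr.sll_addr, gw_mac, ETHER_ADDR_LEN);
	return 0;
}

// thin wrapper so the send loop does not care whether the backend is a
// raw socket, pcap, or something else
int send_packet_sketch(int sock, void *buf, int len)
{
	return sendto(sock, buf, len, 0,
			(struct sockaddr *)&sockaddr, sizeof(sockaddr));
}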