/*
 * Push the OLED frame buffer (odev->buf) out to the device, split into
 * ASUS_OLED_PACKET_BUF_SIZE-sized chunks.  A single scratch packet
 * structure is allocated here and reused by send_packets() /
 * send_packets_g50() for every transfer.  On allocation failure the
 * frame is silently dropped (only logged).
 */
static void send_data(struct asus_oled_dev *odev)
{
	/* Number of full packets the current buffer occupies. */
	size_t packet_num = odev->buf_size / ASUS_OLED_PACKET_BUF_SIZE;
	struct asus_oled_packet *packet;

	packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);

	if (!packet) {
		dev_err(&odev->udev->dev, "out of memory\n");
		return;
	}

	if (odev->pack_mode == PACK_MODE_G1) {
		/* When sending roll-mode data the display updated only
		   first packet.  I have no idea why, but when static picture
		   is sent just before rolling picture everything works fine. */
		if (odev->pic_mode == ASUS_OLED_ROLL)
			send_packets(odev->udev, packet, odev->buf,
				     ASUS_OLED_STATIC, 2);

		/* Only ROLL mode can use more than 2 packets.*/
		if (odev->pic_mode != ASUS_OLED_ROLL && packet_num > 2)
			packet_num = 2;

		send_packets(odev->udev, packet, odev->buf,
			     odev->pic_mode, packet_num);
	} else if (odev->pack_mode == PACK_MODE_G50) {
		/* G50 hardware uses its own fixed packetization. */
		send_packets_g50(odev->udev, packet, odev->buf);
	}

	kfree(packet);
}
/**
 * Begin the animation if it is not already running: create the periodic
 * timer (parented to this Graph so Qt reclaims it), wire its timeout to
 * send_packets(), fire one immediate send, then start the periodic
 * interval of `time` milliseconds.  No-op when already started.
 */
void Graph::start_animation() {
    if (started)
        return;

    started = true;
    paused = false;

    timer = new QTimer(this);
    connect(timer, SIGNAL(timeout()), this, SLOT(send_packets()));
    // Kick off the first transmission right away...
    timer->singleShot(0, this, SLOT(send_packets()));
    // ...then repeat on the configured interval.
    timer->start(time);
}
int main(int argc, char *argv[]) { if (argc < 4) { fprintf(stderr,"%s", USAGE); exit(0); } server_t = gethostbyname(argv[2]); server = gethostbyname(argv[2]); if (server == NULL) { fprintf(stderr,"ERROR, no such host\n"); exit(0); } if (server_t == NULL) { fprintf(stderr,"ERROR, no such host\n"); exit(0); } filename = (char *) malloc(FILENAME_SIZE); strcpy(filename,argv[1]); portno = atoi(argv[3]); makesocket(); mapfile(); //send_file_info(); send_file_info_tcp(); if((error=pthread_create(&resend_thread,NULL,resend_packet,NULL))){ fprintf(stderr, "error in creating pthread: %s\n",strerror(error)); exit(1); } packet packet1; memset(packet1.payload,'\0',PAYLOAD_SIZE+1); if((filesize % PAYLOAD_SIZE) != 0) no_of_packets = (filesize/PAYLOAD_SIZE)+1; else no_of_packets = (filesize/PAYLOAD_SIZE); if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );} while(seqNum < no_of_packets){ packet1.seq_num = seqNum; if((seqNum == (no_of_packets-1)) && ((filesize % PAYLOAD_SIZE) != 0)){ memcpy(packet1.payload,data+off,(filesize % PAYLOAD_SIZE)); } else{ memcpy(packet1.payload,data+off,PAYLOAD_SIZE); } seqNum++; memcpy(packet1.payload,data+off,PAYLOAD_SIZE); off = off + PAYLOAD_SIZE; send_packets(packet1); } pthread_join(resend_thread,NULL); munmap(data, filesize); close(fp); return 1; }
bool update(const char * field, std::vector<uint8_t> data) { if (!strcmp(field, "dlpkt")) { return delete_packet(data); } else if (!strcmp(field, "dlcha")) { return delete_channel(data); } else if (!strcmp(field, "pnum")) { return set_pnumber(data); } else if (!strcmp(field, "chan")) { return set_channel(data); } else if (!strcmp(field, "pol")) { set_poll(data); } else if (!strcmp(field, "dir")) { const char * dirStr = byteVec2cstr(data); if (!strcmp(dirStr, "pos")) set_direction(DIR_POS); else if (!strcmp(dirStr, "neg")) set_direction(DIR_NEG); else return false; } else if (!strcmp(field, "data")) { return set_current(data); } else if (!strcmp(field, "wait")) { set_delay(data); } else if (!strcmp(field, "send")) { flag_return = true; send_packets(); } else if (!strcmp(field, "reset")) { send_global_reset(); } else if (!strcmp(field, "glob")) { set_global(); } else if (!strcmp(field, "conn")) { flag_return = true; return connect_serial(); } else if (!strcmp(field, "exit")) { return exit(); } else if (!strcmp(field, "clrpks")) { clear_packets(); } else if (!strcmp(field, "prev")) { if (debug_) preview_packets(); preview_packet_bytes(); flag_return = true; } else { return false; } return true; }
/** * Send UDP packets */ void Client::send_udp_packets(const char *ip_addr, int port, int n_pkts, int port_ack) { int sock, s_ipc, sock_ack; struct sockaddr_in addr; struct sockaddr_un s_addr; struct sockaddr_in addr_ack; // use same port as sending for receiving ack setup_udp_client(&sock, &addr, ip_addr, port); setup_ipc_client(&s_ipc, &s_addr, ip_addr, port); setup_ack_server(&sock_ack, &addr_ack, port_ack); send_packets(sock, addr, n_pkts, sock_ack); send_msg_finish(s_ipc); recv_msg_info(s_ipc); close(sock); close(s_ipc); close(sock_ack); }
/*
 * Function: main()
 *
 * Parse the command line into an icmp6_info job, optionally detach
 * into the background, then hand the job to send_packets().
 */
int main(int argc, char *argv[])
{
	struct icmp6_info icmp6_data;
	int run_in_background = 0;

	debug = 0;
	program_name = strdup(argv[0]);

	/* Start from a zeroed configuration; the CLI fills it in. */
	memset(&icmp6_data, '\0', sizeof(struct icmp6_info));
	parse_options(argc, argv, &icmp6_data, &run_in_background);

	/* Work in the background when requested. */
	if (run_in_background) {
		if (daemon(0, 0) < 0)
			fatal_error("daemon()");
	}

	send_packets(&icmp6_data);
	exit(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) { if (argc < 5) { fprintf(stderr, "usage: %s dev queue kthread num\n", argv[0]); return -1; } const char *dev = argv[1]; int queue = atoi(argv[2]); int kthread = atoi(argv[3]); unsigned long long num = atoll(argv[4]); pfq_t * q= pfq_open(64, 1024, 1024); if (pfq_bind_tx(q, dev, queue, kthread) < 0) { fprintf(stderr, "%s\n", pfq_error(q)); return -1; } pfq_enable(q); if (kthread == -1) { send_packets(q, num); } else { send_packets_async(q, num); } sleep(2); struct pfq_stats stat; pfq_get_stats(q, &stat); fprintf(stdout, "sent: %lu - disc: %lu\n", stat.sent, stat.disc); pfq_close(q); return 0; }
/*
 * Retransmission thread: block on the UDP socket for resend requests
 * from the receiver (each request is one int sequence number) and
 * answer each with the requested packet rebuilt from the mmap'ed file.
 * A sequence number of -1 is the receiver's "done" sentinel: print the
 * elapsed transfer time and exit the thread.
 */
void* resend_packet(void* a)
{
	while (1) {
		int n, seq, size = PAYLOAD_SIZE;
		n = recvfrom(sockfd, &seq, sizeof(int), 0,
			     (struct sockaddr *)&serv_addr, &fromlen);
		if (n < 0)
			errorMsg("recvfrom");
		if (seq == -1) {
			/* Transfer complete: report wall-clock duration. */
			printf("Entire file transmitted\n");
			if (clock_gettime(CLOCK_REALTIME, &stop) == -1) {
				perror("clock gettime");
			}
			time_e = (stop.tv_sec - start.tv_sec) +
				 (double)(stop.tv_nsec - start.tv_nsec) / 1e9;
			printf("TIME FOR DATA TRANSMISSION: %f \n", time_e);
			pthread_exit(0);
		}
		/* The last packet may be shorter than a full payload. */
		if ((seq == (no_of_packets - 1)) && (0 != filesize % PAYLOAD_SIZE))
			size = filesize % PAYLOAD_SIZE;
		packet packet2;
		memset(packet2.payload, '\0', PAYLOAD_SIZE + 1);
		packet2.seq_num = seq;
		/* Rebuild the payload directly from the file mapping. */
		memcpy(packet2.payload, data + (seq * PAYLOAD_SIZE), size);
		send_packets(packet2);
	}
}
/*
 * Write-ready epoll handler: push out any pings not yet sent, then
 * service a pending retransmission round, and finally re-arm the
 * socket for read events only.
 */
void handle_send_event(pings_params *pings)
{
	/* Initial sweep: keep transmitting until every probe went out once. */
	if (pings->sent_all_flag == PINGS_FALSE) {
		send_packets(pings);
		if (pings->send_error) {
			return;
		}
	}
	/* Retransmission pass: only after the initial sweep finished, only
	 * when a resend was requested, and only while under the retry
	 * budget (PINGS_RESEND_NUMBER). */
	if (pings->sent_all_flag && pings->resend_flag
	   && pings->resend_num < PINGS_RESEND_NUMBER) {
		resend_packets(pings);
		if (pings->send_error) {
			return;
		}
		/* A full round covered every destination IP. */
		if (pings->resent_probe_ip_num == pings->dest_ip_num) {
			if (pings->is_resent == PINGS_TRUE) {
				/* Something was actually retransmitted:
				 * account for the round and start over. */
				pings->resend_num++;
				pings->resent_probe_ip_num = 0;
				pings->is_resent = PINGS_FALSE;
			}
			else {
				/* Nothing needed resending: exhaust the
				 * budget so no further retries happen. */
				pings->resend_num = PINGS_RESEND_NUMBER;
			}
		}
		pings->resend_flag = PINGS_FALSE;
	}
	/* Re-arm edge-triggered reads until the next send opportunity. */
	pings->event.events = EPOLLIN | EPOLLET;
	epoll_ctl(pings->epoll_fd, EPOLL_CTL_MOD, pings->sock_fd, &(pings->event));
}
bool connectToEPuck(const bdaddr_t *ba) { // set the connection parameters (who to connect to) struct sockaddr_rc addr; addr.rc_family = AF_BLUETOOTH; addr.rc_channel = (uint8_t) 1; addr.rc_bdaddr = *ba; //memcpy(addr.rc_bdaddr, ba, sizeof(bdaddr_t)); // allocate a socket int rfcommSock = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM); // connect to server int status = ::connect(rfcommSock, (struct sockaddr *)&addr, sizeof(addr)); if (status == 0) { unsigned trial = 0; while (true) { if (send_packets(rfcommSock)) break; else std::cerr << "Upload failed (" << trial << ") . Trying again. Press CTRL-C to quit" << std::endl; trial++; } } else { std::cerr << "Error, can't connect to rfcomm socket" << std::endl; } close(rfcommSock); return status == 0; }
/**
 * \brief replay a pcap file out interface(s)
 *
 * Internal to tcpreplay. Does the heavy lifting.
 *
 * Opens (or reuses the preloaded cache of) the pcap source at `idx`,
 * records its datalink type, warns on a DLT mismatch with the outbound
 * interface, and streams the packets via send_packets().
 *
 * Returns 0 on success, -1 (with ctx error set) on open failure.
 */
static int
replay_file(tcpreplay_t *ctx, int idx)
{
    char *path;
    pcap_t *pcap = NULL;
    char ebuf[PCAP_ERRBUF_SIZE];

    assert(ctx);
    /* Bug fix: this assert used `=` (assignment), so it always passed
     * and silently overwrote the source type.  `==` actually checks. */
    assert(ctx->options->sources[idx].type == source_filename);

    path = ctx->options->sources[idx].filename;

    /* close stdin if reading from it (needed for some OS's) */
    /* NOTE(review): fd 1 is stdout, not stdin — confirm whether this
     * should be close(0); kept as-is to preserve behavior. */
    if (strncmp(path, "-", 1) == 0)
        close(1);

    /* read from pcap file if we haven't cached things yet */
    if (!ctx->options->preload_pcap) {
        if ((pcap = pcap_open_offline(path, ebuf)) == NULL) {
            tcpreplay_seterr(ctx, "Error opening pcap file: %s", ebuf);
            return -1;
        }

        ctx->options->file_cache[idx].dlt = pcap_datalink(pcap);
#ifdef HAVE_PCAP_SNAPSHOT
        if (pcap_snapshot(pcap) < 65535)
            warnx("%s was captured using a snaplen of %d bytes. This may mean you have truncated packets.",
                  path, pcap_snapshot(pcap));
#endif
    } else {
        /* Preload mode: only open the file when it was not cached yet. */
        if (!ctx->options->file_cache[idx].cached) {
            if ((pcap = pcap_open_offline(path, ebuf)) == NULL) {
                tcpreplay_seterr(ctx, "Error opening pcap file: %s", ebuf);
                return -1;
            }
            ctx->options->file_cache[idx].dlt = pcap_datalink(pcap);
        }
    }

#if 0
    /*
     * this API is broken right now.  This needs to be handled via a pipe or
     * something else so we can pass the output up to the calling programm
     */
#ifdef ENABLE_VERBOSE
    if (ctx->options->verbose) {
        /* in cache mode, we may not have opened the file */
        if (pcap == NULL)
            if ((pcap = pcap_open_offline(path, ebuf)) == NULL) {
                tcpreplay_seterr("Error opening pcap file: %s", ebuf);
                return -1;
            }
        ctx->options->file_cache[idx].dlt = pcap_datalink(pcap);
        /* init tcpdump */
        tcpdump_open(ctx->options->tcpdump, pcap);
    }
#endif
#endif

    if (pcap != NULL) {
        if (ctx->intf1dlt == -1)
            ctx->intf1dlt = sendpacket_get_dlt(ctx->intf1);
#if 0
        if ((ctx->intf1dlt >= 0) && (ctx->intf1dlt != pcap_datalink(pcap)))
            warnx("%s DLT (%s) does not match that of the outbound interface: %s (%s)",
                  path, pcap_datalink_val_to_name(pcap_datalink(pcap)),
                  ctx->options->intf1->device, pcap_datalink_val_to_name(ctx->intf1dlt));
#endif
        /* Warn (non-fatal) when the file's DLT differs from the NIC's. */
        if (ctx->intf1dlt != ctx->options->file_cache[idx].dlt)
            tcpreplay_setwarn(ctx, "%s DLT (%s) does not match that of the outbound interface: %s (%s)",
                              path, pcap_datalink_val_to_name(pcap_datalink(pcap)),
                              ctx->intf1->device, pcap_datalink_val_to_name(ctx->intf1dlt));
    }

    ctx->stats.active_pcap = ctx->options->sources[idx].filename;
    send_packets(ctx, pcap, idx);

    if (pcap != NULL)
        pcap_close(pcap);

#if 0
#ifdef ENABLE_VERBOSE
    tcpdump_close(ctx->options->tcpdump);
#endif
#endif

    return 0;
}
int main(int argc, char **argv) { /** * */ int shmid; key_t key = rand(); libnet_t *lnet_handle = NULL; pcap_t *pcap_handle; char lnet_errbuf[LIBNET_ERRBUF_SIZE]; char pcap_errbuf[PCAP_ERRBUF_SIZE]; char *cp, *filter, *dst_str, *src_str, *device = "eth0"; struct bpf_program fp; u_int32_t dst, src; u_short dst_port, src_port; pid_t pid; unsigned int c, amount = 10000; /* 10000 packets sent by default */ useconds_t interval = 10000; /* 10 miliseconds by default */ if (argc < 5) { usage(argv[0]); exit(EXIT_FAILURE); } /** * Getting arguments */ while ((c = getopt(argc, argv, "d:s:t:a:i:")) != EOF) { switch(c) { case 'd': device = malloc(sizeof(char) * strlen(optarg)); strcpy(device, optarg); break; case 's': if (!(cp = strrchr(optarg, ':'))) { usage(argv[0]); exit(EXIT_FAILURE); } *cp++ = 0; src_port = (u_short)atoi(cp); src = libnet_name2addr4(lnet_handle, optarg, LIBNET_RESOLVE); src_str = malloc(sizeof(char) * (strlen(optarg) + 1)); strcpy(src_str, optarg); if (src == -1) { fprintf(stderr, "Bad source (%s).\n", libnet_geterror(lnet_handle)); exit(EXIT_FAILURE); } break; case 't': if (!(cp = strrchr(optarg, ':'))) { usage(argv[0]); exit(EXIT_FAILURE); } *cp++ = 0; dst_port = (u_short)atoi(cp); dst = libnet_name2addr4(lnet_handle, optarg, LIBNET_RESOLVE); dst_str = malloc(sizeof(char) * (strlen(optarg) + 1)); strcpy(dst_str, optarg); if (dst == -1) { fprintf(stderr, "Bad target (%s).\n", libnet_geterror(lnet_handle)); exit(EXIT_FAILURE); } break; case 'i': interval = atoi(optarg); break; case 'a': amount = atoi(optarg); break; } } /** * Initialize libnet */ lnet_handle = libnet_init(LIBNET_RAW4, device, lnet_errbuf); if (lnet_handle == NULL) { fprintf(stderr, "libnet_init() failed: %s.\n", lnet_errbuf); exit(EXIT_FAILURE); } /** * Initialize libpcap */ pcap_handle = pcap_open_live(device, CLNET_ETHER_MAX_LEN, 1, 1000, pcap_errbuf); if (pcap_handle == NULL) { fprintf(stderr, "Couldn't open device %s: %s.\n", device, pcap_errbuf); exit(EXIT_FAILURE); } datalink = 
pcap_datalink(pcap_handle); if (datalink != DLT_EN10MB && datalink != DLT_LINUX_SLL) { fprintf(stderr, "%s (%d) is not an Ethernet.\n", device, datalink); exit(EXIT_FAILURE); } filter = (char*) malloc(FILTER_SIZE); sprintf(filter, FILTER_FORMAT, dst_port, src_port, dst_str, src_str); if (pcap_compile(pcap_handle, &fp, filter, 0, 0) == -1) { fprintf(stderr, "Couldn't parse filter '%s': %s.\n", filter, pcap_geterr(pcap_handle)); exit(EXIT_FAILURE); } if (pcap_setfilter(pcap_handle, &fp) == -1) { fprintf(stderr, "Couldn't install filter %s: %s.\n", filter, pcap_geterr(pcap_handle)); exit(EXIT_FAILURE); } /** * */ if ((shmid = shmget(key, sizeof(unsigned int), IPC_CREAT | 0666)) < 0) { perror("shmget"); exit(EXIT_FAILURE); } if ((count = shmat(shmid, NULL, 0)) == (unsigned int *) -1) { perror("shmat"); exit(EXIT_FAILURE); } *count = 0; pid = fork(); clnet_initialize(); if (pid == 0) { pcap_loop(pcap_handle, amount, get_tcp_isn, NULL); fflush(stdout); pcap_freecode(&fp); pcap_close(pcap_handle); } else { send_packets(lnet_handle, src, dst, src_port, dst_port, amount, interval); libnet_destroy(lnet_handle); } clnet_finalize(); exit(EXIT_SUCCESS); }
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 *
 * Busy-polls the cycle counter: between bursts it spins (accumulating
 * idle cycles into the per-thread stats), and on each burst boundary it
 * allocates up to batch_len packets, enqueues them on the default
 * output queue, and accounts for allocation/enqueue failures.
 */
static void *run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_queue_t outq;
	pkt_tx_stats_t *stats;
	uint64_t next_tx_cycles, end_cycles, cur_cycles;
	uint64_t burst_gap_cycles;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_event_t tx_event[BATCH_LEN_MAX];
	uint64_t idle_start = 0;
	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	outq = odp_pktio_outq_getdef(globals->pktio_tx);
	if (outq == ODP_QUEUE_INVALID)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

	/* Cycle gap between bursts so batches average out to `pps`. */
	burst_gap_cycles = odp_time_ns_to_cycles(
		(ODP_TIME_SEC * 999) / (1000 * targs->pps / (targs->batch_len)));

	/* Start all TX threads at the same instant. */
	odp_barrier_wait(&globals->tx_barrier);

	cur_cycles = odp_time_cycles();
	next_tx_cycles = cur_cycles;
	end_cycles = cur_cycles +
		     odp_time_ns_to_cycles(targs->duration * ODP_TIME_SEC);

	while (cur_cycles < end_cycles) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (cur_cycles < next_tx_cycles) {
			/* Not yet time for the next burst: spin, tracking
			 * when the idle period began. */
			cur_cycles = odp_time_cycles();
			if (idle_start == 0)
				idle_start = cur_cycles;
			continue;
		}

		if (idle_start) {
			stats->s.idle_cycles += odp_time_diff_cycles(
				idle_start, cur_cycles);
			idle_start = 0;
		}

		next_tx_cycles += burst_gap_cycles;

		/* Top up the batch, leaving room for packets still queued
		 * from the previous (partially failed) enqueue.
		 * NOTE(review): comparing alloc_cnt against batch_len counts
		 * a "failure" whenever unsent_pkts > 0 even if the smaller
		 * request fully succeeded — confirm intended. */
		alloc_cnt = alloc_packets(tx_event, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(outq, tx_event, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_cycles = odp_time_cycles();
	}

	VPRINT(" %02d: TxPkts %-8"PRIu64" EnqFail %-6"PRIu64
	       " AllocFail %-6"PRIu64" Idle %"PRIu64"ms\n",
	       thr_id, stats->s.tx_cnt, stats->s.enq_failures,
	       stats->s.alloc_failures,
	       odp_time_cycles_to_ns(stats->s.idle_cycles)/1000/1000);
	return NULL;
}
/*
 * Ping mode: transmit one timestamped/sequence-numbered packet at a
 * time on TX ring 0, then collect echoes, computing per-packet RTT
 * (min/avg printed roughly once per second).
 */
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	int i, rx = 0, n = targ->g->npackets;
	void *frame;
	int size;
	uint32_t sent = 0;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000000, av = 0;

	/* Skip the virtio-style header prefix of the prebuilt packet. */
	frame = &targ->pkt;
	frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	now = last_print;
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
		for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
			slot = &ring->slot[ring->cur];
			slot->len = size;
			p = NETMAP_BUF(ring, slot->buf_idx);

			if (nm_ring_empty(ring)) {
				D("-- ouch, cannot send");
			} else {
				struct tstamp *tp;
				nm_pkt_copy(frame, p, size);
				clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
				/* Sequence number at offset 42, timestamp
				 * at offset 46 of the payload. */
				bcopy(&sent, p+42, sizeof(sent));
				tp = (struct tstamp *)(p+46);
				tp->sec = (uint32_t)ts.tv_sec;
				tp->nsec = (uint32_t)ts.tv_nsec;
				sent++;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
			}
		}
		/* should use a parameter to decide how often to send */
		if (poll(&pfd, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d: %s",
				targ->me, strerror(errno));
			continue;
		}
		/* see what we got back */
		/* NOTE(review): iterates with first_tx_ring/last_tx_ring
		 * bounds while fetching RX rings — confirm these bounds are
		 * interchangeable on this interface. */
		for (i = targ->nmd->first_tx_ring;
			i <= targ->nmd->last_tx_ring; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(ring)) {
				uint32_t seq;
				struct tstamp *tp;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);

				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				tp = (struct tstamp *)(p+46);
				ts.tv_sec = (time_t)tp->sec;
				ts.tv_nsec = (long)tp->nsec;
				/* RTT = now - embedded send timestamp. */
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (1) D("seq %d/%d delta %d.%09d", seq, sent,
					(int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count ++;
				av += ts.tv_nsec;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		/* Once a second, report and reset the RTT statistics. */
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d", count, min, av/count);
			count = 0;
			av = 0;
			min = 100000000;
			last_print = now;
		}
	}
	return NULL;
}


/*
 * reply to ping requests: echo every received packet back out TX ring 0
 * with the first three 16-bit words (MAC addresses) swapped.
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCRXSYNC, NULL);
#else
		if (poll(&pfd, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d: %s",
				targ->me, strerror(errno));
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = nm_ring_space(txring);
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(rxring)) {
				uint16_t *spkt, *dpkt;
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->head = rxring->cur = nm_ring_next(rxring, cur);
				rx++;
				/* When TX is full the packet is dropped. */
				if (txavail == 0)
					continue;
				dst = NETMAP_BUF(txring,
				    txring->slot[txcur].buf_idx);
				/* copy... */
				dpkt = (uint16_t *)dst;
				spkt = (uint16_t *)src;
				nm_pkt_copy(src, dst, slot->len);
				dpkt[0] = spkt[3];
				dpkt[1] = spkt[4];
				dpkt[2] = spkt[5];
				dpkt[3] = spkt[0];
				dpkt[4] = spkt[1];
				dpkt[5] = spkt[2];
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = nm_ring_next(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->head = txring->cur = txcur;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}


/* Return 1 when *a >= *b (component-wise timespec comparison). */
static __inline int
timespec_ge(const struct timespec *a, const struct timespec *b)
{

	if (a->tv_sec > b->tv_sec)
		return (1);
	if (a->tv_sec < b->tv_sec)
		return (0);
	if (a->tv_nsec >= b->tv_nsec)
		return (1);
	return (0);
}

static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}


/* a + b with nanosecond carry normalization. */
static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

/* a - b with nanosecond borrow normalization. */
static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}


/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}

/*
 * Transmit thread: send n/nthreads packets on this thread's rings,
 * optionally rate limited, via TAP write, pcap_inject, or netmap
 * TX rings (the default path, with fragment-aligned bursts).
 */
static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
	struct netmap_if *nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads;
	int64_t sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0}; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	/* Use the prebuilt template unless an explicit frame was given. */
	if (targ->frame == NULL) {
		frame = pkt;
		frame += sizeof(pkt->vh) - targ->g->virt_header;
		size = targ->g->pkt_size + targ->g->virt_header;
	} else {
		frame = targ->frame;
		size = targ->g->pkt_size;
	}

	D("start, fd %d main_fd %d", targ->fd, targ->g->main_fd);
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* main loop.*/
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		/* Align the start time to a whole-second boundary ~2s out. */
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
	if (targ->g->dev_type == DEV_TAP) {
		D("writing to file desc %d", targ->g->main_fd);

		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (write(targ->g->main_fd, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			/* Publish progress periodically. */
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		pcap_t *p = targ->g->p;

		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (pcap_inject(p, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
#endif /* NO_PCAP */
	} else {
		int tosend = 0;
		int frags = targ->g->frags;

		nifp = targ->nmd->nifp;
		while (!targ->cancel && (n == 0 || sent < n)) {

			if (rate_limit && tosend <= 0) {
				tosend = targ->g->burst;
				nexttime = timespec_add(nexttime, targ->g->tx_period);
				wait_time(nexttime);
			}

			/*
			 * wait for available room in the send queue(s)
			 */
			if (poll(&pfd, 1, 2000) <= 0) {
				if (targ->cancel)
					break;
				D("poll error/timeout on queue %d: %s", targ->me,
					strerror(errno));
				// goto quit;
			}
			if (pfd.revents & POLLERR) {
				D("poll error");
				goto quit;
			}
			/*
			 * scan our queues and send on those with room
			 */
			/* After warm-up, stop forcing the copy option
			 * unless the user explicitly requested it. */
			if (options & OPT_COPY && sent > 100000 &&
			    !(targ->g->options & OPT_COPY) ) {
				D("drop copy");
				options &= ~OPT_COPY;
			}
			for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
				int m, limit = rate_limit ?  tosend : targ->g->burst;
				if (n > 0 && n - sent < limit)
					limit = n - sent;
				txring = NETMAP_TXRING(nifp, i);
				if (nm_ring_empty(txring))
					continue;
				/* Keep bursts a whole number of fragments. */
				if (frags > 1)
					limit = ((limit + frags - 1) / frags) * frags;

				m = send_packets(txring, pkt, frame, size, targ->g,
						 limit, options, frags);
				ND("limit %d tail %d frags %d m %d",
					limit, txring->tail, frags, m);
				sent += m;
				targ->count = sent;
				if (rate_limit) {
					tosend -= m;
					if (tosend <= 0)
						break;
				}
			}
		}
		/* flush any remaining packets */
		D("flush tail %d head %d on thread %p",
			txring->tail, txring->head,
			pthread_self());
		ioctl(pfd.fd, NIOCTXSYNC, NULL);

		/* final part: wait all the TX queues to be empty. */
		for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
			txring = NETMAP_TXRING(nifp, i);
			while (nm_tx_pending(txring)) {
				RD(5, "pending tx tail %d head %d on ring %d",
					txring->tail, txring->head, i);
				ioctl(pfd.fd, NIOCTXSYNC, NULL);
				usleep(1); /* wait 1 tick */
			}
		}
	} /* end DEV_NETMAP */

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}


#ifndef NO_PCAP
/* pcap_dispatch callback: just count the packet in *user. */
static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
#endif /* !NO_PCAP */

/* Drain up to `limit` packets from a netmap RX ring, optionally dumping
 * each payload; returns the number consumed. */
static int
receive_packets(struct netmap_ring *ring, u_int limit, int dump)
{
	u_int cur, rx, n;

	cur = ring->cur;
	n = nm_ring_space(ring);
	if (n < limit)
		limit = n;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (dump)
			dump_payload(p, slot->len, ring, cur);

		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;

	return (rx);
}

/*
 * Receive thread: count incoming packets from TAP, pcap, or netmap RX
 * rings; on the netmap path, exit after one second of silence unless
 * `forever` is set.
 */
static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	D("reading from %s fd %d main_fd %d",
		targ->g->ifname, targ->fd, targ->g->main_fd);
	/* unbounded wait for the first packet. */
	for (;!targ->cancel;) {
		i = poll(&pfd, 1, 1000);
		if (i > 0 && !(pfd.revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d",
			i, pfd.revents);
	}
	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (targ->g->dev_type == DEV_TAP) {
		while (!targ->cancel) {
			char buf[MAX_BODYSIZE];
			/* XXX should we poll ? */
			if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
				targ->count++;
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		while (!targ->cancel) {
			/* XXX should we poll ? */
			pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap,
				(u_char *)&targ->count);
		}
#endif /* !NO_PCAP */
	} else {
		int dump = targ->g->options & OPT_DUMP;

		nifp = targ->nmd->nifp;
		while (!targ->cancel) {
			/* Once we started to receive packets, wait at most 1 seconds
			   before quitting. */
			if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
				clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
				targ->toc.tv_sec -= 1; /* Subtract timeout time. */
				goto out;
			}
			if (pfd.revents & POLLERR) {
				D("poll err");
				goto quit;
			}
			for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
				int m;

				rxring = NETMAP_RXRING(nifp, i);
				if (nm_ring_empty(rxring))
					continue;

				m = receive_packets(rxring, targ->g->burst, dump);
				received += m;
			}
			targ->count = received;
		}
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);

out:
	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}

/* very crude code to print a number in normalized form.
 * Caller has to make sure that the buffer is large enough.
 */
static const char *
norm(char *buf, double val)
{
	char *units[] = { "", "K", "M", "G", "T" };
	u_int i;

	for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
		val /= 1000;
	sprintf(buf, "%.2f %s", val, units[i]);
	return buf;
}

/* Print TX totals plus packet and bit rates (raw includes framing). */
static void
tx_output(uint64_t sent, int size, double delta)
{
	double bw, raw_bw, pps;
	char b1[40], b2[80], b3[80];

	printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
	       (unsigned long long)sent, size, delta);
	if (delta == 0)
		delta = 1e-6;
	if (size < 60)		/* correct for min packet size */
		size = 60;
	pps = sent / delta;
	bw = (8.0 * size * sent) / delta;
	/* raw packets have 4 bytes crc + 20 bytes framing */
	raw_bw = (8.0 * (size + 24) * sent) / delta;

	printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
		norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) );
}

/* Print RX totals plus the packet rate. */
static void
rx_output(uint64_t received, double delta)
{
	double pps;
	char b1[40];

	printf("Received %llu packets, in %.2f seconds.\n",
		(unsigned long long) received, delta);

	if (delta == 0)
		delta = 1e-6;
	pps = received / delta;
	printf("Speed: %spps\n", norm(b1, pps));
}
// Kick off the flow: initialise the congestion window to a single MSS,
// mark that entire window as unacknowledged, then transmit.
void RcpSrc::startflow() {
    _cwnd = _mss;
    _unacked = _mss;
    send_packets();
}
/*
 * Multi-interface trace replayer: read a mapping file (src-ip,final-ip,
 * device per line), open a pcap handle per mapping entry, then replay
 * each trace file from the command line `loop` times (forever when
 * loop <= 0) via send_packets().
 */
int main(int argc, char **argv)
{
	// LC: file contains the mappings
	FILE *mappingfp;
	char tempc;
	unsigned char *path;
	//LC: mapping table variables
	char sip[16];
	char fip[16];
	char *mdev;
	int it;

	char *cp;
	int c;
	pcap_if_t *devptr;
	int i;
	int devnum;
	char *device = NULL;
	int loop = 1;

	thiszone = gmt2local(0);

	/* Program name = basename(argv[0]). */
	if ((cp = strrchr(argv[0], '/')) != NULL)
		program_name = cp + 1;
	else
		program_name = argv[0];

	/* process options */
	while ((c = getopt(argc, argv, "dvi:s:l:c:m:r:p:h")) != -1) {
		switch (c) {
		case 'd':
			/* List available adapters and exit. */
			if (pcap_findalldevs(&devptr, ebuf) < 0)
				error("%s", ebuf);
			else {
				for (i = 0; devptr != 0; i++) {
					(void)printf("%d. %s", i + 1, devptr->name);
					if (devptr->description != NULL)
						(void)printf(" (%s)", devptr->description);
					(void)putchar('\n');
					devptr = devptr->next;
				}
			}
			exit(EXIT_SUCCESS);
		case 'v':
			++vflag;
			break;
		case 'i':
			/* LC: Not valid anymore
			if ((devnum = atoi(optarg)) != 0) {
				if (devnum < 0)
					error("invalid adapter index");
				if (pcap_findalldevs(&devptr, ebuf) < 0)
					error("%s", ebuf);
				else {
					for (i = 0; i < devnum - 1; i++) {
						devptr = devptr->next;
						if (devptr == NULL)
							error("invalid adapter index");
					}
				}
				device = devptr->name;
			} else {
				device = optarg;
			}*/
			break;
		case 's':
			len = strtol(optarg, NULL, 0);
			if (len != -1 && len != 0) {
				if (len < ETHER_HDR_LEN || len > ETHER_MAX_LEN)
					error("value for length must be between %d to %d",
					      ETHER_HDR_LEN, ETHER_MAX_LEN);
			}
			break;
		case 'l':
			loop = strtol(optarg, NULL, 0); /* loop infinitely if loop <= 0 */
			fprintf(stdout, "in loop %d\n", loop);
			break;
		case 'c':
			max_pkts = strtol(optarg, NULL, 0); /* send all packets if max_pkts <= 0 */
			break;
		case 'm':
			speed = strtod(optarg, NULL);
			if (speed > 0 && speed < SPEED_MIN)
				error("positive value for speed must be at least %f", SPEED_MIN);
			break;
		case 'r':
			linerate = strtol(optarg, NULL, 0);
			if (linerate < LINERATE_MIN || linerate > LINERATE_MAX)
				error("value for rate must be between %d to %d",
				      LINERATE_MIN, LINERATE_MAX);
			break;
		case 'p':
			interval = strtol(optarg, NULL, 0);
			if (interval < 1 || interval > SLEEP_MAX)
				error("value for sleep must be between 1 to %d", SLEEP_MAX);
			break;
		case 'h':
		default:
			usage();
		}
	}

	if (argv[optind] == NULL)
		error("multiple mapping file not specified");
	if (argv[optind+1] == NULL)
		error("trace file not specified");

	// LC: creating mapping table
	printf("Mapping file: %s\r\n", argv[optind]);
	mappingfp = fopen(argv[optind], "r");
	if(mappingfp == NULL){
		fprintf(stdout,"Error, mapping file could not be oppened\n");
		exit(0);
	}
	nentries = 100;
	//LC:
	ipTable = (char*)malloc(sizeof(char) * 3 * 100);
	memset(ipTable, 0, 3 * 100);
	nentries = 0;
	path = (unsigned char *)malloc(sizeof(struct sockaddr));
	/* NOTE(review): feof()-controlled read loop processes the last line
	 * twice on some inputs; consider testing the fscanf result instead. */
	while(!feof(mappingfp)){
		memset(sip,0,16);
		memset(fip,0,16);
		mdev = (char *)malloc(sizeof(char) * 10);
		memset(mdev,0,10);
		fscanf(mappingfp, "%[^,],%[^,],%[^\n]\n", sip,fip,mdev);
		fprintf(stdout, "%s-%s-%s-\n", sip,fip,mdev);
		inet_pton(AF_INET, sip, path);
		/* NOTE(review): ipTable is declared as char* here — storing
		 * 32-bit addresses and a char* pointer into its elements
		 * truncates/mixes types; verify ipTable's real declaration. */
		ipTable[(nentries * 3) + 0] = (unsigned int)htonl(inet_addr(sip));
		inet_pton(AF_INET, fip, path);
		ipTable[(nentries * 3) + 1] = (unsigned int)htonl(inet_addr(fip));
		ipTable[(nentries * 3) + 2] = mdev;
		nentries++;
	}
	fprintf(stdout, "The mapping contains %d lines\n", nentries);
	fclose(mappingfp);

	/* Open one live pcap handle per mapping entry, kept in pdl[]. */
	for(it = 0; it < nentries; it++){
		fprintf(stdout, "Opening device %s .. %u %u \n",
			ipTable[(it * 3) + 2],
			(unsigned int)ipTable[(it * 3)],
			(unsigned int)ipTable[(it * 3) + 1]);

		// empty error buffer to grab warning message (if exist) from pcap_open_live() below
		*ebuf = '\0';

		/* note that we are doing this for sending packets, not capture */
		pd = pcap_open_live(ipTable[(it * 3) + 2],
				    ETHER_MAX_LEN,	// portion of packet to capture
				    1,			// promiscuous mode is on
				    1000,		// read timeout, in milliseconds
				    ebuf);
		pdl[it] = pd;
		//ipTable[(it * 3) + 2] = pd;
		if (pd == NULL)
			error("%s", ebuf);
		else if (*ebuf)
			notice("%s", ebuf); // warning message from pcap_open_live() above
	}

	/* buffer to store data for each packet including its link-layer header, freed in cleanup() */
	pkt_data = (u_char *)malloc(sizeof(u_char) * ETHER_MAX_LEN);
	if (pkt_data == NULL)
		error("malloc(): cannot allocate memory for pkt_data");
	memset(pkt_data, 0, ETHER_MAX_LEN);

	/* set signal handler for SIGINT (Control-C) */
	(void)signal(SIGINT, cleanup);

	if (gettimeofday(&start, NULL) == -1)
		notice("gettimeofday(): %s", strerror(errno));

	/* NOTE(review): send_packets() receives `mdev`, which at this point
	 * holds the device string from the LAST mapping-file iteration —
	 * confirm this is intended rather than a per-entry device. */
	if (loop > 0) {
		while (loop--) {
			for (i = optind + 1; i < argc; i++){ /* for each trace file */
				send_packets(mdev, argv[i]);
			}
		}
	}
	/* send infinitely if loop <= 0 until user Control-C */
	else {
		while (1) {
			for (i = optind + 1; i < argc; i++)
				send_packets(mdev, argv[i]);
		}
	}

	cleanup(0);

	/* NOTREACHED */
	exit(EXIT_SUCCESS);
}
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 *
 * Same structure as the cycle-based variant, but built on the odp_time
 * API: spin between burst deadlines (accumulating idle ticks), then
 * allocate and enqueue up to batch_len packets per burst, accounting
 * for allocation/enqueue failures in the per-thread stats.
 */
static int run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_pktout_queue_t pktout;
	pkt_tx_stats_t *stats;
	odp_time_t cur_time, send_time_end, send_duration;
	odp_time_t burst_gap_end, burst_gap;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_packet_t tx_packet[BATCH_LEN_MAX];
	odp_time_t idle_start = ODP_TIME_NULL;
	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	if (odp_pktout_queue(globals->pktio_tx, &pktout, 1) != 1)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

	/* Gap between bursts so batches average out to `pps`. */
	burst_gap = odp_time_local_from_ns(
		ODP_TIME_SEC_IN_NS / (targs->pps / targs->batch_len));
	send_duration =
		odp_time_local_from_ns(targs->duration * ODP_TIME_SEC_IN_NS);

	/* Start all TX threads at the same instant. */
	odp_barrier_wait(&globals->tx_barrier);

	cur_time = odp_time_local();
	send_time_end = odp_time_sum(cur_time, send_duration);
	burst_gap_end = cur_time;
	while (odp_time_cmp(send_time_end, cur_time) > 0) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (odp_time_cmp(burst_gap_end, cur_time) > 0) {
			/* Not yet time for the next burst: spin, tracking
			 * when the idle period began. */
			cur_time = odp_time_local();
			if (!odp_time_cmp(idle_start, ODP_TIME_NULL))
				idle_start = cur_time;
			continue;
		}

		if (odp_time_cmp(idle_start, ODP_TIME_NULL) > 0) {
			odp_time_t diff = odp_time_diff(cur_time, idle_start);

			stats->s.idle_ticks =
				odp_time_sum(diff, stats->s.idle_ticks);

			idle_start = ODP_TIME_NULL;
		}

		burst_gap_end = odp_time_sum(burst_gap_end, burst_gap);

		/* Top up the batch, leaving room for packets still queued
		 * from the previous (partially failed) enqueue.
		 * NOTE(review): comparing alloc_cnt against batch_len counts
		 * a "failure" whenever unsent_pkts > 0 even if the smaller
		 * request fully succeeded — confirm intended. */
		alloc_cnt = alloc_packets(tx_packet, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(pktout, tx_packet, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_time = odp_time_local();
	}

	VPRINT(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
	       " AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
	       thr_id, stats->s.tx_cnt, stats->s.enq_failures,
	       stats->s.alloc_failures,
	       odp_time_to_ns(stats->s.idle_ticks) /
	       (uint64_t)ODP_TIME_MSEC_IN_NS);

	return 0;
}
/* Convert a struct timeval (usec resolution) to a struct timespec (nsec). */
static __inline struct timespec timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

/* Convert a struct timespec (nsec resolution) to a struct timeval (usec). */
static __inline struct timeval timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}

/* a + b with carry so that tv_nsec stays in [0, 1e9). Assumes normalized inputs. */
static __inline struct timespec timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

/* a - b with borrow so that tv_nsec stays in [0, 1e9). Assumes normalized inputs. */
static __inline struct timespec timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}

/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 * (Sleeps via poll(NULL, 0, 1) when more than ~1ms remains, otherwise
 * busy-waits for precision; returns the clock value once ts has passed.)
 */
static struct timespec wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}

/*
 * Transmit thread body (netmap pkt-gen). Sends up to n packets (n == 0 means
 * unlimited) via one of three paths depending on dev_type: pcap injection,
 * a tap file descriptor, or netmap TX rings. Optional rate limiting paces
 * output in bursts of g->burst using wait_time(). Progress is published in
 * targ->count; targ->cancel requests early exit.
 */
static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads, sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0}; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	/*
	 * Start the frame so that exactly virt_header bytes of pkt->vh precede
	 * the packet payload.
	 * NOTE(review): arithmetic on void* is a GCC extension — confirm the
	 * build never uses a strict ISO compiler.
	 */
	frame = pkt;
	frame += sizeof(pkt->vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	D("start");
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLOUT);

	/* main loop.*/
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		/* Round the start time up ~2s to a whole second, then sync to it. */
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
	if (targ->g->dev_type == DEV_PCAP) {
		/* pcap path: inject each frame; refresh count every ~10000 sends. */
		pcap_t *p = targ->g->p;

		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (pcap_inject(p, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
	} else if (targ->g->dev_type == DEV_TAP) { /* tap */
		D("writing to file desc %d", targ->g->main_fd);

		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (write(targ->g->main_fd, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
	} else {
		/* netmap path. */
		int tosend = 0;
		int frags = targ->g->frags;

		while (!targ->cancel && (n == 0 || sent < n)) {

			if (rate_limit && tosend <= 0) {
				/* Burst exhausted: wait for the next slot. */
				tosend = targ->g->burst;
				nexttime = timespec_add(nexttime, targ->g->tx_period);
				wait_time(nexttime);
			}

			/*
			 * wait for available room in the send queue(s)
			 */
			if (poll(fds, 1, 2000) <= 0) {
				if (targ->cancel)
					break;
				D("poll error/timeout on queue %d: %s", targ->me,
					strerror(errno));
				goto quit;
			}
			if (fds[0].revents & POLLERR) {
				D("poll error");
				goto quit;
			}
			/*
			 * scan our queues and send on those with room
			 */
			/* After 100k packets, drop OPT_COPY unless the user asked for it. */
			if (options & OPT_COPY && sent > 100000 &&
			    !(targ->g->options & OPT_COPY) ) {
				D("drop copy");
				options &= ~OPT_COPY;
			}
			for (i = targ->qfirst; i < targ->qlast; i++) {
				int m, limit = rate_limit ?  tosend : targ->g->burst;
				if (n > 0 && n - sent < limit)
					limit = n - sent;
				txring = NETMAP_TXRING(nifp, i);
				if (nm_ring_empty(txring))
					continue;
				/* Round limit up to a whole number of fragments. */
				if (frags > 1)
					limit = ((limit + frags - 1) / frags) * frags;
				m = send_packets(txring, pkt, frame, size, targ->g,
						 limit, options, frags);
				ND("limit %d avail %d frags %d m %d",
					limit, txring->avail, frags, m);
				sent += m;
				targ->count = sent;
				if (rate_limit) {
					tosend -= m;
					if (tosend <= 0)
						break;
				}
			}
		}
		/* flush any remaining packets */
		ioctl(fds[0].fd, NIOCTXSYNC, NULL);

		/* final part: wait all the TX queues to be empty. */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			txring = NETMAP_TXRING(nifp, i);
			while (nm_tx_pending(txring)) {
				ioctl(fds[0].fd, NIOCTXSYNC, NULL);
				usleep(1); /* wait 1 tick */
			}
		}
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}

/* pcap_dispatch() callback: counts received packets into *(int *)user. */
static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
void menu() { small_uint_t cmd; int full_duplex; printf(&debug, "Free memory: %d bytes\n", mem_available(&pool)); printf(&debug, "Ethernet: %s", eth_get_carrier(ð) ? "Cable OK" : "No cable"); if (eth_get_speed(ð, &full_duplex)) { printf(&debug, ", %s", full_duplex ? "Full Duplex" : "Half Duplex"); } printf(&debug, ", %u interrupts\n", eth.intr); printf(&debug, "Transmit: %ld packets, %ld collisions, %ld errors\n", eth.netif.out_packets, eth.netif.out_collisions, eth.netif.out_errors); printf(&debug, "Receive: %ld packets, %ld errors, %ld lost\n", eth.netif.in_packets, eth.netif.in_errors, eth.netif.in_discards); printf(&debug, "\n 1. Transmit 1 packet"); printf(&debug, "\n 2. Transmit 2 packets"); printf(&debug, "\n 3. Transmit 8 packets"); printf(&debug, "\n 4. Run send/receive test"); printf(&debug, "\n 5. Packet size: %d bytes", packet_size); printf(&debug, "\n 6. Local loopback: %s", local_loop ? "Enabled" : "Disabled"); puts(&debug, "\n\n"); for (;;) { /* Ввод команды. */ puts(&debug, "Command: "); while (peekchar (&debug) < 0) timer_delay(&timer, 50); cmd = getchar(&debug); putchar(&debug, '\n'); if (cmd == '\n' || cmd == '\r') break; if (cmd == '1') { send_packets(1); break; } if (cmd == '2') { send_packets(2); break; } if (cmd == '3') { send_packets(8); break; } if (cmd == '4') { run_test(); break; } if (cmd == '5') { try_again: printf(&debug, "Enter packet size (1-1518): "); packet_size = get_short(packet_size); if (packet_size <= 0 || packet_size > 1518) { printf(&debug, "Invalid value, try again."); goto try_again; } putchar(&debug, '\n'); data_pattern = mem_realloc(data_pattern, packet_size); if (!data_pattern) { printf(&debug, "No memory for data_pattern\n"); uos_halt(1); } int i; for (i = 0; i < packet_size; i++) data_pattern[i] = i; if (packet_size >= 6) memset(data_pattern, 0xFF, 6); if (packet_size >= 12) memcpy(data_pattern + 6, eth.netif.ethaddr, 6); break; } if (cmd == '6') { local_loop = !local_loop; eth_set_loop(ð, local_loop); break; } if 
(cmd == CTL('E')) { /* Регистры Ethernet. */ putchar(&debug, '\n'); eth_debug(ð, &debug); putchar(&debug, '\n'); continue; } if (cmd == CTL('T')) { /* Список задач uOS. */ printf(&debug, "\nFree memory: %u bytes\n\n", mem_available(&pool)); task_print(&debug, 0); task_print(&debug, (task_t*) stack_console); task_print(&debug, (task_t*) stack_test); task_print(&debug, (task_t*) eth.stack); putchar(&debug, '\n'); continue; } } }
/*
 * Transmit thread body (earlier netmap pkt-gen variant, no rate limiting and
 * no cancel flag). Sends up to n packets (n == 0 means unlimited) either via
 * pcap injection or via the thread's netmap TX rings [qfirst, qlast), then
 * records completion time in targ->toc. Progress is published in targ->count.
 */
static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;

	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads, sent = 0;
	int options = targ->g->options | OPT_COPY;
	D("start");
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;
	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLOUT);

	/* main loop.*/
	gettimeofday(&targ->tic, NULL);
    if (targ->g->use_pcap) {
	/* pcap path: inject the prebuilt frame; refresh count every ~10000 sends. */
	int size = targ->g->pkt_size;
	void *pkt = &targ->pkt;
	pcap_t *p = targ->g->p;

	for (i = 0; n == 0 || sent < n; i++) {
		if (pcap_inject(p, pkt, size) != -1)
			sent++;
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	}
    } else {
	while (n == 0 || sent < n) {

		/*
		 * wait for available room in the send queue(s)
		 */
		if (poll(fds, 1, 2000) <= 0) {
			D("poll error/timeout on queue %d", targ->me);
			goto quit;
		}
		/*
		 * scan our queues and send on those with room
		 */
		/* After 100k packets, drop OPT_COPY unless the user asked for it. */
		if (options & OPT_COPY && sent > 100000 &&
		    !(targ->g->options & OPT_COPY) ) {
			D("drop copy");
			options &= ~OPT_COPY;
		}
		for (i = targ->qfirst; i < targ->qlast; i++) {
			int m, limit = targ->g->burst;
			if (n > 0 && n - sent < limit)
				limit = n - sent;
			txring = NETMAP_TXRING(nifp, i);
			if (txring->avail == 0)
				continue;
			m = send_packets(txring, &targ->pkt, targ->g->pkt_size,
					 limit, options);
			sent += m;
			targ->count = sent;
		}
	}
	/* flush any remaining packets */
	ioctl(fds[0].fd, NIOCTXSYNC, NULL);

	/* final part: wait all the TX queues to be empty. */
	for (i = targ->qfirst; i < targ->qlast; i++) {
		txring = NETMAP_TXRING(nifp, i);
		while (!NETMAP_TX_RING_EMPTY(txring)) {
			ioctl(fds[0].fd, NIOCTXSYNC, NULL);
			usleep(1); /* wait 1 tick */
		}
	}
    }

	gettimeofday(&targ->toc, NULL);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}
/*
 * Process an incoming ACK and drive the NewReno-style congestion state
 * machine (see RFC 3782 for the fast-recovery rules this mirrors).
 * State touched: _last_acked, _dupacks, _cwnd, _ssthresh, _unacked,
 * _effcwnd, _in_fast_recovery, _recoverq. Most paths end by calling
 * send_packets() to transmit whatever the updated window now allows.
 */
void
TcpSrc::receivePacket(Packet& pkt)
{
	TcpAck *p = (TcpAck*)(&pkt);
	TcpAck::seq_t seqno = p->ackno();
	pkt.flow().logTraffic(pkt,*this,TrafficLogger::PKT_RCVDESTROY);
	p->free();
	assert(seqno >= _last_acked); // no dups or reordering allowed in this simple simulator

	if (seqno > _last_acked) { // a brand new ack
		if (!_in_fast_recovery) { // best behaviour: proper ack of a new packet, when we were expecting it
			_last_acked = seqno;
			_dupacks = 0;
			inflate_window(); // slow-start / congestion-avoidance growth
			_unacked = _cwnd;
			_effcwnd = _cwnd;
			if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV);
			send_packets();
			return;
		}
		// We're in fast recovery, i.e. one packet has been
		// dropped but we're pretending it's not serious
		if (seqno >= _recoverq) {
			// got ACKs for all the "recovery window": resume
			// normal service
			uint32_t flightsize = _highest_sent - seqno;
			_cwnd = min(_ssthresh, flightsize + _mss); // deflate the window
			_unacked = _cwnd;
			_effcwnd = _cwnd;
			_last_acked = seqno;
			_dupacks = 0;
			_in_fast_recovery = false;

			if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV_FR_END);
			send_packets();
			return;
		}
		// In fast recovery, and still getting ACKs for the
		// "recovery window"
		// This is dangerous. It means that several packets
		// got lost, not just the one that triggered FR.
		// Partial ACK: deflate cwnd by the newly-acked data (+1 MSS)
		// and retransmit the next presumed-lost segment.
		uint32_t new_data = seqno - _last_acked;
		_last_acked = seqno;
		if (new_data < _cwnd) _cwnd -= new_data; else _cwnd=0;
		_cwnd += _mss;
		if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV_FR);
		retransmit_packet();
		send_packets();
		return;
	}
	// It's a dup ack
	if (_in_fast_recovery) { // still in fast recovery; hopefully the prodigal ACK is on its way
		_cwnd += _mss; // window inflation per dup ACK
		if (_cwnd>_maxcwnd) _cwnd = _maxcwnd;
		// When we restart, the window will be set to
		// min(_ssthresh, flightsize+_mss), so keep track of
		// this
		_unacked = min(_ssthresh, _highest_sent-_recoverq+_mss);
		if (_last_acked+_cwnd >= _highest_sent+_mss) _effcwnd=_unacked; // starting to send packets again
		if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV_DUP_FR);
		send_packets();
		return;
	}
	// Not yet in fast recovery. What should we do instead?
	_dupacks++;

	if (_dupacks!=3) { // not yet serious worry
		if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV_DUP);
		send_packets();
		return;
	}
	// _dupacks==3
	if (_last_acked < _recoverq) {
		//See RFC 3782: if we haven't
		//recovered from timeouts
		//etc. don't do fast recovery
		if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV_3DUPNOFR);
		return;
	}

	// begin fast recovery
	_ssthresh = max(_cwnd/2, (uint32_t)(2 * _mss)); // halve, floor of 2 MSS
	retransmit_packet();
	_cwnd = _ssthresh + 3 * _mss; // ssthresh plus the 3 dup ACKs' worth
	_unacked = _ssthresh;
	_effcwnd = 0;
	_in_fast_recovery = true;
	_recoverq = _highest_sent; // _recoverq is the value of the
				   // first ACK that tells us things
				   // are back on track
	if (_logger) _logger->logTcp(*this, TcpLogger::TCP_RCV_DUP_FASTXMIT);
}