int main(int argc, const char *argv[])
{
	if (nice(2*MAX_PRIO) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (nice(-2*MAX_PRIO) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (setpriority(-1, 0) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (getpriority(-1) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (setpriority(getpid(), 2*MAX_PRIO) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (setpriority(getpid(), -1) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (setaffinity(-1, 0) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (setaffinity(getpid(), -1) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (setaffinity(getpid(), 3) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	if (getaffinity(-1) != -1)
		handle_error("Error: error checking for syscalls should be implemented.\n");
	fprintf(stdout, "hw4-test-err succeeded\n");
	exit(0);
}
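The test calls a handle_error helper that is not shown here. A minimal sketch of what the call sites imply, assuming it simply reports the message and aborts (the definition below is an assumption, not part of the original test):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: print the diagnostic and terminate with failure. */
static void handle_error(const char *msg)
{
	fputs(msg, stderr);
	exit(1);
}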
static void* slave_main(void* arg) {
	slave_conf_t* my_wc = (slave_conf_t*)arg;
	pthread_mutex_t *my_mut = mut + my_wc->thread_id;
	pthread_cond_t *my_cond = cond + my_wc->thread_id;
	BENCHDECLARE
#if BENCHMARKING
	unsigned long* t_[BS];
	int a1, a2;
	a1 = my_wc->thread_id;
	a2 = my_wc->thread_id + nrn_nthread;
	t_[a1] = t1_[a1];
	t_[a2] = t1_[a2];
#endif
	setaffinity(my_wc->thread_id);
	for (;;) {
		if (busywait_) {
			while (my_wc->flag == 0) { ; }
			if (my_wc->flag == 1) {
				BENCHBEGIN(a1)
				(*my_wc->job)(nrn_threads + my_wc->thread_id);
				BENCHADD(a2)
			} else {
				return (void*)0;
			}
			my_wc->flag = 0;
			pthread_cond_signal(my_cond);
		} else {
int main(void)
{
	struct queue *q = qinit();
	int i;
	pthread_t kids[POP_CNT + PUSH_CNT];

	setaffinity(0);
	msgs = calloc(MSG_CNT, sizeof(struct msg));
	for (i = 0; i < POP_CNT; i++)
		start_thread(i, pop_task, q, &kids[i]);
	for (; i < POP_CNT + PUSH_CNT; i++)
		start_thread(i, push_task, q, &kids[i]);
	for (i = 0; i < POP_CNT + PUSH_CNT; i++)
		pthread_join(kids[i], NULL);
	quit = 0;
	if (pop_total < MSG_CNT) {
		printf("flushing: %d\n", MSG_CNT - pop_total);
		for (i = 0; i < POP_CNT; i++)
			start_thread(i, pop_flush_task, q, &kids[i]);
		for (i = 0; i < POP_CNT; i++)
			pthread_join(kids[i], NULL);
	}
	printf("pop total: %d\n", pop_total);
	printf("pop cycles/msg: %lu\n", pop_cycles / pop_total);
	printf("push cycles/msg: %lu\n", push_cycles / MSG_CNT);
	return 0;
}
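start_thread is not shown either; from the call sites it presumably wraps pthread_create. A hypothetical sketch under that assumption (the task signature and use of the id are guesses):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct queue;	/* opaque; defined elsewhere in the benchmark */

/* Hypothetical wrapper matching the call sites above: run task(q) on a
 * new thread, returning the handle through *tid. */
static void start_thread(int id, void *(*task)(void *), struct queue *q,
	pthread_t *tid)
{
	(void)id;	/* the real helper may use id for naming or affinity */
	if (pthread_create(tid, NULL, task, (void *)q) != 0) {
		perror("pthread_create");
		exit(1);
	}
}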
/* set the affinity for the current task to the given CPU */
int on_cpu(unsigned cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (setaffinity(mask) < 0) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}
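A possible usage sketch for on_cpu(). The perror string suggests it wraps sched_setaffinity(2); the read-back check via sched_getaffinity below is illustrative, not from the original:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;

	if (on_cpu(0) != 0)	/* pin the current task to CPU 0 */
		return 1;
	CPU_ZERO(&mask);
	if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
		printf("pinned to CPU 0: %s\n",
			CPU_ISSET(0, &mask) ? "yes" : "no");
	return 0;
}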
int TestMain ()
{
#if _WIN32||_WIN64 || __linux__ || __FreeBSD_version >= 701000
#if _WIN32||_WIN64
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    if ( si.dwNumberOfProcessors < 2 )
        return Harness::Skipped;
    int availableProcs = (int)si.dwNumberOfProcessors / 2;
    DWORD_PTR mask = 1;
    for ( int i = 1; i < availableProcs; ++i )
        mask |= mask << 1;
    bool err = !SetProcessAffinityMask( GetCurrentProcess(), mask );
#else /* !WIN */
#if __linux__
    int maxProcs = get_nprocs();
    typedef cpu_set_t mask_t;
#if __TBB_MAIN_THREAD_AFFINITY_BROKEN
#define setaffinity(mask) sched_setaffinity(0 /*get the mask of the calling thread*/, sizeof(mask_t), &mask)
#else
#define setaffinity(mask) sched_setaffinity(getpid(), sizeof(mask_t), &mask)
#endif
#else /* __FreeBSD__ */
    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);
    typedef cpuset_t mask_t;
#if __TBB_MAIN_THREAD_AFFINITY_BROKEN
#define setaffinity(mask) cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(mask_t), &mask)
#else
#define setaffinity(mask) cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(mask_t), &mask)
#endif
#endif /* __FreeBSD__ */
    if ( maxProcs < 2 )
        return Harness::Skipped;
    mask_t newMask;
    CPU_ZERO(&newMask);
    int availableProcs = min(maxProcs, (int)sizeof(mask_t) * CHAR_BIT) / 2;
    for ( int i = 0; i < availableProcs; ++i )
        CPU_SET( i, &newMask );
    int err = setaffinity( newMask );
#endif /* !WIN */
    ASSERT( !err, "Setting process affinity failed" );
    ASSERT( tbb::task_scheduler_init::default_num_threads() == availableProcs, NULL );
    ASSERT( (int)tbb::tbb_thread::hardware_concurrency() == availableProcs, NULL );
    return Harness::Done;
#else /* !(WIN || LIN || BSD) */
    return Harness::Skipped;
#endif /* !(WIN || LIN || BSD) */
}
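Outside the TBB harness, the Linux branch of this test reduces to a few lines. A standalone sketch of the same idea, restricting the process to the lower half of the online CPUs and reading the effective count back with CPU_COUNT (a glibc macro):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/sysinfo.h>
#include <unistd.h>

int main(void)
{
	int maxProcs = get_nprocs();
	int avail = maxProcs / 2;
	cpu_set_t mask;
	int i;

	if (avail < 1)
		return 0;	/* nothing to restrict on a uniprocessor */
	CPU_ZERO(&mask);
	for (i = 0; i < avail; i++)
		CPU_SET(i, &mask);
	if (sched_setaffinity(getpid(), sizeof(mask), &mask) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	CPU_ZERO(&mask);
	sched_getaffinity(getpid(), sizeof(mask), &mask);
	printf("now limited to %d CPUs\n", CPU_COUNT(&mask));
	return 0;
}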
int usnet_setup(int argc, char *argv[])
{
	//struct nm_desc *nmd;
	char *p;
	int ret;

	(void)argc;
	(void)argv;
	(void)p;
	//(void)nmd;

	setaffinity(0);

	ret = usnet_get_options(argc, argv);
	if ( ret < 0 ) {
		show_help();
		exit(0);
	}

	g_nmd = usnet_init(g_nmd, (char*)g_interface, 0);

	if (1) {
		struct netmap_if *nifp = g_nmd->nifp;
		struct nmreq *req = &g_nmd->req;
		int i;
		D("first_tx_ring=%d, last_tx_ring=%d",
			g_nmd->first_tx_ring, g_nmd->last_tx_ring);
		D("nifp at offset %d, %d tx %d rx region %d",
			req->nr_offset, req->nr_tx_rings, req->nr_rx_rings,
			req->nr_arg2);
		for (i = 0; i <= req->nr_tx_rings; i++) {
			struct netmap_ring *ring = NETMAP_TXRING(nifp, i);
			D("   TX%d at 0x%p slots %d", i,
				(void *)((char *)ring - (char *)nifp), ring->num_slots);
		}
		for (i = 0; i <= req->nr_rx_rings; i++) {
			struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
			D("   RX%d at 0x%p slots %d", i,
				(void *)((char *)ring - (char *)nifp), ring->num_slots);
		}
	}
	return 0;
}
void *run(void* args)
{
	u_int burst = 1024, wait_link = 4;
	struct thread_args_t *targ = (struct thread_args_t *)args;
	struct nm_desc *desc = (struct nm_desc*)targ->desc;

	if (setaffinity(targ->thread, targ->affinity)) {
		D("setaffinity failed.");
	}

	int efd = epoll_create(1);
	if (efd < 0) {
		D("epoll create failed.");
		exit(1);
	}
	struct epoll_event event, events;
	memset(&event.data, 0, sizeof(event.data));
	event.data.fd = desc->fd;
	event.events = EPOLLIN;
	epoll_ctl(efd, EPOLL_CTL_ADD, desc->fd, &event);

	D("Wait %d secs for link to come up...", wait_link);
	sleep(wait_link);
	D("Ready to go ....%d", desc->fd);

	/* main loop */
	signal(SIGINT, sigint_h);
	while (!do_abort) {
		if (epoll_wait(efd, &events, 1, 200) > 0) {
			rx(targ, desc, burst);
		}
	}
	return NULL;
}
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	int i, rx = 0, n = targ->g->npackets;
	void *frame;
	int size;
	uint32_t sent = 0;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000000, av = 0;

	frame = &targ->pkt;
	frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	now = last_print;
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
		for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
			slot = &ring->slot[ring->cur];
			slot->len = size;
			p = NETMAP_BUF(ring, slot->buf_idx);

			if (nm_ring_empty(ring)) {
				D("-- ouch, cannot send");
			} else {
				struct tstamp *tp;
				nm_pkt_copy(frame, p, size);
				clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
				bcopy(&sent, p+42, sizeof(sent));
				tp = (struct tstamp *)(p+46);
				tp->sec = (uint32_t)ts.tv_sec;
				tp->nsec = (uint32_t)ts.tv_nsec;
				sent++;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
			}
		}
		/* should use a parameter to decide how often to send */
		if (poll(&pfd, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
		/* see what we got back (scan the RX rings) */
		for (i = targ->nmd->first_rx_ring;
			i <= targ->nmd->last_rx_ring; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(ring)) {
				uint32_t seq;
				struct tstamp *tp;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);

				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				tp = (struct tstamp *)(p+46);
				ts.tv_sec = (time_t)tp->sec;
				ts.tv_nsec = (long)tp->nsec;
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (1) D("seq %d/%d delta %d.%09d", seq, sent,
					(int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count++;
				av += ts.tv_nsec;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d", count, min, av/count);
			count = 0;
			av = 0;
			min = 100000000;
			last_print = now;
		}
	}
	return NULL;
}

/*
 * reply to ping requests
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCRXSYNC, NULL);
#else
		if (poll(&pfd, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = nm_ring_space(txring);
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(rxring)) {
				uint16_t *spkt, *dpkt;
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->head = rxring->cur = nm_ring_next(rxring, cur);
				rx++;
				if (txavail == 0)
					continue;
				dst = NETMAP_BUF(txring,
					txring->slot[txcur].buf_idx);
				/* copy... */
				dpkt = (uint16_t *)dst;
				spkt = (uint16_t *)src;
				nm_pkt_copy(src, dst, slot->len);
				dpkt[0] = spkt[3];
				dpkt[1] = spkt[4];
				dpkt[2] = spkt[5];
				dpkt[3] = spkt[0];
				dpkt[4] = spkt[1];
				dpkt[5] = spkt[2];
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = nm_ring_next(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->head = txring->cur = txcur;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}

static __inline int
timespec_ge(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec > b->tv_sec)
		return (1);
	if (a->tv_sec < b->tv_sec)
		return (0);
	if (a->tv_nsec >= b->tv_nsec)
		return (1);
	return (0);
}

static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}

static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}

/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}

static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
	struct netmap_if *nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads;
	int64_t sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0 }; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	if (targ->frame == NULL) {
		frame = pkt;
		frame += sizeof(pkt->vh) - targ->g->virt_header;
		size = targ->g->pkt_size + targ->g->virt_header;
	} else {
		frame = targ->frame;
		size = targ->g->pkt_size;
	}

	D("start, fd %d main_fd %d", targ->fd, targ->g->main_fd);
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* main loop. */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
	if (targ->g->dev_type == DEV_TAP) {
		D("writing to file desc %d", targ->g->main_fd);
		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (write(targ->g->main_fd, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		pcap_t *p = targ->g->p;

		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (pcap_inject(p, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
#endif /* NO_PCAP */
	} else {
		int tosend = 0;
		int frags = targ->g->frags;

		nifp = targ->nmd->nifp;
		while (!targ->cancel && (n == 0 || sent < n)) {
			if (rate_limit && tosend <= 0) {
				tosend = targ->g->burst;
				nexttime = timespec_add(nexttime, targ->g->tx_period);
				wait_time(nexttime);
			}
			/*
			 * wait for available room in the send queue(s)
			 */
			if (poll(&pfd, 1, 2000) <= 0) {
				if (targ->cancel)
					break;
				D("poll error/timeout on queue %d: %s", targ->me,
					strerror(errno));
				// goto quit;
			}
			if (pfd.revents & POLLERR) {
				D("poll error");
				goto quit;
			}
			/*
			 * scan our queues and send on those with room
			 */
			if (options & OPT_COPY && sent > 100000 &&
				!(targ->g->options & OPT_COPY)) {
				D("drop copy");
				options &= ~OPT_COPY;
			}
			for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
				int m, limit = rate_limit ? tosend : targ->g->burst;
				if (n > 0 && n - sent < limit)
					limit = n - sent;
				txring = NETMAP_TXRING(nifp, i);
				if (nm_ring_empty(txring))
					continue;
				if (frags > 1)
					limit = ((limit + frags - 1) / frags) * frags;

				m = send_packets(txring, pkt, frame, size, targ->g,
					limit, options, frags);
				ND("limit %d tail %d frags %d m %d",
					limit, txring->tail, frags, m);
				sent += m;
				targ->count = sent;
				if (rate_limit) {
					tosend -= m;
					if (tosend <= 0)
						break;
				}
			}
		}
		/* flush any remaining packets */
		D("flush tail %d head %d on thread %p",
			txring->tail, txring->head, pthread_self());
		ioctl(pfd.fd, NIOCTXSYNC, NULL);

		/* final part: wait all the TX queues to be empty. */
		for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
			txring = NETMAP_TXRING(nifp, i);
			while (nm_tx_pending(txring)) {
				RD(5, "pending tx tail %d head %d on ring %d",
					txring->tail, txring->head, i);
				ioctl(pfd.fd, NIOCTXSYNC, NULL);
				usleep(1); /* wait 1 tick */
			}
		}
	} /* end DEV_NETMAP */

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}

#ifndef NO_PCAP
static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
#endif /* !NO_PCAP */

static int
receive_packets(struct netmap_ring *ring, u_int limit, int dump)
{
	u_int cur, rx, n;

	cur = ring->cur;
	n = nm_ring_space(ring);
	if (n < limit)
		limit = n;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (dump)
			dump_payload(p, slot->len, ring, cur);
		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;

	return (rx);
}

static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	D("reading from %s fd %d main_fd %d",
		targ->g->ifname, targ->fd, targ->g->main_fd);
	/* unbounded wait for the first packet. */
	for (; !targ->cancel;) {
		i = poll(&pfd, 1, 1000);
		if (i > 0 && !(pfd.revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d",
			i, pfd.revents);
	}
	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (targ->g->dev_type == DEV_TAP) {
		while (!targ->cancel) {
			char buf[MAX_BODYSIZE];
			/* XXX should we poll ? */
			if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
				targ->count++;
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		while (!targ->cancel) {
			/* XXX should we poll ? */
			pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap,
				(u_char *)&targ->count);
		}
#endif /* !NO_PCAP */
	} else {
		int dump = targ->g->options & OPT_DUMP;

		nifp = targ->nmd->nifp;
		while (!targ->cancel) {
			/* Once we started to receive packets, wait at most
			 * 1 second before quitting. */
			if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
				clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
				targ->toc.tv_sec -= 1; /* Subtract timeout time. */
				goto out;
			}

			if (pfd.revents & POLLERR) {
				D("poll err");
				goto quit;
			}

			for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
				int m;

				rxring = NETMAP_RXRING(nifp, i);
				if (nm_ring_empty(rxring))
					continue;

				m = receive_packets(rxring, targ->g->burst, dump);
				received += m;
			}
			targ->count = received;
		}
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);

out:
	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}

/* very crude code to print a number in normalized form.
 * Caller has to make sure that the buffer is large enough.
 */
static const char *
norm(char *buf, double val)
{
	char *units[] = { "", "K", "M", "G", "T" };
	u_int i;

	for (i = 0; val >= 1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
		val /= 1000;
	sprintf(buf, "%.2f %s", val, units[i]);
	return buf;
}

static void
tx_output(uint64_t sent, int size, double delta)
{
	double bw, raw_bw, pps;
	char b1[40], b2[80], b3[80];

	printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
		(unsigned long long)sent, size, delta);
	if (delta == 0)
		delta = 1e-6;
	if (size < 60)		/* correct for min packet size */
		size = 60;
	pps = sent / delta;
	bw = (8.0 * size * sent) / delta;
	/* raw packets have 4 bytes crc + 20 bytes framing */
	raw_bw = (8.0 * (size + 24) * sent) / delta;

	printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
		norm(b1, pps), norm(b2, bw), norm(b3, raw_bw));
}

static void
rx_output(uint64_t received, double delta)
{
	double pps;
	char b1[40];

	printf("Received %llu packets, in %.2f seconds.\n",
		(unsigned long long) received, delta);
	if (delta == 0)
		delta = 1e-6;
	pps = received / delta;
	printf("Speed: %spps\n", norm(b1, pps));
}
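A quick sanity check for norm(): values are repeatedly divided by 1000 and tagged with the matching suffix. A hypothetical harness, assuming norm() from above is in scope:

#include <stdio.h>

int main(void)
{
	char buf[40];

	printf("%s\n", norm(buf, 12345678.0));	/* "12.35 M" */
	printf("%s\n", norm(buf, 950.0));	/* "950.00 " (empty unit) */
	return 0;
}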
int main (int argc, char *argv[])
{
	char *threads_spec;
	pthread_t ebbr, egll, zzzz;
	struct spec b, l, p, m;
	char *some_mem __attribute__((unused)) = malloc(100);

	if (argc > 5 && atoi(argv[5]))
		setaffinity();
	setup_sigusr_handler();
	if (argc > 1)
		loops = atoi(argv[1]);
	if (argc > 2)
		sleepms = atoi(argv[2]);
	if (argc > 3)
		burn = atoll(argv[3]);
	if (argc > 4)
		threads_spec = argv[4];
	else
		threads_spec = "BSBSBSBS";

	fprintf(stderr, "loops/sleep_ms/burn/threads_spec/affinity: %d %d %d %s %d\n",
		loops, sleepms, burn, threads_spec, argc > 5 && atoi(argv[5]));
	fflush(stderr);

	b.name = "Brussels";
	b.burn = *threads_spec++ == 'B';
	b.sleep = *threads_spec++ == 'S';
	b.t = -1;
	if (b.burn || b.sleep) {
		b.t = 1;
		pthread_create(&ebbr, NULL, sleeper_or_burner, &b);
		wait_ready();
	}

	l.name = "London";
	l.burn = *threads_spec++ == 'B';
	l.sleep = *threads_spec++ == 'S';
	l.t = -1;
	if (l.burn || l.sleep) {
		l.t = 2;
		pthread_create(&egll, NULL, sleeper_or_burner, &l);
		wait_ready();
	}

	p.name = "Petaouchnok";
	p.burn = *threads_spec++ == 'B';
	p.sleep = *threads_spec++ == 'S';
	p.t = -1;
	if (p.burn || p.sleep) {
		p.t = 3;
		pthread_create(&zzzz, NULL, sleeper_or_burner, &p);
		wait_ready();
	}

	m.name = "main";
	m.burn = *threads_spec++ == 'B';
	m.sleep = *threads_spec++ == 'S';
	m.t = 0;
	sleeper_or_burner(&m);

	if (b.t != -1) pthread_join(ebbr, NULL);
	if (l.t != -1) pthread_join(egll, NULL);
	if (p.t != -1) pthread_join(zzzz, NULL);

	return 0;
}
void setaffinity(uint64_t const proc) { setaffinity(std::vector<uint64_t>(1,proc)); }
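The std::vector overload that this one-CPU convenience wrapper delegates to is not shown. On Linux it plausibly builds a cpu_set_t from the listed CPUs; a C sketch of that presumed behavior (function name and semantics are assumptions):

#define _GNU_SOURCE
#include <sched.h>
#include <stddef.h>
#include <stdint.h>

/* Presumed behavior of the multi-CPU overload: allow the calling
 * thread to run on every CPU listed in procs[0..n-1]. */
static int setaffinity_list(const uint64_t *procs, size_t n)
{
	cpu_set_t mask;
	size_t i;

	CPU_ZERO(&mask);
	for (i = 0; i < n; i++)
		CPU_SET((int)procs[i], &mask);
	return sched_setaffinity(0, sizeof(mask), &mask);
}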
static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}

static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}

/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}

static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads, sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0 }; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	frame = pkt;
	frame += sizeof(pkt->vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	D("start");
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLOUT);

	/* main loop. */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
	if (targ->g->dev_type == DEV_PCAP) {
		pcap_t *p = targ->g->p;

		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (pcap_inject(p, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
	} else if (targ->g->dev_type == DEV_TAP) { /* tap */
		D("writing to file desc %d", targ->g->main_fd);
		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (write(targ->g->main_fd, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
	} else {
		int tosend = 0;
		int frags = targ->g->frags;

		while (!targ->cancel && (n == 0 || sent < n)) {
			if (rate_limit && tosend <= 0) {
				tosend = targ->g->burst;
				nexttime = timespec_add(nexttime, targ->g->tx_period);
				wait_time(nexttime);
			}
			/*
			 * wait for available room in the send queue(s)
			 */
			if (poll(fds, 1, 2000) <= 0) {
				if (targ->cancel)
					break;
				D("poll error/timeout on queue %d: %s", targ->me,
					strerror(errno));
				goto quit;
			}
			if (fds[0].revents & POLLERR) {
				D("poll error");
				goto quit;
			}
			/*
			 * scan our queues and send on those with room
			 */
			if (options & OPT_COPY && sent > 100000 &&
				!(targ->g->options & OPT_COPY)) {
				D("drop copy");
				options &= ~OPT_COPY;
			}
			for (i = targ->qfirst; i < targ->qlast; i++) {
				int m, limit = rate_limit ? tosend : targ->g->burst;
				if (n > 0 && n - sent < limit)
					limit = n - sent;
				txring = NETMAP_TXRING(nifp, i);
				if (nm_ring_empty(txring))
					continue;
				if (frags > 1)
					limit = ((limit + frags - 1) / frags) * frags;

				m = send_packets(txring, pkt, frame, size, targ->g,
					limit, options, frags);
				ND("limit %d avail %d frags %d m %d",
					limit, txring->avail, frags, m);
				sent += m;
				targ->count = sent;
				if (rate_limit) {
					tosend -= m;
					if (tosend <= 0)
						break;
				}
			}
		}
		/* flush any remaining packets */
		ioctl(fds[0].fd, NIOCTXSYNC, NULL);

		/* final part: wait all the TX queues to be empty. */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			txring = NETMAP_TXRING(nifp, i);
			while (nm_tx_pending(txring)) {
				ioctl(fds[0].fd, NIOCTXSYNC, NULL);
				usleep(1); /* wait 1 tick */
			}
		}
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}

static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);

	/* unbounded wait for the first packet. */
	for (;;) {
		i = poll(fds, 1, 1000);
		if (i > 0 && !(fds[0].revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d",
			i, fds[0].revents);
	}

	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (targ->g->dev_type == DEV_PCAP) {
		while (!targ->cancel) {
			/* XXX should we poll ? */
			pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap, NULL);
		}
	} else if (targ->g->dev_type == DEV_TAP) {
		D("reading from %s fd %d", targ->g->ifname, targ->g->main_fd);
		while (!targ->cancel) {
			char buf[2048];
			/* XXX should we poll ? */
			if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
				targ->count++;
		}
	} else {
		int dump = targ->g->options & OPT_DUMP;

		while (!targ->cancel) {
			/* Once we started to receive packets, wait at most
			 * 1 second before quitting. */
			if (poll(fds, 1, 1 * 1000) <= 0 && !targ->g->forever) {
				clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
				targ->toc.tv_sec -= 1; /* Subtract timeout time. */
				break;
			}

			if (fds[0].revents & POLLERR) {
				D("poll err");
				goto quit;
			}

			for (i = targ->qfirst; i < targ->qlast; i++) {
				int m;

				rxring = NETMAP_RXRING(nifp, i);
				if (nm_ring_empty(rxring))
					continue;

				m = receive_packets(rxring, targ->g->burst, dump);
				received += m;
			}
			targ->count = received;

			// tell the card we have read the data
			//ioctl(fds[0].fd, NIOCRXSYNC, NULL);
		}
	}

	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}
static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *rxring;
	int i, received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);

	/* unbounded wait for the first packet. */
	for (;;) {
		i = poll(fds, 1, 1000);
		if (i > 0 && !(fds[0].revents & POLLERR))
			break;
		D("waiting for initial packets, poll returns %d %d",
			i, fds[0].revents);
	}

	/* main loop, exit after 1s silence */
	gettimeofday(&targ->tic, NULL);
	if (targ->g->use_pcap) {
		for (;;) {
			/* XXX should we poll ? */
			pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap, NULL);
		}
	} else {
		while (1) {
			/* Once we started to receive packets, wait at most
			 * 1 second before quitting. */
			if (poll(fds, 1, 1 * 1000) <= 0) {
				gettimeofday(&targ->toc, NULL);
				targ->toc.tv_sec -= 1; /* Subtract timeout time. */
				break;
			}

			for (i = targ->qfirst; i < targ->qlast; i++) {
				int m;

				rxring = NETMAP_RXRING(nifp, i);
				if (rxring->avail == 0)
					continue;

				m = receive_packets(rxring, targ->g->burst, SKIP_PAYLOAD);
				received += m;
				targ->count = received;
			}

			// tell the card we have read the data
			//ioctl(fds[0].fd, NIOCRXSYNC, NULL);
		}
	}

	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}
static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads, sent = 0;
	int options = targ->g->options | OPT_COPY;

	D("start");
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLOUT);

	/* main loop. */
	gettimeofday(&targ->tic, NULL);

	if (targ->g->use_pcap) {
		int size = targ->g->pkt_size;
		void *pkt = &targ->pkt;
		pcap_t *p = targ->g->p;

		for (i = 0; n == 0 || sent < n; i++) {
			if (pcap_inject(p, pkt, size) != -1)
				sent++;
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
	} else {
		while (n == 0 || sent < n) {
			/*
			 * wait for available room in the send queue(s)
			 */
			if (poll(fds, 1, 2000) <= 0) {
				D("poll error/timeout on queue %d", targ->me);
				goto quit;
			}
			/*
			 * scan our queues and send on those with room
			 */
			if (options & OPT_COPY && sent > 100000 &&
				!(targ->g->options & OPT_COPY)) {
				D("drop copy");
				options &= ~OPT_COPY;
			}
			for (i = targ->qfirst; i < targ->qlast; i++) {
				int m, limit = targ->g->burst;
				if (n > 0 && n - sent < limit)
					limit = n - sent;
				txring = NETMAP_TXRING(nifp, i);
				if (txring->avail == 0)
					continue;
				m = send_packets(txring, &targ->pkt, targ->g->pkt_size,
					limit, options);
				sent += m;
				targ->count = sent;
			}
		}
		/* flush any remaining packets */
		ioctl(fds[0].fd, NIOCTXSYNC, NULL);

		/* final part: wait all the TX queues to be empty. */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			txring = NETMAP_TXRING(nifp, i);
			while (!NETMAP_TX_RING_EMPTY(txring)) {
				ioctl(fds[0].fd, NIOCTXSYNC, NULL);
				usleep(1); /* wait 1 tick */
			}
		}
	}

	gettimeofday(&targ->toc, NULL);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}