int count_syscall(struct tcb *tcp, struct timeval *tv) { tcp->flags &= ~TCB_INSYSCALL; if (tcp->scno < 0 || tcp->scno >= nsyscalls) return 0; if (!counts) { counts = calloc(nsyscalls, sizeof(*counts)); if (!counts) { fprintf(stderr, "strace: out of memory for call counts\n"); exit(1); } } counts[tcp->scno].calls++; if (tcp->u_error) counts[tcp->scno].errors++; tv_sub(tv, tv, &tcp->etime); #ifdef LINUX if (tv_cmp(tv, &tcp->dtime) > 0) { static struct timeval one_tick; if (one_tick.tv_usec == 0) { /* Initialize it. */ struct itimerval it; memset(&it, 0, sizeof it); it.it_interval.tv_usec = 1; setitimer(ITIMER_REAL, &it, NULL); getitimer(ITIMER_REAL, &it); one_tick = it.it_interval; } if (tv_nz(&tcp->dtime)) *tv = tcp->dtime; else if (tv_cmp(tv, &one_tick) > 0) { if (tv_cmp(&shortest, &one_tick) < 0) *tv = shortest; else *tv = one_tick; } } #endif /* LINUX */ if (tv_cmp(tv, &shortest) < 0) shortest = *tv; tv_add(&counts[tcp->scno].time, &counts[tcp->scno].time, tv); return 0; }
/*
 * Accumulate per-syscall statistics for the -c summary.
 * Counts the call (and error, if any) and adds the time spent in the
 * syscall to its running total.  Uses system time (dtime) when wall
 * time looks inflated by tracer overhead, unless count_wallclock
 * forces the raw wall-clock interval.
 */
void
count_syscall(struct tcb *tcp, const struct timeval *syscall_exiting_tv)
{
	struct timeval wtv;		/* wall-clock time spent in syscall */
	struct timeval *tv = &wtv;	/* the interval actually accumulated */
	struct call_counts *cc;
	unsigned long scno = tcp->scno;

	if (!SCNO_IN_RANGE(scno))
		return;

	/* Counter table is allocated lazily on first use. */
	if (!counts)
		counts = xcalloc(nsyscalls, sizeof(*counts));
	cc = &counts[scno];

	cc->calls++;
	if (tcp->u_error)
		cc->errors++;

	/* tv = wall clock time spent while in syscall */
	tv_sub(tv, syscall_exiting_tv, &tcp->etime);

	/* Spent more wall clock time than spent system time? (usually yes) */
	if (tv_cmp(tv, &tcp->dtime) > 0) {
		/* tv_sec == -1 marks "not yet initialized". */
		static struct timeval one_tick = { -1, 0 };

		if (one_tick.tv_sec == -1) {
			/* Initialize it: ask the kernel to round a 1 usec
			 * interval up to its timer granularity. */
			struct itimerval it;

			memset(&it, 0, sizeof it);
			it.it_interval.tv_usec = 1;
			setitimer(ITIMER_REAL, &it, NULL);
			getitimer(ITIMER_REAL, &it);
			one_tick = it.it_interval;
			//FIXME: this hack doesn't work (tested on linux-3.6.11): one_tick = 0.000000
			//tprintf(" one_tick.tv_usec:%u\n", (unsigned)one_tick.tv_usec);
		}

		if (tv_nz(&tcp->dtime))
			/* tv = system time spent, if it isn't 0 */
			tv = &tcp->dtime;
		else if (tv_cmp(tv, &one_tick) > 0) {
			/* tv = smallest "sane" time interval */
			if (tv_cmp(&shortest, &one_tick) < 0)
				tv = &shortest;
			else
				tv = &one_tick;
		}
	}

	if (tv_cmp(tv, &shortest) < 0)
		shortest = *tv;
	/* -w mode (count_wallclock) always charges the wall interval. */
	tv_add(&cc->time, &cc->time, count_wallclock ? &wtv : tv);
}
/*
 * Return nonzero when the timestamp gap between the two packets
 * exceeds max_age, zero otherwise.
 */
int
has_aged(struct pkt_list_entry *new_pkt, struct pkt_list_entry *old_pkt,
	 struct timeval max_age)
{
	struct timeval age;

	age = tv_absdiff(new_pkt->pkt.timestamp, old_pkt->pkt.timestamp);
	return tv_cmp(age, max_age) > 0;
}
void count_syscall(struct tcb *tcp, struct timeval *tv) { if (!SCNO_IN_RANGE(tcp->scno)) return; if (!counts) { counts = calloc(nsyscalls, sizeof(*counts)); if (!counts) die_out_of_memory(); } counts[tcp->scno].calls++; if (tcp->u_error) counts[tcp->scno].errors++; tv_sub(tv, tv, &tcp->etime); if (tv_cmp(tv, &tcp->dtime) > 0) { static struct timeval one_tick; if (one_tick.tv_usec == 0) { /* Initialize it. */ struct itimerval it; memset(&it, 0, sizeof it); it.it_interval.tv_usec = 1; setitimer(ITIMER_REAL, &it, NULL); getitimer(ITIMER_REAL, &it); one_tick = it.it_interval; } if (tv_nz(&tcp->dtime)) *tv = tcp->dtime; else if (tv_cmp(tv, &one_tick) > 0) { if (tv_cmp(&shortest, &one_tick) < 0) *tv = shortest; else *tv = one_tick; } } if (tv_cmp(tv, &shortest) < 0) shortest = *tv; tv_add(&counts[tcp->scno].time, &counts[tcp->scno].time, tv); }
/*
 * Allocate a timeout slot initialized to *value.
 * Returns the new handle, or -1 when value is not strictly positive
 * or no table slot could be obtained.
 */
timeout_t
tmout_create (const struct timeval *value)
{
	timeout_t handle;

	/* Reject zero or negative durations up front. */
	if (tv_cmp (value, &time_0ms) <= 0)
		return -1;

	handle = dtable_add ((void **)&table_, &table_len_, &table_used_,
			     sizeof(struct timeout), (use_checker_t)is_used);
	if (handle == -1)
		return -1;

	tmout_set (handle, value);
	return handle;
}
/*
 * Copy the smaller of *tv_1 and *tv_2 into *result; on a tie *tv_2
 * is chosen.  The copy is skipped when result already aliases the
 * chosen operand.
 */
void
tv_min (struct timeval *result, const struct timeval *tv_1,
	const struct timeval *tv_2)
{
	const struct timeval *min;

	assert (result != NULL);
	assert (tv_1 != NULL);
	assert (tv_2 != NULL);

	min = (tv_cmp (tv_1, tv_2) < 0) ? tv_1 : tv_2;
	if (result != min)
		memcpy (result, min, sizeof(struct timeval));
}
/*
 * Compute the time remaining on a timeout.  When result is non-NULL
 * the remaining interval (which may be negative once expired) is
 * copied into it.  Returns TRUE while time is left, FALSE otherwise.
 */
bool
tmout_left (timeout_t handle, struct timeval *result)
{
	struct timeout *tmout;
	struct timeval elapsed;
	struct timeval remaining;

	assert (is_valid_handle (handle));

	tmout = &table_[handle];
	crono_measure (&tmout->to_crono, &elapsed);
	tv_diff (&remaining, &tmout->to_maxval, &elapsed);

	if (result != NULL)
		memcpy (result, &remaining, sizeof(*result));

	return (tv_cmp (&remaining, &time_0ms) > 0) ? TRUE : FALSE;
}
/*
 * Comparator over syscall indices: orders by accumulated time,
 * descending (hence the negated tv_cmp result).
 */
static int
time_cmp(void *a, void *b)
{
	int idx_a = *((int *) a);
	int idx_b = *((int *) b);

	return -tv_cmp(&counts[idx_a].time, &counts[idx_b].time);
}
/*
 * Main select()-based event loop; runs until m_run is cleared.
 * Each iteration fires due timers, waits on the control pipe and all
 * live client connections, then dispatches readable sockets.
 * Returns 0 on normal shutdown, -1 on a fatal select() error.
 */
int32 EventLoop::run()
{
	SDK_LOG(LOG_LEVEL_TRACE, "eventloop run");
	// "Infinite" sentinel; processTimers() lowers tv below it when a
	// timer is armed.
	const struct timeval c_tvmax = { LONG_MAX, LONG_MAX };
	struct timeval tv;
	struct timeval* ptv;
	fd_set fds;
	while (m_run) {
		tv = c_tvmax;
		processTimers(tv);
		// tv untouched means no timer pending: block indefinitely.
		ptv = (tv_cmp(c_tvmax, tv) == 0) ? NULL : &tv;
		FD_ZERO(&fds);
		SOCKET maxfd = m_ctlfdr;
		FD_SET(m_ctlfdr, &fds);
		// Add every connection with a valid socket to the read set.
		for (CliConnMap::iterator it = m_conns.begin(); it != m_conns.end();it++) {
			CliConn* pcon = it->second;
			if (pcon && pcon->getfd() != INVALID_SOCKET) {
				FD_SET(pcon->getfd(), &fds);
				maxfd = (maxfd >= pcon->getfd()) ? maxfd:pcon->getfd();
			}
		}
		maxfd++;
		//SDK_LOG(LOG_LEVEL_TRACE, "select time out = %s", itostr(tv.tv_sec).c_str());
		int32 ret = select(maxfd, &fds, NULL, NULL, ptv);
		if (ret < 0) {
			// NOTE(review): 4 is presumably EINTR on the target
			// platform, but select() normally returns -1 on error,
			// so this comparison looks like it never matches --
			// verify against the platform's socket layer.
			if (ret != /*SOCK_EINTR*/4) {
				SDK_LOG(LOG_LEVEL_TRACE, "select error %d", ret);
				return -1;
			}
		} else if (ret == 0) {
			//time out
		} else {
			// Control pipe readable: queued operations to process.
			if (FD_ISSET(m_ctlfdr, &fds)) {
				processOps();
			}
			// Collect failed connection ids first; deleting while
			// iterating m_conns would invalidate the iterator.
			std::vector<std::string> errconns;
			for (CliConnMap::iterator it = m_conns.begin(); it != m_conns.end(); it++) {
				CliConn* pcon = it->second;
				if (pcon && FD_ISSET(pcon->getfd(), &fds)) {
					if (pcon->handleRead() < 0) {
						pcon->onDisconnect(true, MY_NETWORK_ERROR);
						errconns.push_back(pcon->getCid());
					}
				}
			}
			for (std::vector<std::string>::iterator it = errconns.begin(); it != errconns.end(); ++it) {
				delConn(*it);
			}
		}
	}
	//onStopAndWait();
	SDK_LOG(LOG_LEVEL_TRACE, "eventloop exit");
	return 0;
}
static int __event_dispatch(void) { fd_set r, w; int nfd; struct event *ev; struct timeval now, timeout, t; FD_ZERO(&r); FD_ZERO(&w); nfd = 0; gettimeofday(&now, NULL); timeout.tv_sec = 10; /* arbitrary */ timeout.tv_usec = 0; TAILQ_INIT(¤t); /* * Build fd_set's */ event_log_debug("%s: building fd set...", __func__); while (!TAILQ_EMPTY(&pending)) { ev = TAILQ_FIRST(&pending); event_del(ev); if (ev->flags & EV_HAS_TIMEOUT) { if (tv_cmp(&now, &ev->expire) >= 0) t.tv_sec = t.tv_usec = 0; else { t = ev->expire; tv_sub(&t, &now); } if (tv_cmp(&t, &timeout) < 0) timeout = t; } if (ev->fd >= 0) { if (ev->flags & EV_READ) { FD_SET(ev->fd, &r); nfd = (nfd > ev->fd) ? nfd : ev->fd; } if (ev->flags & EV_WRITE) { FD_SET(ev->fd, &w); nfd = (nfd > ev->fd) ? nfd : ev->fd; } } __event_add_current(ev); } event_log_debug("%s: waiting for events...", __func__); nfd = select(nfd + 1, &r, &w, NULL, &timeout); if (nfd < 0) return (-1); /* * Process current pending */ event_log_debug("%s: processing events...", __func__); gettimeofday(&now, NULL); while (!TAILQ_EMPTY(¤t)) { ev = TAILQ_FIRST(¤t); __event_del_current(ev); /* check if fd is ready for reading/writing */ if (nfd > 0 && ev->fd >= 0) { if (FD_ISSET(ev->fd, &r) || FD_ISSET(ev->fd, &w)) { if (ev->flags & EV_PERSIST) { if (ev->flags & EV_HAS_TIMEOUT) event_add(ev, &ev->timeout); else event_add(ev, NULL); } nfd --; event_log_debug("%s: calling %p(%d, %p), " \ "ev=%p", __func__, ev->cb, ev->fd, ev->cbarg, ev); (ev->cb)(ev->fd, (ev->flags & (EV_READ|EV_WRITE)), ev->cbarg); continue; } } /* if event has no timeout - just requeue */ if ((ev->flags & EV_HAS_TIMEOUT) == 0) { event_add(ev, NULL); continue; } /* check if event has expired */ if (tv_cmp(&now, &ev->expire) >= 0) { if (ev->flags & EV_PERSIST) event_add(ev, &ev->timeout); event_log_debug("%s: calling %p(%d, %p), ev=%p", __func__, ev->cb, ev->fd, ev->cbarg, ev); (ev->cb)(ev->fd, (ev->flags & (EV_READ|EV_WRITE)), ev->cbarg); continue; } assert((ev->flags & 
(EV_PENDING|EV_CURRENT)) == 0); __event_link(ev); } return (0); }