static void lcw_dump_stack(struct lc_watchdog *lcw)
{
        cfs_time_t      current_time;
        cfs_duration_t  delta_time;
        struct timeval  timediff;

        current_time = cfs_time_current();
        delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
        cfs_duration_usec(delta_time, &timediff);

        /*
         * Check to see if we should throttle the watchdog timer to avoid
         * too many dumps going to the console thus triggering an NMI.
         */
        delta_time = cfs_duration_sec(cfs_time_sub(current_time,
                                                   lcw_last_watchdog_time));

        if (delta_time < libcfs_watchdog_ratelimit &&
            lcw_recent_watchdog_count > 3) {
                LCONSOLE_WARN("Service thread pid %u was inactive for "
                              "%lu.%.02lus. Watchdog stack traces are limited "
                              "to 3 per %d seconds, skipping this one.\n",
                              (int)lcw->lcw_pid,
                              timediff.tv_sec,
                              timediff.tv_usec / 10000,
                              libcfs_watchdog_ratelimit);
        } else {
                if (delta_time < libcfs_watchdog_ratelimit) {
                        lcw_recent_watchdog_count++;
                } else {
                        memcpy(&lcw_last_watchdog_time, &current_time,
                               sizeof(current_time));
                        lcw_recent_watchdog_count = 0;
                }

                LCONSOLE_WARN("Service thread pid %u was inactive for "
                              "%lu.%.02lus. The thread might be hung, or it "
                              "might only be slow and will resume later. "
                              "Dumping the stack trace for debugging purposes:"
                              "\n",
                              (int)lcw->lcw_pid,
                              timediff.tv_sec,
                              timediff.tv_usec / 10000);
                lcw_dump(lcw);
        }
}
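/*
 * A minimal, self-contained sketch of the rate-limiting pattern used above,
 * written with plain time(2) instead of the libcfs time primitives. The
 * names dump_throttled and RATELIMIT_SECS are hypothetical, introduced only
 * for illustration; they are not part of the libcfs API.
 */
#include <time.h>

#define RATELIMIT_SECS 300      /* stands in for libcfs_watchdog_ratelimit */

static time_t last_dump_time;
static int    recent_dump_count;

/* Returns 1 if a stack dump should be skipped, 0 if it may proceed. */
static int dump_throttled(void)
{
        time_t now = time(NULL);

        if (now - last_dump_time < RATELIMIT_SECS &&
            recent_dump_count > 3)
                return 1;               /* over the per-window limit: skip */

        if (now - last_dump_time < RATELIMIT_SECS) {
                recent_dump_count++;    /* same window, still under limit */
        } else {
                last_dump_time = now;   /* a new window starts here */
                recent_dump_count = 0;
        }
        return 0;
}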
int
libcfs_sock_write (cfs_socket_t *sock, void *buffer, int nob, int timeout)
{
        int            rc;
        struct pollfd  pfd;
        cfs_time_t     start_time = cfs_time_current();

        pfd.fd = sock->s_fd;
        pfd.events = POLLOUT;
        pfd.revents = 0;

        /* poll(2) measures timeout in msec */
        timeout *= 1000;

        while (nob != 0 && timeout > 0) {
                cfs_time_t current_time;

                rc = poll(&pfd, 1, timeout);
                if (rc < 0)
                        return -errno;
                if (rc == 0)
                        return -ETIMEDOUT;
                if ((pfd.revents & POLLOUT) == 0)
                        return -EIO;

                rc = write(sock->s_fd, buffer, nob);
                if (rc < 0)
                        return -errno;
                if (rc == 0)
                        return -EIO;

                buffer = ((char *)buffer) + rc;
                nob -= rc;

                current_time = cfs_time_current();
                timeout -= 1000 * cfs_duration_sec(cfs_time_sub(current_time,
                                                                start_time));
                start_time = current_time;
        }

        if (nob == 0)
                return 0;
        else
                return -ETIMEDOUT;
}
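/*
 * A standalone POSIX sketch of the same "write everything or time out"
 * loop, for readers without the libcfs wrappers. write_full() is a
 * hypothetical name; only standard poll(2)/write(2) calls are used, and
 * the elapsed-time accounting is coarse (whole seconds), mirroring the
 * cfs_duration_sec() arithmetic above.
 */
#include <errno.h>
#include <poll.h>
#include <time.h>
#include <unistd.h>

/* Write all of nob bytes to fd, or fail within roughly timeout_sec. */
static int write_full(int fd, const void *buffer, size_t nob, int timeout_sec)
{
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };
        int    timeout_ms = timeout_sec * 1000;
        time_t start = time(NULL);

        while (nob != 0 && timeout_ms > 0) {
                ssize_t rc;
                time_t  now;

                rc = poll(&pfd, 1, timeout_ms);
                if (rc < 0)
                        return -errno;
                if (rc == 0)
                        return -ETIMEDOUT;
                if ((pfd.revents & POLLOUT) == 0)
                        return -EIO;

                rc = write(fd, buffer, nob);
                if (rc < 0)
                        return -errno;
                if (rc == 0)
                        return -EIO;

                buffer = (const char *)buffer + rc;
                nob -= rc;

                /* charge the elapsed whole seconds against the budget */
                now = time(NULL);
                timeout_ms -= 1000 * (int)(now - start);
                start = now;
        }
        return nob == 0 ? 0 : -ETIMEDOUT;
}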
ssize_t
libcfs_sock_read(struct lnet_xport *lx, void *buffer, size_t nob, int timeout)
{
        ssize_t        rc;
        struct pollfd  pfd;
        cfs_time_t     start_time = cfs_time_current();

        pfd.fd = lx->lx_fd;
        pfd.events = POLLIN;
        pfd.revents = 0;

        /* poll(2) measures timeout in msec */
        timeout *= 1000;

        while (nob != 0 && timeout > 0) {
                cfs_time_t current_time;

                rc = poll(&pfd, 1, timeout);
                if (rc < 0)
                        return -errno;
                if (rc == 0)
                        return -ETIMEDOUT;
                if ((pfd.revents & POLLIN) == 0)
                        return -EIO;

                rc = read(lx->lx_fd, buffer, nob);
                if (rc < 0)
                        return -errno;
                if (rc == 0)
                        return -EIO;

                buffer = ((char *)buffer) + rc;
                nob -= rc;

                /* timeout is in msec, so scale the elapsed seconds by 1000
                 * and restart the measurement from now, as in
                 * libcfs_sock_write() */
                current_time = cfs_time_current();
                timeout -= 1000 * cfs_duration_sec(cfs_time_sub(current_time,
                                                                start_time));
                start_time = current_time;
        }

        if (nob == 0)
                return 0;
        else
                return -ETIMEDOUT;
}
ssize_t
libcfs_ssl_sock_read(struct lnet_xport *lx, void *buf, size_t n, int timeout)
{
        cfs_time_t     start_time = cfs_time_current();
        struct pollfd  pfd;
        ssize_t        rc;

        pfd.fd = lx->lx_fd;
        pfd.events = POLLIN;
        pfd.revents = 0;

        /* poll(2) measures timeout in msec */
        timeout *= 1000;

        while (n != 0 && timeout > 0) {
                cfs_time_t current_time;

                rc = poll(&pfd, 1, timeout);
                if (rc < 0)
                        return (-errno);
                if (rc == 0)
                        return (-ETIMEDOUT);
                if ((pfd.revents & POLLIN) == 0)
                        return (-EIO);

                rc = SSL_read(lx->lx_ssl, buf, n);
                if (rc < 0)
                        return (-errno);
                if (rc == 0)
                        return (-EIO);

                buf = (char *)buf + rc;
                n -= rc;

                /* timeout is in msec, so scale the elapsed seconds by 1000
                 * and restart the measurement from now, as in
                 * libcfs_sock_write() */
                current_time = cfs_time_current();
                timeout -= 1000 * cfs_duration_sec(cfs_time_sub(current_time,
                                                                start_time));
                start_time = current_time;
        }
        if (n == 0)
                return (0);
        return (-ETIMEDOUT);
}
static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
{
        cfs_time_t newtime = cfs_time_current();

        if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
                struct timeval timediff;
                cfs_time_t delta_time = cfs_time_sub(newtime,
                                                     lcw->lcw_last_touched);
                cfs_duration_usec(delta_time, &timediff);

                LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
                              "This indicates the system was overloaded (too "
                              "many service threads, or there were not enough "
                              "hardware resources).\n",
                              lcw->lcw_pid,
                              message,
                              timediff.tv_sec,
                              timediff.tv_usec / 10000);
        }
        lcw->lcw_last_touched = newtime;
}
inline cfs_time_t round_timeout(cfs_time_t timeout)
{
        return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0))
                                + 1);
}
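/*
 * Worked example (assuming a tick rate of HZ = 100, so cfs_time_seconds(1)
 * yields 100 ticks and cfs_duration_sec() divides by 100):
 *
 *   round_timeout(230) -> cfs_time_seconds(2 + 1) -> 300 ticks
 *
 * i.e. a 2.3-second duration is rounded up to a whole 3 seconds. Because
 * the +1 is unconditional, an exact 2.0 seconds (200 ticks) also rounds
 * up to 3 seconds.
 */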
static inline cfs_time_t capa_renewal_time(struct obd_capa *ocapa)
{
        return cfs_time_sub(ocapa->c_expiry,
                            cfs_time_seconds(ocapa->c_capa.lc_timeout) / 2);
}
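/*
 * Worked example: with a capability timeout of lc_timeout = 600 seconds,
 * cfs_time_seconds(600) / 2 is 300 seconds' worth of ticks, so renewal is
 * scheduled 300 s before c_expiry -- halfway through the lease, leaving a
 * full half-lease of slack for the renewal to complete before expiry.
 */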
int
LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
            lnet_event_t *event, int *which)
{
        int             i;
        int             rc;
#ifdef __KERNEL__
        cfs_waitlink_t  wl;
        cfs_time_t      now;
#else
        struct timeval  then;
        struct timeval  now;
# ifdef HAVE_LIBPTHREAD
        struct timespec ts;
# endif
        lnet_ni_t      *eqwaitni = the_lnet.ln_eqwaitni;
#endif
        ENTRY;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (neq < 1)
                RETURN(-ENOENT);

        LNET_LOCK();

        for (;;) {
#ifndef __KERNEL__
                LNET_UNLOCK();

                /* Recursion breaker */
                if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
                    !LNetHandleIsEqual(eventqs[0], the_lnet.ln_rc_eqh))
                        lnet_router_checker();

                LNET_LOCK();
#endif
                for (i = 0; i < neq; i++) {
                        lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);

                        if (eq == NULL) {
                                LNET_UNLOCK();
                                RETURN(-ENOENT);
                        }

                        rc = lib_get_event (eq, event);
                        if (rc != 0) {
                                LNET_UNLOCK();
                                *which = i;
                                RETURN(rc);
                        }
                }

#ifdef __KERNEL__
                if (timeout_ms == 0) {
                        LNET_UNLOCK();
                        RETURN (0);
                }

                cfs_waitlink_init(&wl);
                set_current_state(TASK_INTERRUPTIBLE);
                cfs_waitq_add(&the_lnet.ln_waitq, &wl);

                LNET_UNLOCK();

                if (timeout_ms < 0) {
                        cfs_waitq_wait (&wl, CFS_TASK_INTERRUPTIBLE);
                } else {
                        struct timeval tv;

                        now = cfs_time_current();
                        cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
                                            cfs_time_seconds(timeout_ms) / 1000);
                        cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
                                          &tv);
                        timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
                        if (timeout_ms < 0)
                                timeout_ms = 0;
                }

                LNET_LOCK();
                cfs_waitq_del(&the_lnet.ln_waitq, &wl);
#else
                if (eqwaitni != NULL) {
                        /* I have a single NI that I have to call into, to get
                         * events queued, or to block. */
                        lnet_ni_addref_locked(eqwaitni);
                        LNET_UNLOCK();

                        if (timeout_ms <= 0) {
                                (eqwaitni->ni_lnd->lnd_wait)(eqwaitni,
                                                             timeout_ms);
                        } else {
                                gettimeofday(&then, NULL);

                                (eqwaitni->ni_lnd->lnd_wait)(eqwaitni,
                                                             timeout_ms);

                                gettimeofday(&now, NULL);
                                timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
                                              (now.tv_usec - then.tv_usec) / 1000;
                                if (timeout_ms < 0)
                                        timeout_ms = 0;
                        }

                        LNET_LOCK();
                        lnet_ni_decref_locked(eqwaitni);

                        /* don't call into eqwaitni again if timeout has
                         * expired */
                        if (timeout_ms == 0)
                                eqwaitni = NULL;

                        continue;       /* go back and check for events */
                }

                if (timeout_ms == 0) {
                        LNET_UNLOCK();
                        RETURN (0);
                }

# ifndef HAVE_LIBPTHREAD
                /* If I'm single-threaded, LNET fails at startup if it can't
                 * set the_lnet.ln_eqwaitni correctly. */
                LBUG();
# else
                if (timeout_ms < 0) {
                        pthread_cond_wait(&the_lnet.ln_cond,
                                          &the_lnet.ln_lock);
                } else {
                        gettimeofday(&then, NULL);

                        ts.tv_sec = then.tv_sec + timeout_ms / 1000;
                        ts.tv_nsec = then.tv_usec * 1000 +
                                     (timeout_ms % 1000) * 1000000;
                        if (ts.tv_nsec >= 1000000000) {
                                ts.tv_sec++;
                                ts.tv_nsec -= 1000000000;
                        }

                        pthread_cond_timedwait(&the_lnet.ln_cond,
                                               &the_lnet.ln_lock, &ts);

                        gettimeofday(&now, NULL);
                        timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
                                      (now.tv_usec - then.tv_usec) / 1000;
                        if (timeout_ms < 0)
                                timeout_ms = 0;
                }
# endif
#endif
        }
}
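/*
 * A hedged usage sketch for LNetEQPoll(), following the return conventions
 * visible in the code above: 0 when the timeout expires with no event, the
 * nonzero result of lib_get_event() (conventionally 1, or -EOVERFLOW when
 * events were dropped) when an event is dequeued, and -ENOENT for a bad
 * handle. poll_example() is a hypothetical name, and eqh is assumed to be
 * a valid handle obtained earlier (e.g. from LNetEQAlloc()).
 */
static void poll_example(lnet_handle_eq_t eqh)
{
        lnet_event_t event;
        int          which;
        int          rc;

        for (;;) {
                rc = LNetEQPoll(&eqh, 1, 1000 /* ms */, &event, &which);
                if (rc == 0)
                        continue;               /* timed out; poll again */
                if (rc == -EOVERFLOW)
                        CWARN("events dropped on EQ %d\n", which);
                else if (rc < 0)
                        break;                  /* invalid handle, give up */

                /* handle 'event' here */
        }
}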