static BOOL FKG485_IsReaderDelayed(FKG485_DEVICE_ST *_device, unsigned long *wait_ms)
{
  struct timeval now;

  if (_device == NULL)
    return FALSE;

  gettimeofday(&now, NULL);
  if (compare_timeval(&now, &_device->_keep_cool_until) < 0)
  {
    if (wait_ms != NULL)
    {
      unsigned long diff_us;
      /* Remaining cool-down in microseconds; the compare above guarantees
       * the sum is positive even when the usec term is negative. */
      diff_us  = _device->_keep_cool_until.tv_sec - now.tv_sec;
      diff_us *= 1000000;
      diff_us += _device->_keep_cool_until.tv_usec - now.tv_usec;

      /* Convert to milliseconds */
      diff_us /= 1000;

      /* Only decrease wait_ms, never increase it (to avoid sleeping too much) */
      if ((*wait_ms == 0) || (diff_us < *wait_ms))
        *wait_ms = diff_us;
    }

    return TRUE;
  }

  return FALSE;
}
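
Both this snippet and subtract_timeval in Example #3 below call a pointer-taking compare_timeval. A minimal sketch of that helper, assuming the usual strcmp-style contract (negative if a is earlier, zero if equal, positive if a is later); the eventer loops further down pass struct timevals by value, presumably through their own codebase's variant:

#include <sys/time.h>

/* Hypothetical compare_timeval sketch -- not the original implementation.
 * Returns <0 if *a precedes *b, 0 if they are equal, >0 if *a follows *b. */
static int compare_timeval(const struct timeval *a, const struct timeval *b)
{
  if (a->tv_sec != b->tv_sec)
    return (a->tv_sec < b->tv_sec) ? -1 : 1;
  if (a->tv_usec != b->tv_usec)
    return (a->tv_usec < b->tv_usec) ? -1 : 1;
  return 0;
}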
int
noit_check_schedule_next(noit_module_t *self,
                         struct timeval *last_check, noit_check_t *check,
                         struct timeval *now, dispatch_func_t dispatch,
                         noit_check_t *cause) {
  eventer_t newe;
  struct timeval period, earliest;
  recur_closure_t *rcl;

  assert(cause == NULL);
  assert(check->fire_event == NULL);
  if(check->period == 0) return 0;
  if(NOIT_CHECK_DISABLED(check) || NOIT_CHECK_KILLED(check)) {
    if(!(check->flags & NP_TRANSIENT)) check_slots_dec_tv(last_check);
    return 0;
  }

  /* If we have an event, we know when we intended it to fire.  This means
   * we should schedule that point + period.
   */
  if(now)
    memcpy(&earliest, now, sizeof(earliest));
  else
    gettimeofday(&earliest, NULL);

  /* If the check is unconfigured and needs resolving, we'll set the
   * period down a bit lower so we can pick up the resolution quickly.
   */
  if(!NOIT_CHECK_RESOLVED(check) && NOIT_CHECK_SHOULD_RESOLVE(check) &&
      check->period > 1000) {
    period.tv_sec = 1;
    period.tv_usec = 0;
  }
  else {
    period.tv_sec = check->period / 1000;
    period.tv_usec = (check->period % 1000) * 1000;
  }

  newe = eventer_alloc();
  memcpy(&newe->whence, last_check, sizeof(*last_check));
  add_timeval(newe->whence, period, &newe->whence);
  if(compare_timeval(newe->whence, earliest) < 0)
    memcpy(&newe->whence, &earliest, sizeof(earliest));
  newe->mask = EVENTER_TIMER;
  newe->callback = noit_check_recur_handler;
  rcl = calloc(1, sizeof(*rcl));
  rcl->self = self;
  rcl->check = check;
  rcl->cause = cause;
  rcl->dispatch = dispatch;
  newe->closure = rcl;

  eventer_add(newe);
  check->fire_event = newe;
  return 0;
}
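
The clamp at the top of this scheduler is equivalent to whence = max(last_check + period, now): fire one period after the last intended firing, but never in the past. A compact restatement, assuming the by-value add_timeval/compare_timeval used above; next_fire is an invented name:

/* Scheduling-rule sketch. */
static struct timeval next_fire(struct timeval last_check,
                                struct timeval period,
                                struct timeval earliest)
{
  struct timeval whence;
  add_timeval(last_check, period, &whence);   /* last intended fire + period */
  if (compare_timeval(whence, earliest) < 0)  /* never schedule in the past */
    whence = earliest;
  return whence;
}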
Example #3
/* Perform timeval subtraction
 * - "to - from = result"
 * - returns -1 if 'from' is larger (later) than 'to'
 * - returns 0 on success */
int subtract_timeval(struct timeval *to, struct timeval *from, struct timeval *result)
{
    if (compare_timeval(to, from) < 0)
        return -1;
    result->tv_sec = to->tv_sec - from->tv_sec;
    result->tv_usec = to->tv_usec - from->tv_usec;
    if (result->tv_usec < 0) {
        /* Borrow one second to keep tv_usec within [0, 1000000) */
        result->tv_sec--;
        result->tv_usec += 1000000;
    }
    return 0;
}
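
A quick usage sketch for subtract_timeval, timing a call with wall-clock time; do_work is a hypothetical workload invented for illustration:

#include <stdio.h>
#include <sys/time.h>

extern void do_work(void);                      /* hypothetical workload */

void time_one_call(void)
{
    struct timeval start, end, elapsed;

    gettimeofday(&start, NULL);
    do_work();
    gettimeofday(&end, NULL);

    /* end - start; -1 only if the clock stepped backwards between reads */
    if (subtract_timeval(&end, &start, &elapsed) == 0)
        printf("took %ld.%06ld s\n",
               (long)elapsed.tv_sec, (long)elapsed.tv_usec);
}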
Example #4
static int eventer_ports_impl_loop(int id) {
  struct timeval __dyna_sleep = { 0, 0 };
  struct ports_spec *spec;
  spec = eventer_get_spec_for_event(NULL);

  while(1) {
    struct timeval __sleeptime;
    struct timespec __ports_sleeptime;
    unsigned int fd_cnt = 0;
    int ret;
    port_event_t pevents[MAX_PORT_EVENTS];

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;
 
    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent();

    /* Now we move on to our fd-based events */
    __ports_sleeptime.tv_sec = __sleeptime.tv_sec;
    __ports_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = 1;

    pevents[0].portev_source = 65535; /* This is impossible */

    ret = port_getn(spec->port_fd, pevents, MAX_PORT_EVENTS, &fd_cnt,
                    &__ports_sleeptime);
    spec->wakeup_notify = 0; /* force unlock */
    /* The timeout case is a tad complex with ports.  -1/ETIME is clearly
     * a timeout.  However, it is possible that we got that and fd_cnt isn't
     * 0, which means we both timed out and got events... WTF?
     */
    if(fd_cnt == 0 ||
       (ret == -1 && errno == ETIME && pevents[0].portev_source == 65535))
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);

    if(ret == -1 && (errno != ETIME && errno != EINTR))
      mtevL(eventer_err, "port_getn: %s\n", strerror(errno));

    if(ret < 0)
      mtevL(eventer_deb, "port_getn: %s\n", strerror(errno));

    mtevL(eventer_deb, "debug: port_getn(%d, [], %d) => %d\n",
          spec->port_fd, fd_cnt, ret);

    if(pevents[0].portev_source == 65535) {
      /* the impossible still remains, which means our fd_cnt _must_ be 0 */
      fd_cnt = 0;
    }

    if(fd_cnt > 0) {
      int idx;
      /* Loop a last time to process */
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      for(idx = 0; idx < fd_cnt; idx++) {
        port_event_t *pe;
        eventer_t e;
        int fd, mask;

        pe = &pevents[idx];
        if(pe->portev_source != PORT_SOURCE_FD) continue;
        fd = (int)pe->portev_object;
        mtevAssert((intptr_t)pe->portev_user == fd);
        e = master_fds[fd].e;

        /* It's possible that someone removed the event and freed it
         * before we got here.... bail out if we're null.
         */
        if (!e) continue;

        mask = 0;
        if(pe->portev_events & (POLLIN | POLLHUP))
          mask |= EVENTER_READ;
        if(pe->portev_events & (POLLOUT))
          mask |= EVENTER_WRITE;
        if(pe->portev_events & (POLLERR | POLLHUP | POLLNVAL))
          mask |= EVENTER_EXCEPTION;

        eventer_ports_impl_trigger(e, mask);
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
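
The __dyna_sleep bookkeeping in this loop (and in the kqueue loops below) is an additive backoff: each pass that times out with no events lengthens the next poll timeout by __dyna_increment, capped at eventer_max_sleeptime, while any real activity resets it to zero so a busy loop polls aggressively. A standalone sketch of that policy; the increment and cap values here are invented, not taken from the original configuration:

#include <sys/time.h>

static struct timeval dyna_sleep = { 0, 0 };

/* Called when a poll pass timed out with nothing to do. */
static void backoff_on_idle(void)
{
  static const struct timeval increment = { 0, 10000 }; /* +10 ms per idle pass */
  static const struct timeval cap = { 1, 0 };           /* never sleep > 1 s */

  dyna_sleep.tv_sec += increment.tv_sec;
  dyna_sleep.tv_usec += increment.tv_usec;
  if (dyna_sleep.tv_usec >= 1000000) {                  /* carry into seconds */
    dyna_sleep.tv_sec++;
    dyna_sleep.tv_usec -= 1000000;
  }
  if (dyna_sleep.tv_sec > cap.tv_sec ||
      (dyna_sleep.tv_sec == cap.tv_sec && dyna_sleep.tv_usec > cap.tv_usec))
    dyna_sleep = cap;
}

/* Called when events were actually delivered. */
static void backoff_on_activity(void)
{
  dyna_sleep.tv_sec = dyna_sleep.tv_usec = 0;           /* back to busy polling */
}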
Example #5
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP;

  if(!kqs) {
    kqs = calloc(1, sizeof(*kqs));
    kqs_init(kqs);
  }
  pthread_setspecific(kqueue_setup_key, kqs);
  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* If we're the master, we need to lock the master_kqs and make mods */
    if(master_kqs->__ke_vec_used) {
      struct timespec __zerotime = { 0, 0 };
      pthread_mutex_lock(&kqs_lock);
      fd_cnt = kevent(kqueue_fd,
                      master_kqs->__ke_vec, master_kqs->__ke_vec_used,
                      NULL, 0,
                      &__zerotime);
      noitLT(eventer_deb, &__now, "debug: kevent(%d, [], %d) => %d\n", kqueue_fd, master_kqs->__ke_vec_used, fd_cnt);
      if(fd_cnt < 0) {
        noitLT(eventer_err, &__now, "kevent: %s\n", strerror(errno));
      }
      master_kqs->__ke_vec_used = 0;
      pthread_mutex_unlock(&kqs_lock);
    }

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    noitLT(eventer_deb, &__now, "debug: kevent(%d, [], %d) => %d\n", kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      noitLT(eventer_err, &__now, "kevent: %s\n", strerror(errno));
    }
    else if(fd_cnt == 0) {
      /* timeout */
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            noitLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        assert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
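
The three passes over ke_vec above exist because a single kevent batch can report the same fd more than once (separate EVFILT_READ and EVFILT_WRITE entries); the masks array coalesces them so each fd's callback fires at most once per batch. The core idea restated as a sketch, with simplified types, EV_ERROR handling omitted, the EVENTER_READ/EVENTER_WRITE flags taken from the surrounding code, and a hypothetical handle_fd standing in for eventer_kqueue_impl_trigger:

#include <sys/event.h>

extern void handle_fd(int fd, int mask);        /* hypothetical dispatcher */

static void process_batch(struct kevent *ke_vec, int fd_cnt, int *masks)
{
  int idx;
  for (idx = 0; idx < fd_cnt; idx++)            /* pass 1: clear */
    masks[ke_vec[idx].ident] = 0;
  for (idx = 0; idx < fd_cnt; idx++) {          /* pass 2: aggregate filters */
    if (ke_vec[idx].filter == EVFILT_READ)  masks[ke_vec[idx].ident] |= EVENTER_READ;
    if (ke_vec[idx].filter == EVFILT_WRITE) masks[ke_vec[idx].ident] |= EVENTER_WRITE;
  }
  for (idx = 0; idx < fd_cnt; idx++) {          /* pass 3: fire once per fd */
    int fd = (int)ke_vec[idx].ident;
    if (!masks[fd]) continue;                   /* already dispatched this fd */
    handle_fd(fd, masks[fd]);
    masks[fd] = 0;
  }
}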
Example #6
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP(NULL);

  if(eventer_kqueue_impl_register_wakeup(kqs) == -1) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_loop: could not eventer_kqueue_impl_register_wakeup\n");
  }

  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqs->kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    kqs->wakeup_notify = 0;
    if(fd_cnt > 0 || ke_vec_used)
      mtevLT(eventer_deb, &__now, "[t@%llx] kevent(%d, [...], %d) => %d\n", (vpsized_int)pthread_self(), kqs->kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      mtevLT(eventer_err, &__now, "kevent(s/%d): %s\n", kqs->kqueue_fd, strerror(errno));
    }
    else if(fd_cnt == 0 ||
            (fd_cnt == 1 && ke_vec[0].filter == EVFILT_USER)) {
      /* timeout */
      if(fd_cnt) eventer_kqueue_impl_register_wakeup(kqs);
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) {
          eventer_kqueue_impl_register_wakeup(kqs);
          continue;
        }
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->filter == EVFILT_USER) continue;
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            mtevLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        mtevAssert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
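
Example #6 differs from #5 mainly in its cross-thread wakeup: an EVFILT_USER event lets another thread interrupt the kevent() sleep, and the loop re-arms it (eventer_kqueue_impl_register_wakeup) each time it fires. A plausible arm/post pair using the standard kqueue API; the function names and the ident value are assumptions:

#include <sys/event.h>

#define WAKEUP_IDENT 1                          /* arbitrary user-event id */

/* Arm an auto-clearing EVFILT_USER event so a sleeping kevent() call
 * can be interrupted. Returns -1 on error, like kevent itself. */
static int register_wakeup(int kqueue_fd)
{
  struct kevent kev;
  EV_SET(&kev, WAKEUP_IDENT, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
  return kevent(kqueue_fd, &kev, 1, NULL, 0, NULL);
}

/* Post the wakeup from another thread. */
static int post_wakeup(int kqueue_fd)
{
  struct kevent kev;
  EV_SET(&kev, WAKEUP_IDENT, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
  return kevent(kqueue_fd, &kev, 1, NULL, 0, NULL);
}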
Example #7
static int ssh2_drive_session(eventer_t e, int mask, void *closure,
                              struct timeval *now) {
  int i;
  const char *fingerprint;
  ssh2_check_info_t *ci = closure;
  struct timeval diff;
  int timeout_ms = 10; /* 10ms, gets set below */
  if(ci->state == WANT_CLOSE) {
    noit_check_t *check = ci->check;
    ssh2_log_results(ci->self, ci->check);
    ssh2_cleanup(ci->self, ci->check);
    eventer_remove_fd(e->fd);
    e->opset->close(e->fd, &mask, e);
    check->flags &= ~NP_RUNNING;
    return 0;
  }
  switch(mask) {
    case EVENTER_ASYNCH_WORK:
      if(eventer_set_fd_blocking(e->fd)) {
        ci->timed_out = 0;
        ci->error = strdup("socket error");
        return 0;
      }
      ci->session = libssh2_session_init();
#define set_method(a,b) do { \
  int rv; \
  if(ci->methods.a && \
     (rv = libssh2_session_method_pref(ci->session, b, ci->methods.a)) != 0) { \
    ci->timed_out = 0; \
    ci->error = strdup((rv == LIBSSH2_ERROR_METHOD_NOT_SUPPORTED) ? \
                         #a " method not supported" : "error setting " #a); \
    return 0; \
  } \
} while(0)
      set_method(kex, LIBSSH2_METHOD_KEX);
      set_method(hostkey, LIBSSH2_METHOD_HOSTKEY);
      set_method(crypt_cs, LIBSSH2_METHOD_CRYPT_CS);
      set_method(crypt_sc, LIBSSH2_METHOD_CRYPT_SC);
      set_method(mac_cs, LIBSSH2_METHOD_MAC_CS);
      set_method(mac_sc, LIBSSH2_METHOD_MAC_SC);
      set_method(comp_cs, LIBSSH2_METHOD_COMP_CS);
      set_method(comp_sc, LIBSSH2_METHOD_COMP_SC);
      if(compare_timeval(*now, e->whence) < 0) {
        sub_timeval(e->whence, *now, &diff);
        timeout_ms = diff.tv_sec * 1000 + diff.tv_usec / 1000;
      }
#if LIBSSH2_VERSION_NUM >= 0x010209
      libssh2_session_set_timeout(ci->session, timeout_ms);
#endif
      if (libssh2_session_startup(ci->session, e->fd)) {
        ci->timed_out = 0;
        ci->error = strdup("ssh session startup failed");
        return 0;
      }
      fingerprint = libssh2_hostkey_hash(ci->session, LIBSSH2_HOSTKEY_HASH_MD5);
      if(fingerprint == NULL) {
        /* libssh2_hostkey_hash returns NULL if the hash is unavailable */
        ci->timed_out = 0;
        ci->error = strdup("no MD5 host key hash available");
        return 0;
      }
      for(i=0;i<16;i++) {
        snprintf(ci->fingerprint + (i*2), 3, "%02x",
                 (unsigned char)fingerprint[i]);
      }
      ci->fingerprint[32] = '\0';
      ci->timed_out = 0;
      return 0;
    case EVENTER_ASYNCH_CLEANUP:
      if(ci->session) {
        libssh2_session_disconnect(ci->session, "Bye!");
        libssh2_session_free(ci->session);
        ci->session = NULL;
      }
      ci->state = WANT_CLOSE;
      break;
    default:
      abort();
  }
  return 0;
}
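
The WORK branch converts the time remaining until the event's deadline (e->whence) into whole milliseconds before handing it to libssh2_session_set_timeout. That conversion, isolated as a sketch; remaining_ms is an invented name, and sub_timeval and the by-value compare_timeval are assumed to behave like the helpers shown earlier:

/* Deadline-to-milliseconds sketch. Falls back to the handler's small
 * default when the deadline has already passed. */
static int remaining_ms(struct timeval now, struct timeval deadline)
{
  struct timeval diff;
  int timeout_ms = 10;                          /* default used above */
  if (compare_timeval(now, deadline) < 0) {
    sub_timeval(deadline, now, &diff);
    timeout_ms = diff.tv_sec * 1000 + diff.tv_usec / 1000;
  }
  return timeout_ms;
}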
Example #8
int
noit_check_schedule_next(noit_module_t *self,
                         struct timeval *last_check, noit_check_t *check,
                         struct timeval *now, dispatch_func_t dispatch,
                         noit_check_t *cause) {
  eventer_t newe;
  struct timeval period, earliest, diff;
  int64_t diffms, periodms, offsetms;
  recur_closure_t *rcl;
  int initial = last_check ? 1 : 0;

  assert(cause == NULL);
  assert(check->fire_event == NULL);
  if(check->period == 0) return 0;

  /* If last_check is not passed, we use the stored initial_schedule_time;
   * otherwise we record last_check as the new initial_schedule_time.
   */
  if(!last_check) last_check = &check->initial_schedule_time;
  else memcpy(&check->initial_schedule_time, last_check, sizeof(*last_check));

  if(NOIT_CHECK_DISABLED(check) || NOIT_CHECK_KILLED(check)) {
    if(!(check->flags & NP_TRANSIENT)) check_slots_dec_tv(last_check);
    memset(&check->initial_schedule_time, 0, sizeof(struct timeval));
    return 0;
  }

  /* If we have an event, we know when we intended it to fire.  This means
   * we should schedule that point + period.
   */
  if(now)
    memcpy(&earliest, now, sizeof(earliest));
  else
    gettimeofday(&earliest, NULL);

  /* If the check is unconfigured and needs resolving, we'll set the
   * period down a bit lower so we can pick up the resolution quickly.
   * The one exception is if this is the initial run.
   */
  if(!initial &&
     !NOIT_CHECK_RESOLVED(check) && NOIT_CHECK_SHOULD_RESOLVE(check) &&
     check->period > 1000) {
    period.tv_sec = 1;
    period.tv_usec = 0;
  }
  else {
    period.tv_sec = check->period / 1000;
    period.tv_usec = (check->period % 1000) * 1000;
  }
  periodms = period.tv_sec * 1000 + period.tv_usec / 1000;

  newe = eventer_alloc();
  /* calculate the difference between the initial schedule time and "now" */
  if(compare_timeval(earliest, *last_check) >= 0) {
    sub_timeval(earliest, *last_check, &diff);
    diffms = (int64_t)diff.tv_sec * 1000 + diff.tv_usec / 1000;
  }
  else {
    noitL(noit_error, "time is going backwards. abort.\n");
    abort();
  }
  /* determine the offset from initial schedule time that would place
   * us at the next period-aligned point past "now" */
  offsetms = ((diffms / periodms) + 1) * periodms;
  diff.tv_sec = offsetms / 1000;
  diff.tv_usec = (offsetms % 1000) * 1000;

  memcpy(&newe->whence, last_check, sizeof(*last_check));
  add_timeval(newe->whence, diff, &newe->whence);

  sub_timeval(newe->whence, earliest, &diff);
  diffms = (int64_t)diff.tv_sec * 1000 + (int)diff.tv_usec / 1000;
  assert(compare_timeval(newe->whence, earliest) > 0);
  newe->mask = EVENTER_TIMER;
  newe->callback = noit_check_recur_handler;
  rcl = calloc(1, sizeof(*rcl));
  rcl->self = self;
  rcl->check = check;
  rcl->cause = cause;
  rcl->dispatch = dispatch;
  newe->closure = rcl;

  eventer_add(newe);
  check->fire_event = newe;
  return diffms;
}
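
The offsetms arithmetic above snaps the next firing to a period boundary measured from the initial schedule time, which keeps a check's firings aligned even when scheduling drifts. A worked illustration with invented numbers:

#include <stdint.h>

/* ((diffms / periodms) + 1) * periodms, exactly as computed above. */
static int64_t next_aligned_offset_ms(int64_t diffms, int64_t periodms)
{
  return ((diffms / periodms) + 1) * periodms;
}

/* Example: initial schedule t0, now = t0 + 7300 ms, period = 5000 ms.
 * next_aligned_offset_ms(7300, 5000) == 10000, so the next firing lands at
 * t0 + 10 s -- the first period-aligned instant strictly after "now". */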