Example #1
0
/**
 * Sets the fields in an EXT_TFMCC_ACK_INFO extension for transmission
 */
void set_tfmcc_ack_info(struct group_list_t *group, 
                        struct tfmcc_ack_info_he *tfmcc)
{
    struct timeval now, send_time;
    unsigned ccrate;

    tfmcc->exttype = EXT_TFMCC_ACK_INFO;
    tfmcc->extlen = sizeof(struct tfmcc_ack_info_he) / 4;
    tfmcc->cc_seq = htons(group->ccseq);
    ccrate = current_cc_rate(group);
    glog4(group, "ccrate=%d", ccrate);
    tfmcc->cc_rate = htons(quantize_rate(ccrate));
    //tfmcc->cc_rate = htons(quantize_rate(current_cc_rate(group)));
    tfmcc->flags = 0;
    if (group->slowstart) {
        tfmcc->flags |= FLAG_CC_START;
    }
    if (group->rtt != 0.0) {
        tfmcc->flags |= FLAG_CC_RTT;
    }
    tfmcc->client_id = uid;

    gettimeofday(&now, NULL);
    if (cmptimestamp(now, group->last_server_rx_ts) <= 0) {
        send_time = group->last_server_ts;
    } else {
        send_time = add_timeval(group->last_server_ts,
                diff_timeval(now, group->last_server_rx_ts));
    }
    tfmcc->tstamp_sec = htonl((uint32_t)send_time.tv_sec);
    tfmcc->tstamp_usec = htonl((uint32_t)send_time.tv_usec);
}
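/*
 * Editor's note: the timestamp echo above relies on value-returning
 * add_timeval()/diff_timeval() helpers that are not part of this listing.
 * The sketch below is a plausible implementation inferred only from the call
 * site (send_time = add_timeval(ts, diff_timeval(now, rx_ts))); the actual
 * UFTP helpers may differ.
 */
#include <sys/time.h>

static struct timeval diff_timeval(struct timeval t2, struct timeval t1)
{
    struct timeval result;

    result.tv_sec = t2.tv_sec - t1.tv_sec;
    result.tv_usec = t2.tv_usec - t1.tv_usec;
    if (result.tv_usec < 0) {
        /* borrow one second's worth of microseconds */
        result.tv_usec += 1000000;
        result.tv_sec--;
    }
    return result;
}

static struct timeval add_timeval(struct timeval t1, struct timeval t2)
{
    struct timeval result;

    result.tv_sec = t1.tv_sec + t2.tv_sec;
    result.tv_usec = t1.tv_usec + t2.tv_usec;
    if (result.tv_usec >= 1000000) {
        /* carry microseconds into seconds */
        result.tv_usec -= 1000000;
        result.tv_sec++;
    }
    return result;
}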
Example #2
0
int write_serial_packet(serial_source src, const void *packet, int len)
/* Effects: writes len byte packet to serial source src
   Returns: 0 if packet successfully written, 1 if successfully written
     but not acknowledged, -1 otherwise
*/
{
  struct timeval deadline;

  src->send.seqno++;
  if (write_framed_packet(src, P_PACKET_ACK, src->send.seqno, packet, len) < 0)
    return -1;

  gettimeofday(&deadline, NULL);
  add_timeval(&deadline, ACK_TIMEOUT);
  for (;;)
    {
      struct packet_list *entry;

      read_and_process(src);
      entry = pop_protocol_packet(src, P_ACK);
      if (entry)
        {
          uint8_t acked = entry->packet[0];

          free(entry->packet);
          free(entry);
          if (acked == src->send.seqno)
            return 0;
        }
      else if (source_wait(src, &deadline) < 0)
        return 1;
    }
}
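/*
 * Editor's note: here add_timeval() is an in-place, two-argument flavor used
 * to push the ACK deadline forward.  A minimal sketch, assuming ACK_TIMEOUT
 * is a microsecond count; neither the real helper nor the unit of ACK_TIMEOUT
 * appears in this listing.
 */
#include <sys/time.h>

static void add_timeval(struct timeval *tv, long adjust_usec)
{
    tv->tv_usec += adjust_usec;
    /* carry any whole seconds out of the microsecond field */
    tv->tv_sec += tv->tv_usec / 1000000;
    tv->tv_usec %= 1000000;
}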
Example #3
0
void o2fsck_add_resource_track(struct o2fsck_resource_track *rt1,
                               struct o2fsck_resource_track *rt2)
{
    struct ocfs2_io_stats *io1 = &rt1->rt_io_stats;
    struct ocfs2_io_stats *io2 = &rt2->rt_io_stats;

    add_timeval(&rt1->rt_real_time, &rt2->rt_real_time);
    add_timeval(&rt1->rt_user_time, &rt2->rt_user_time);
    add_timeval(&rt1->rt_sys_time, &rt2->rt_sys_time);

    io1->is_bytes_read += io2->is_bytes_read;
    io1->is_bytes_written += io2->is_bytes_written;
    io1->is_cache_hits += io2->is_cache_hits;
    io1->is_cache_misses += io2->is_cache_misses;
    io1->is_cache_inserts += io2->is_cache_inserts;
    io1->is_cache_removes += io2->is_cache_removes;

}
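/*
 * Editor's note: o2fsck_add_resource_track() accumulates one resource track
 * into another, so the add_timeval() it calls must be an "add src into dest"
 * pointer form.  A minimal sketch of that form, inferred from the calls above
 * rather than taken from the ocfs2-tools source:
 */
#include <sys/time.h>

static void add_timeval(struct timeval *dest, struct timeval *src)
{
    dest->tv_sec += src->tv_sec;
    dest->tv_usec += src->tv_usec;
    if (dest->tv_usec >= 1000000) {
        /* keep tv_usec normalized to [0, 1000000) */
        dest->tv_usec -= 1000000;
        dest->tv_sec++;
    }
}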
Example #4
0
int
noit_check_schedule_next(noit_module_t *self,
                         struct timeval *last_check, noit_check_t *check,
                         struct timeval *now, dispatch_func_t dispatch,
                         noit_check_t *cause) {
  eventer_t newe;
  struct timeval period, earliest;
  recur_closure_t *rcl;

  assert(cause == NULL);
  assert(check->fire_event == NULL);
  if(check->period == 0) return 0;
  if(NOIT_CHECK_DISABLED(check) || NOIT_CHECK_KILLED(check)) {
    if(!(check->flags & NP_TRANSIENT)) check_slots_dec_tv(last_check);
    return 0;
  }

  /* If we have an event, we know when we intended it to fire.  This means
   * we should schedule that point + period.
   */
  if(now)
    memcpy(&earliest, now, sizeof(earliest));
  else
    gettimeofday(&earliest, NULL);

  /* If the check is unconfigured and needs resolving, we'll set the
   * period down a bit lower so we can pick up the resolution quickly.
   */
  if(!NOIT_CHECK_RESOLVED(check) && NOIT_CHECK_SHOULD_RESOLVE(check) &&
      check->period > 1000) {
    period.tv_sec = 1;
    period.tv_usec = 0;
  }
  else {
    period.tv_sec = check->period / 1000;
    period.tv_usec = (check->period % 1000) * 1000;
  }

  newe = eventer_alloc();
  memcpy(&newe->whence, last_check, sizeof(*last_check));
  add_timeval(newe->whence, period, &newe->whence);
  if(compare_timeval(newe->whence, earliest) < 0)
    memcpy(&newe->whence, &earliest, sizeof(earliest));
  newe->mask = EVENTER_TIMER;
  newe->callback = noit_check_recur_handler;
  rcl = calloc(1, sizeof(*rcl));
  rcl->self = self;
  rcl->check = check;
  rcl->cause = cause;
  rcl->dispatch = dispatch;
  newe->closure = rcl;

  eventer_add(newe);
  check->fire_event = newe;
  return 0;
}
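/*
 * Editor's note: the noit/eventer listings in the rest of this section use a
 * third calling convention: add_timeval(a, b, &out) writes the sum through an
 * out-parameter and compare_timeval(a, b) orders two timevals.  The sketch
 * below is inferred from the call sites; the actual libmtev/noit utilities
 * may differ in detail.
 */
#include <sys/time.h>

static void add_timeval(struct timeval a, struct timeval b,
                        struct timeval *out)
{
    out->tv_sec = a.tv_sec + b.tv_sec;
    out->tv_usec = a.tv_usec + b.tv_usec;
    if (out->tv_usec >= 1000000) {
        out->tv_usec -= 1000000;
        out->tv_sec++;
    }
}

static int compare_timeval(struct timeval a, struct timeval b)
{
    if (a.tv_sec != b.tv_sec)
        return (a.tv_sec < b.tv_sec) ? -1 : 1;
    if (a.tv_usec != b.tv_usec)
        return (a.tv_usec < b.tv_usec) ? -1 : 1;
    return 0;
}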
Example #5
0
int main(int argc, char** argv)
{
	// Parse cmd line args.
	int	pointCount	= 0;

	if(argc == 2) {
		pointCount	= atoi(argv[1]);
	}
	else {
		printf("usage: dotp <points>\n");
		exit(1);
	}

	long* x1		= malloc(pointCount * sizeof(long));
	long* y1		= malloc(pointCount * sizeof(long));
	long* x2		= malloc(pointCount * sizeof(long));
	long* y2		= malloc(pointCount * sizeof(long));

	long* out		= malloc(pointCount * sizeof(long));

	for (int i = 0; i < pointCount; i++) {
		x1[i] = i;
		y1[i] = i;
		x2[i] = i;
		y2[i] = i;
	}

	// Timing setup
        struct timeval start, finish;
        struct rusage start_ru, finish_ru;

        gettimeofday( &start, NULL );
        getrusage( RUSAGE_SELF, &start_ru );

	// Do the deed.
	dotp(pointCount, x1, y1, x2, y2, out);

	// Print how long it took.
        gettimeofday( &finish, NULL );
        getrusage( RUSAGE_SELF, &finish_ru );

        sub_timeval( &finish, &start );
        sub_timeval( &finish_ru.ru_utime, &start_ru.ru_utime );
        sub_timeval( &finish_ru.ru_stime, &start_ru.ru_stime );
        add_timeval( &finish_ru.ru_utime, &finish_ru.ru_stime );

	printf("elapsedTimeMS   = ");
	print_timeval( &finish ); putchar( '\n' );

	printf("cpuTimeMS       = ");
	print_timeval( &finish_ru.ru_utime ); putchar( '\n' );
}
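/*
 * Editor's note: the benchmark mains in this section share a small
 * pointer-based timing harness that is not reproduced here.  One consistent
 * reading of it is sketched below: sub_timeval() subtracts in place and
 * print_timeval() prints whole milliseconds (matching the "MS" labels); the
 * add_timeval() calls match the accumulate-into-dest form sketched earlier.
 * This is illustrative only, not the harness source.
 */
#include <stdio.h>
#include <sys/time.h>

static void sub_timeval(struct timeval *x, const struct timeval *y)
{
    x->tv_sec -= y->tv_sec;
    x->tv_usec -= y->tv_usec;
    if (x->tv_usec < 0) {
        x->tv_usec += 1000000;
        x->tv_sec--;
    }
}

static void print_timeval(const struct timeval *t)
{
    /* print the interval as whole milliseconds */
    printf("%ld", (long)t->tv_sec * 1000 + (long)(t->tv_usec / 1000));
}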
Example #6
0
static void
check_test_schedule_sweeper() {
  struct timeval now, diff;
  if(sweeper_event) return;
  sweeper_event = eventer_alloc();
  sweeper_event->mask = EVENTER_TIMER;
  sweeper_event->callback = check_test_sweeper;
  diff.tv_sec = 0L;
  diff.tv_usec = default_sweep_interval * 1000L;
  gettimeofday(&now, NULL);
  add_timeval(now, diff, &sweeper_event->whence);
  eventer_add(sweeper_event);
}
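/*
 * Editor's note: the delay above is built as { 0, default_sweep_interval *
 * 1000 }, which only yields a normalized timeval while the sweep interval
 * stays under 1000 ms.  A small illustrative helper for the general
 * millisecond-to-timeval conversion (hypothetical; not part of the noit
 * source):
 */
#include <sys/time.h>

static struct timeval ms_to_timeval(long ms)
{
    struct timeval tv;

    tv.tv_sec = ms / 1000;
    tv.tv_usec = (ms % 1000) * 1000;
    return tv;
}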
Example #7
0
int main(int argc, char** argv)
{
	// Parse cmd line args.
	int	size	      = 0;

	if(argc == 2) {
		size          = atoi(argv[1]);
	}
	else {
		printf("usage: filtermax <size>\n");
		exit(1);
	}

        // Input vector.
	long* uvec     = malloc(size * sizeof(long));

        // Fill the input vector with test data.
        for (int i = 0; i < size; i++) {
                uvec[i] = i;
        }

        // Output vector.
	long* out      = malloc(size * sizeof(long));

	// Timing setup
        struct timeval start, finish;
        struct rusage start_ru, finish_ru;

        gettimeofday( &start, NULL );
        getrusage( RUSAGE_SELF, &start_ru );

	// Do the deed.
	long out_len, out_max;
	filtermax(size, uvec, out, &out_len, &out_max);

	// Print how long it took.
        gettimeofday( &finish, NULL );
        getrusage( RUSAGE_SELF, &finish_ru );

        sub_timeval( &finish, &start );
        sub_timeval( &finish_ru.ru_utime, &start_ru.ru_utime );
        sub_timeval( &finish_ru.ru_stime, &start_ru.ru_stime );
        add_timeval( &finish_ru.ru_utime, &finish_ru.ru_stime );

	printf("elapsedTimeMS   = ");
	print_timeval( &finish ); putchar( '\n' );

	printf("cpuTimeMS       = ");
	print_timeval( &finish_ru.ru_utime ); putchar( '\n' );
}
Example #8
0
void
noit_check_run_full_asynch_opts(noit_check_t *check, eventer_func_t callback,
                                int mask) {
  struct timeval __now, p_int;
  eventer_t e;
  e = eventer_alloc();
  e->fd = -1;
  e->mask = EVENTER_ASYNCH | mask;
  gettimeofday(&__now, NULL);
  memcpy(&e->whence, &__now, sizeof(__now));
  p_int.tv_sec = check->timeout / 1000;
  p_int.tv_usec = (check->timeout % 1000) * 1000;
  add_timeval(e->whence, p_int, &e->whence);
  e->callback = callback;
  e->closure =  check->closure;
  eventer_add(e);
}
Example #9
0
static void _set_ts_timeout(struct target_session *ts, struct timeval *t) {
  struct timeval now;
  eventer_t e = NULL;
  if(ts->timeoutevent) {
    e = eventer_remove(ts->timeoutevent);
    ts->timeoutevent = NULL;
  }
  if(!t) return;

  gettimeofday(&now, NULL);
  if(!e) e = eventer_alloc();
  e->callback = noit_snmp_session_timeout;
  e->closure = ts;
  e->mask = EVENTER_TIMER;
  add_timeval(now, *t, &e->whence);
  ts->timeoutevent = e;
  eventer_add(e);
}
Example #10
0
static int
check_recycle_bin_processor(eventer_t e, int mask, void *closure,
                            struct timeval *now) {
  static struct timeval one_minute = { 60L, 0L };
  struct _checker_rcb *prev = NULL, *curr = checker_rcb;
  noitL(noit_debug, "Scanning check recycle bin\n");
  while(curr) {
    if(!(curr->checker->flags & NP_RUNNING)) {
      noitL(noit_debug, "Check is ready to free.\n");
      noit_poller_free_check(curr->checker);
      if(prev) prev->next = curr->next;
      else checker_rcb = curr->next;
      free(curr);
      curr = prev ? prev->next : checker_rcb;
    }
    else {
      prev = curr;
      curr = curr->next;
    }
  }
  add_timeval(*now, one_minute, &e->whence);
  return EVENTER_TIMER;
}
Example #11
0
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP;

  if(!kqs) {
    kqs = calloc(1, sizeof(*kqs));
    kqs_init(kqs);
  }
  pthread_setspecific(kqueue_setup_key, kqs);
  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* If we're the master, we need to lock the master_kqs and make mods */
    if(master_kqs->__ke_vec_used) {
      struct timespec __zerotime = { 0, 0 };
      pthread_mutex_lock(&kqs_lock);
      fd_cnt = kevent(kqueue_fd,
                      master_kqs->__ke_vec, master_kqs->__ke_vec_used,
                      NULL, 0,
                      &__zerotime);
      noitLT(eventer_deb, &__now, "debug: kevent(%d, [], %d) => %d\n", kqueue_fd, master_kqs->__ke_vec_used, fd_cnt);
      if(fd_cnt < 0) {
        noitLT(eventer_err, &__now, "kevent: %s\n", strerror(errno));
      }
      master_kqs->__ke_vec_used = 0;
      pthread_mutex_unlock(&kqs_lock);
    }

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    noitLT(eventer_deb, &__now, "debug: kevent(%d, [], %d) => %d\n", kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      noitLT(eventer_err, &__now, "kevent: %s\n", strerror(errno));
    }
    else if(fd_cnt == 0) {
      /* timeout */
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            noitLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        assert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Example #12
0
int main(int argc, char** argv)
{
	// Parse cmd line args.
	int	pointCount	= 0;
	char*	outSVG		= 0;

	if(argc == 2) {
		pointCount	= atoi(argv[1]);
		outSVG		= 0;
	}
	else if (argc == 3) {
		pointCount	= atoi(argv[1]);
		outSVG		= argv[2];
	}
	else {
		printf("usage: quickhull <points> [out.svg]\n");
		exit(1);
	}

	// Initialise the vector to hold the hull.
	Vector* hull		= vector_new(pointCount);
		
	// Use random points for test data.
	Vector* points		= vector_new(pointCount);

	double	originX		= 300;
	double	originY		= 300;
	long	maxDist		= 250;
	
	srandom(170);
	for (int i = 0; i < pointCount; i++) {
		double r	= (random() % (maxDist * 2)) - maxDist;
		double a	= M_PI * (random() % 360) / 360;

		vector_append
			( points
			, originX + r * cos (a)
			, originY + r * sin (a));
	}

	// Timing setup
        struct timeval start, finish;
        struct rusage start_ru, finish_ru;

        gettimeofday( &start, NULL );
        getrusage( RUSAGE_SELF, &start_ru );

	// Do the deed.
	int depth = quickHull (points, hull);

	// Print how long it took.
        gettimeofday( &finish, NULL );
        getrusage( RUSAGE_SELF, &finish_ru );

//	printf("depth          = %d\n", depth);
//	printf("points on hull = %d\n", hull->length);

        sub_timeval( &finish, &start );
        sub_timeval( &finish_ru.ru_utime, &start_ru.ru_utime );
        sub_timeval( &finish_ru.ru_stime, &start_ru.ru_stime );
        add_timeval( &finish_ru.ru_utime, &finish_ru.ru_stime );

	printf("elapsedTimeMS   = ");
	print_timeval( &finish ); putchar( '\n' );

	printf("cpuTimeMS       = ");
	print_timeval( &finish_ru.ru_utime ); putchar( '\n' );

	// Write output to file if requested.
	if(outSVG != 0) {
		FILE* file = fopen(outSVG, "w");
		dumpSVG	(file, points, hull);	
		fclose	(file);
	}
}
Example #13
0
/**
 * Sends a REGISTER message in response to an ANNOUNCE or on timeout when
 * waiting for a KEYINFO or REG_CONF.  If the register timeout expired, abort.
 */
void send_register(struct group_list_t *group)
{
    struct uftp_h *header;
    struct register_h *reg;
    unsigned char *buf, *keydata;
    struct timeval now, send_time;
    unsigned int len, meslen;
    union key_t key;

    gettimeofday(&now, NULL);
    if (cmptimestamp(now, group->expire_time) >= 0) {
        glog1(group, "Registration unconfirmed by server");
        send_abort(group, "Registration unconfirmed");
        return;
    }

    buf = safe_calloc(MAXMTU, 1);

    header = (struct uftp_h *)buf;
    reg = (struct register_h *)(buf + sizeof(struct uftp_h));
    keydata = (unsigned char *)reg + sizeof(struct register_h);
    set_uftp_header(header, REGISTER, group);
    reg->func = REGISTER;
    if (group->keytype != KEY_NONE) {
        memcpy(reg->rand2, group->rand2, RAND_LEN);
        if (group->keyextype == KEYEX_RSA) {
            if (has_proxy) {
                key = proxy_pubkey;
            } else {
                key = group->server_pubkey;
            }
            if (!RSA_encrypt(key.rsa, group->premaster, group->premaster_len,
                             keydata, &len)) {
                glog0(group, "Error encrypting premaster secret");
                send_abort(group, "Error encrypting premaster secret");
                free(buf);
                return;
            }
        } else {
            uint16_t keylen;
            if (!export_EC_key(group->client_dhkey.ec, keydata, &keylen)) {
                glog0(group, "Error exporting ECDH public key");
                send_abort(group, "Error exporting ECDH public key");
                free(buf);
                return;
            }
            len = keylen;
        }
        reg->keyinfo_len = htons(len); 
    } else {
        len = 0;
    }
    gettimeofday(&now, NULL);
    if (cmptimestamp(now, group->last_server_rx_ts) <= 0) {
        send_time = group->last_server_ts;
    } else {
        send_time = add_timeval(group->last_server_ts,
                diff_timeval(now, group->last_server_rx_ts));
    }
    reg->tstamp_sec = htonl((uint32_t)send_time.tv_sec);
    reg->tstamp_usec = htonl((uint32_t)send_time.tv_usec);
    reg->hlen = (sizeof(struct register_h) + len) / 4;
    meslen = sizeof(struct uftp_h) + (reg->hlen * 4);

    if (nb_sendto(listener, buf, meslen, 0,
               (struct sockaddr *)&(group->replyaddr),
               family_len(group->replyaddr)) == SOCKET_ERROR) {
        gsockerror(group, "Error sending REGISTER");
    } else {
        glog2(group, "REGISTER sent");
    }
    glog3(group, "send time: %d.%06d", send_time.tv_sec, send_time.tv_usec);

    set_timeout(group, 0);
    if (group->client_auth) {
        send_client_key(group);
    }
    free(buf);
}
Example #14
0
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP(NULL);

  if(eventer_kqueue_impl_register_wakeup(kqs) == -1) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_loop: could not eventer_kqueue_impl_register_wakeup\n");
  }

  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqs->kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    kqs->wakeup_notify = 0;
    if(fd_cnt > 0 || ke_vec_used)
      mtevLT(eventer_deb, &__now, "[t@%llx] kevent(%d, [...], %d) => %d\n", (vpsized_int)pthread_self(), kqs->kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      mtevLT(eventer_err, &__now, "kevent(s/%d): %s\n", kqs->kqueue_fd, strerror(errno));
    }
    else if(fd_cnt == 0 ||
            (fd_cnt == 1 && ke_vec[0].filter == EVFILT_USER)) {
      /* timeout */
      if(fd_cnt) eventer_kqueue_impl_register_wakeup(kqs);
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) {
          eventer_kqueue_impl_register_wakeup(kqs);
          continue;
        }
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->filter == EVFILT_USER) continue;
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            mtevLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        mtevAssert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Example #15
0
/**
 * Updates the group's loss history
 *
 * Packets older than MAXMISORDER sequence numbers don't change the loss
 * history, and packets aren't considered lost unless the sequence number is
 * more than MAXMISORDER sequence numbers old.  Works under the assumption
 * that no more than 32K packets in a row get lost.
 */
void update_loss_history(struct group_list_t *group, uint16_t txseq, int size,
                         int ecn)
{
    uint16_t i;
    int tdiff, grtt_usec;
    struct timeval tvdiff;

    group->loss_history[txseq].found = 1;
    gettimeofday(&group->loss_history[txseq].t, NULL);
    if (group->multi.ss.ss_family == AF_INET6) {
        group->loss_history[txseq].size = size + 8 + 40;
    } else {
        group->loss_history[txseq].size = size + 8 + 20;
    }
    
    if ((int16_t)(txseq - group->max_txseq) > 0) {
        glog4(group, "Got seq %d, max was %d", txseq, group->max_txseq);
        grtt_usec = (int)(group->grtt * 1000000);
        if (txseq < group->max_txseq) {
            glog5(group, "increasing seq_wrap, txseq=%u, maxseq=%u",
                         txseq, group->max_txseq);
            group->seq_wrap++;
        }
        // First set nominal arrival times of missed packets
        for (i = group->max_txseq + 1; i != txseq; i++) {
            tdiff = (int)diff_usec(group->loss_history[txseq].t,
                                   group->loss_history[group->max_txseq].t) *
                ((i - group->max_txseq) / (txseq - group->max_txseq));
            tvdiff.tv_sec = 0;
            tvdiff.tv_usec = tdiff;
            while (tvdiff.tv_usec >= 1000000) {
                tvdiff.tv_usec -= 1000000;
                tvdiff.tv_sec++;
            }
            group->loss_history[i].found = 0;
            group->loss_history[i].t =
                    add_timeval(group->loss_history[group->max_txseq].t,tvdiff);
        }
        // Then check for missed packets up to MAXMISORDER less than the current one
        // Don't do this part unless we have at least MAXMISORDER packets
        // TODO: address issue of start_txseq being within MAXMISORDER sequence
        // numbers from the maximum
        if (group->seq_wrap ||((uint16_t)(group->max_txseq -
                                    group->start_txseq) >= MAXMISORDER)) {
            for (i = group->max_txseq - MAXMISORDER;
                    i != (uint16_t)(txseq - MAXMISORDER); i++) {
                if (!group->loss_history[i].found &&
                        ((diff_usec(group->loss_history[i].t,
                                    group->loss_events[0].t) > grtt_usec) ||
                            group->slowstart)) {
                    new_loss_event(group, i);
                }
            }
        }
        group->max_txseq = txseq;
        if (ecn) {
            glog4(group, "Seq %d marked by ECN", txseq);
            if ((diff_usec(group->loss_history[txseq].t,
                    group->loss_events[0].t) > grtt_usec) || group->slowstart) {
                new_loss_event(group, txseq);
            }
        }
    }
    group->loss_events[0].len = ((group->seq_wrap << 16) | group->max_txseq) -
                                group->loss_events[0].start_seq;
    glog5(group, "current cc len = %d", group->loss_events[0].len);
    glog5(group, "seq_wrap=%d, max_txseq=%u, start_seq=%u",
            group->seq_wrap, group->max_txseq, group->loss_events[0].start_seq);
}
Example #16
0
static int ssh2_initiate(noit_module_t *self, noit_check_t *check,
                         noit_check_t *cause) {
  ssh2_check_info_t *ci = check->closure;
  struct timeval p_int, __now;
  int fd = -1, rv = -1;
  eventer_t e;
  union {
    struct sockaddr_in sin;
    struct sockaddr_in6 sin6;
  } sockaddr;
  socklen_t sockaddr_len;
  unsigned short ssh_port = DEFAULT_SSH_PORT;
  const char *port_str = NULL;

  /* We cannot be running */
  BAIL_ON_RUNNING_CHECK(check);
  check->flags |= NP_RUNNING;

  ci->self = self;
  ci->check = check;

  ci->timed_out = 1;
  if(ci->timeout_event) {
    eventer_remove(ci->timeout_event);
    free(ci->timeout_event->closure);
    eventer_free(ci->timeout_event);
    ci->timeout_event = NULL;
  }
  gettimeofday(&__now, NULL);
  memcpy(&check->last_fire_time, &__now, sizeof(__now));

  if(check->target_ip[0] == '\0') {
    ci->error = strdup("name resolution failure");
    goto fail;
  }
  /* Open a socket */
  fd = socket(check->target_family, NE_SOCK_CLOEXEC|SOCK_STREAM, 0);
  if(fd < 0) goto fail;

  /* Make it non-blocking */
  if(eventer_set_fd_nonblocking(fd)) goto fail;

  if(noit_hash_retr_str(check->config, "port", strlen("port"),
                        &port_str)) {
    ssh_port = (unsigned short)atoi(port_str);
  }
#define config_method(a) do { \
  const char *v; \
  if(noit_hash_retr_str(check->config, "method_" #a, strlen("method_" #a), \
                        &v)) \
    ci->methods.a = strdup(v); \
} while(0)
  config_method(kex);
  config_method(hostkey);
  config_method(crypt_cs);
  config_method(crypt_sc);
  config_method(mac_cs);
  config_method(mac_sc);
  config_method(comp_cs);
  config_method(comp_sc);
  memset(&sockaddr, 0, sizeof(sockaddr));
  sockaddr.sin6.sin6_family = check->target_family;
  if(check->target_family == AF_INET) {
    memcpy(&sockaddr.sin.sin_addr,
           &check->target_addr.addr, sizeof(sockaddr.sin.sin_addr));
    sockaddr.sin.sin_port = htons(ssh_port);
    sockaddr_len = sizeof(sockaddr.sin);
  }
  else {
    memcpy(&sockaddr.sin6.sin6_addr,
           &check->target_addr.addr6, sizeof(sockaddr.sin6.sin6_addr));
    sockaddr.sin6.sin6_port = htons(ssh_port);
    sockaddr_len = sizeof(sockaddr.sin6);
  }

  /* Initiate a connection */
  rv = connect(fd, (struct sockaddr *)&sockaddr, sockaddr_len);
  if(rv == -1 && errno != EINPROGRESS) goto fail;

  /* Register a handler for connection completion */
  e = eventer_alloc();
  e->fd = fd;
  e->mask = EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION;
  e->callback = ssh2_connect_complete;
  e->closure =  ci;
  ci->synch_fd_event = e;
  eventer_add(e);

  e = eventer_alloc();
  e->mask = EVENTER_TIMER;
  e->callback = ssh2_connect_timeout;
  e->closure = ci;
  memcpy(&e->whence, &__now, sizeof(__now));
  p_int.tv_sec = check->timeout / 1000;
  p_int.tv_usec = (check->timeout % 1000) * 1000;
  add_timeval(e->whence, p_int, &e->whence);
  ci->timeout_event = e;
  eventer_add(e);
  return 0;

 fail:
  if(fd >= 0) close(fd);
  ssh2_log_results(ci->self, ci->check);
  ssh2_cleanup(ci->self, ci->check);
  check->flags &= ~NP_RUNNING;
  return -1;
}
Example #17
0
static int dns_check_send(noit_module_t *self, noit_check_t *check,
                          noit_check_t *cause) {
  void *vnv_pair = NULL;
  struct dns_nameval *nv_pair;
  eventer_t newe;
  struct timeval p_int, now;
  struct dns_check_info *ci = check->closure;
  const char *config_val;
  const char *rtype = NULL;
  const char *nameserver = NULL;
  int port = 0;
  const char *port_str = NULL;
  const char *want_sort = NULL;
  const char *ctype = "IN";
  const char *query = NULL;
  char interpolated_nameserver[1024];
  char interpolated_query[1024];
  noit_hash_table check_attrs_hash = NOIT_HASH_EMPTY;

  BAIL_ON_RUNNING_CHECK(check);

  gettimeofday(&now, NULL);
  memcpy(&check->last_fire_time, &now, sizeof(now));
  ci->current.state = NP_BAD;
  ci->current.available = NP_UNAVAILABLE;
  ci->timed_out = 1;
  ci->nrr = 0;
  ci->sort = 1;

  if(!strcmp(check->name, "in-addr.arpa") ||
     (strlen(check->name) >= sizeof("::in-addr.arpa") - 1 &&
      !strcmp(check->name + strlen(check->name) - sizeof("::in-addr.arpa") + 1,
              "::in-addr.arpa"))) {
    /* in-addr.arpa defaults:
     *   nameserver to NULL
     *   rtype to PTR
     *   query to %[:inaddrarpa:target]
     */
    nameserver = NULL;
    rtype = "PTR";
    query = "%[:inaddrarpa:target_ip]";
  }
  else {
    nameserver = "%[target_ip]";
    rtype = "A";
    query = "%[name]";
  }

  if(noit_hash_retr_str(check->config, "port", strlen("port"),
                        &port_str)) {
    port = atoi(port_str);
  }

#define CONFIG_OVERRIDE(a) \
  if(noit_hash_retr_str(check->config, #a, strlen(#a), \
                        &config_val) && \
     strlen(config_val) > 0) \
    a = config_val
  CONFIG_OVERRIDE(ctype);
  CONFIG_OVERRIDE(nameserver);
  CONFIG_OVERRIDE(rtype);
  CONFIG_OVERRIDE(query);
  CONFIG_OVERRIDE(want_sort);
  if(nameserver && !strcmp(nameserver, "default")) nameserver = NULL;
  if(want_sort && strcasecmp(want_sort, "on") && strcasecmp(want_sort, "true"))
    ci->sort = 0;

  noit_check_make_attrs(check, &check_attrs_hash);

  if(nameserver) {
    noit_check_interpolate(interpolated_nameserver,
                           sizeof(interpolated_nameserver),
                           nameserver,
                           &check_attrs_hash, check->config);
    nameserver = interpolated_nameserver;
  }
  if(query) {
    noit_check_interpolate(interpolated_query,
                           sizeof(interpolated_query),
                           query,
                           &check_attrs_hash, check->config);
    query = interpolated_query;
  }
  noit_hash_destroy(&check_attrs_hash, NULL, NULL);

  check->flags |= NP_RUNNING;
  noitL(nldeb, "dns_check_send(%p,%s,%s,%s,%s,%s)\n",
        self, check->target, nameserver ? nameserver : "default",
        query ? query : "null", ctype, rtype);

  __activate_ci(ci);
  /* If this ci has a handle and it isn't the one we need,
   * we should release it
   */
  if(ci->h &&
     ((ci->h->ns == NULL && nameserver != NULL) ||
      (ci->h->ns != NULL && nameserver == NULL) ||
      (ci->h->ns && strcmp(ci->h->ns, nameserver)))) {
    dns_ctx_release(ci->h);
    ci->h = NULL;
  }
  /* use the cached one, unless we don't have one */
  if(!ci->h) ci->h = dns_ctx_alloc(nameserver, port);
  if(!ci->h) ci->error = strdup("bad nameserver");

  /* Look up our class */
  if(!noit_hash_retrieve(&dns_ctypes, ctype, strlen(ctype),
                         &vnv_pair)) {
    if(ci->error) free(ci->error);
    ci->error = strdup("bad class");
  }
  else {
    nv_pair = (struct dns_nameval *)vnv_pair;
    ci->query_ctype = nv_pair->val;
  }
  /* Look up our rr type */
  if(!noit_hash_retrieve(&dns_rtypes, rtype, strlen(rtype),
                         &vnv_pair)) {
    if(ci->error) free(ci->error);
    ci->error = strdup("bad rr type");
  }
  else {
    nv_pair = (struct dns_nameval *)vnv_pair;
    ci->query_rtype = nv_pair->val;
  }

  if(!ci->error) {
    /* Submit the query */
    int abs;
    if(!dns_ptodn(query, strlen(query), ci->dn, sizeof(ci->dn), &abs) ||
       !dns_submit_dn(ci->h->ctx, ci->dn, ci->query_ctype, ci->query_rtype,
                      abs | DNS_NOSRCH, NULL, dns_cb, ci)) {
      ci->error = strdup("submission error");
    }
    else {
      dns_timeouts(ci->h->ctx, -1, now.tv_sec);
    }
  }

  /* we could have completed by now... if so, we've nothing to do */

  if(!__isactive_ci(ci)) return 0;

  if(ci->error) {
    /* Errors here are easy, fail and avoid scheduling a timeout */
    ci->check->flags &= ~NP_RUNNING;
    dns_check_log_results(ci);
    __deactivate_ci(ci);
    return 0;
  }

  newe = eventer_alloc();
  newe->mask = EVENTER_TIMER;
  gettimeofday(&now, NULL);
  p_int.tv_sec = check->timeout / 1000;
  p_int.tv_usec = (check->timeout % 1000) * 1000;
  add_timeval(now, p_int, &newe->whence);
  newe->closure = ci;
  newe->callback = dns_check_timeout;
  ci->timeout_event = newe;
  eventer_add(newe);

  return 0;
}
Example #18
0
/**
 * Sends a FILEINFO_ACK in response to a FILEINFO
 */
void send_fileinfo_ack(struct group_list_t *group, int restart)
{
    unsigned char *buf, *encrypted, *outpacket;
    struct uftp_h *header;
    struct fileinfoack_h *fileinfo_ack;
    struct timeval now, send_time;
    unsigned int payloadlen;
    int enclen;

    buf = safe_calloc(MAXMTU, 1);

    header = (struct uftp_h *)buf;
    fileinfo_ack = (struct fileinfoack_h *)(buf + sizeof(struct uftp_h));

    payloadlen = sizeof(struct fileinfoack_h);
    set_uftp_header(header, FILEINFO_ACK, group);
    fileinfo_ack->func = FILEINFO_ACK;
    fileinfo_ack->hlen = sizeof(struct fileinfoack_h) / 4;
    fileinfo_ack->file_id = htons(group->file_id);
    if (restart) {
        fileinfo_ack->flags |= FLAG_PARTIAL;
    }
    gettimeofday(&now, NULL);
    if (cmptimestamp(now, group->last_server_rx_ts) <= 0) {
        send_time = group->last_server_ts;
    } else {
        send_time = add_timeval(group->last_server_ts,
                diff_timeval(now, group->last_server_rx_ts));
    }
    fileinfo_ack->tstamp_sec = htonl((uint32_t)send_time.tv_sec);
    fileinfo_ack->tstamp_usec = htonl((uint32_t)send_time.tv_usec);
    if (group->keytype != KEY_NONE) {
        encrypted = NULL;
        if (!encrypt_and_sign(buf, &encrypted, payloadlen, &enclen,
                group->keytype, group->groupkey, group->groupsalt,&group->ivctr,
                group->ivlen, group->hashtype, group->grouphmackey,
                group->hmaclen, group->sigtype, group->keyextype,
                group->client_privkey, group->client_privkeylen)) {
            glog0(group, "Error encrypting FILEINFO_ACK");
            free(buf);
            return;
        }
        outpacket = encrypted;
        payloadlen = enclen;
    } else {
        encrypted = NULL;
        outpacket = buf;
    }
    payloadlen += sizeof(struct uftp_h);

    if (nb_sendto(listener, outpacket, payloadlen, 0,
               (struct sockaddr *)&(group->replyaddr),
               family_len(group->replyaddr)) == SOCKET_ERROR) {
        gsockerror(group, "Error sending FILEINFO_ACK");
    } else {
        glog2(group, "FILEINFO_ACK sent");
    }
    glog3(group, "send time: %d.%06d", send_time.tv_sec, send_time.tv_usec);
    free(encrypted);
    free(buf);
}
Example #19
0
static int eventer_ports_impl_loop(int id) {
  struct timeval __dyna_sleep = { 0, 0 };
  struct ports_spec *spec;
  spec = eventer_get_spec_for_event(NULL);

  while(1) {
    struct timeval __sleeptime;
    struct timespec __ports_sleeptime;
    unsigned int fd_cnt = 0;
    int ret;
    port_event_t pevents[MAX_PORT_EVENTS];

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;
 
    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent();

    /* Now we move on to our fd-based events */
    __ports_sleeptime.tv_sec = __sleeptime.tv_sec;
    __ports_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = 1;

    pevents[0].portev_source = 65535; /* This is impossible */

    ret = port_getn(spec->port_fd, pevents, MAX_PORT_EVENTS, &fd_cnt,
                    &__ports_sleeptime);
    spec->wakeup_notify = 0; /* force unlock */
    /* The timeout case is a tad complex with ports.  -1/ETIME is clearly
     * a timeout.  However, it is possible that we got that and fd_cnt isn't
     * 0, which means we both timed out and got events... WTF?
     */
    if(fd_cnt == 0 ||
       (ret == -1 && errno == ETIME && pevents[0].portev_source == 65535))
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);

    if(ret == -1 && (errno != ETIME && errno != EINTR))
      mtevL(eventer_err, "port_getn: %s\n", strerror(errno));

    if(ret < 0)
      mtevL(eventer_deb, "port_getn: %s\n", strerror(errno));

    mtevL(eventer_deb, "debug: port_getn(%d, [], %d) => %d\n",
          spec->port_fd, fd_cnt, ret);

    if(pevents[0].portev_source == 65535) {
      /* the impossible still remains, which means our fd_cnt _must_ be 0 */
      fd_cnt = 0;
    }

    if(fd_cnt > 0) {
      int idx;
      /* Loop a last time to process */
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      for(idx = 0; idx < fd_cnt; idx++) {
        port_event_t *pe;
        eventer_t e;
        int fd, mask;

        pe = &pevents[idx];
        if(pe->portev_source != PORT_SOURCE_FD) continue;
        fd = (int)pe->portev_object;
        mtevAssert((intptr_t)pe->portev_user == fd);
        e = master_fds[fd].e;

        /* It's possible that someone removed the event and freed it
         * before we got here.... bail out if we're null.
         */
        if (!e) continue;

        mask = 0;
        if(pe->portev_events & (POLLIN | POLLHUP))
          mask |= EVENTER_READ;
        if(pe->portev_events & (POLLOUT))
          mask |= EVENTER_WRITE;
        if(pe->portev_events & (POLLERR | POLLHUP | POLLNVAL))
          mask |= EVENTER_EXCEPTION;

        eventer_ports_impl_trigger(e, mask);
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Example #20
0
static int test_abort_drive_session(eventer_t e, int mask, void *closure,
                                  struct timeval *passed_now) {
  struct timespec rqtp;
  struct timeval target_time, now, diff;
  double i, r;
  test_abort_check_info_t *ci = closure;
  noit_check_t *check = ci->check;

  if(mask & (EVENTER_READ | EVENTER_WRITE)) {
    /* this case is impossible from the eventer.  It is called as
     * such on the synchronous completion of the event.
     */
    noit_check_stats_clear(check, &check->stats.inprogress);
    check->stats.inprogress.available = NP_AVAILABLE;
    check->stats.inprogress.state = ci->timed_out ? NP_BAD : NP_GOOD;
    noitL(nlerr, "test_abort: EVENTER_READ | EVENTER_WRITE\n");
    noit_check_set_stats(check, &check->stats.inprogress);
    noit_check_stats_clear(check, &check->stats.inprogress);
    check->flags &= ~NP_RUNNING;
    return 0;
  }
  switch(mask) {
    case EVENTER_ASYNCH_WORK:
      noitL(nlerr, "test_abort: EVENTER_ASYNCH_WORK\n");
      r = modf(ci->timeout, &i);
      ci->timed_out = 1;

      if(ci->ignore_signals) { /* computational loop */
        double trash = 1.0;
        gettimeofday(&now, NULL);
        diff.tv_sec = (int)i;
        diff.tv_usec = (int)(r * 1000000.0);
        add_timeval(now, diff, &target_time);

        do {
          for(i=0; i<100000; i++) {
            trash += drand48();
            trash = log(trash);
            trash += 1.1;
            trash = exp(trash);
          }
          gettimeofday(&now, NULL);
          sub_timeval(target_time, now, &diff);
        } while(diff.tv_sec >= 0 && diff.tv_usec >= 0);
      }
      else {
        rqtp.tv_sec = (int)i;
        rqtp.tv_nsec = (int)(r * 1000000000.0);
        nanosleep(&rqtp,NULL);
      }
      noitL(nlerr, "test_abort: EVENTER_ASYNCH_WORK (done)\n");
      ci->timed_out = 0;
      return 0;
      break;
    case EVENTER_ASYNCH_CLEANUP:
      /* This sets us up for a completion call. */
      noitL(nlerr, "test_abort: EVENTER_ASYNCH_CLEANUP\n");
      e->mask = EVENTER_READ | EVENTER_WRITE;
      break;
    default:
      abort();
  }
  return 0;
}
Example #21
0
static int external_invoke(noit_module_t *self, noit_check_t *check,
                           noit_check_t *cause) {
  struct timeval when, p_int;
  external_closure_t *ecl;
  struct check_info *ci = (struct check_info *)check->closure;
  eventer_t newe;
  external_data_t *data;
  noit_hash_table check_attrs_hash = NOIT_HASH_EMPTY;
  int i, klen;
  noit_hash_iter iter = NOIT_HASH_ITER_ZERO;
  const char *name, *value;
  char interp_fmt[4096], interp_buff[4096];

  data = noit_module_get_userdata(self);

  check->flags |= NP_RUNNING;
  noitL(data->nldeb, "external_invoke(%p,%s)\n",
        self, check->target);

  /* remove a timeout if we still have one -- we should unless someone
   * has set a lower timeout than the period.
   */
  if(ci->timeout_event) {
    eventer_remove(ci->timeout_event);
    free(ci->timeout_event->closure);
    eventer_free(ci->timeout_event);
    ci->timeout_event = NULL;
  }

  check_info_clean(ci);

  gettimeofday(&when, NULL);
  memcpy(&check->last_fire_time, &when, sizeof(when));

  /* Setup all our check bits */
  ci->check_no = noit_atomic_inc64(&data->check_no_seq);
  ci->check = check;
  /* We might want to extract metrics */
  if(noit_hash_retr_str(check->config,
                        "output_extract", strlen("output_extract"),
                        &value) != 0) {
    const char *error;
    int erroffset;
    ci->matcher = pcre_compile(value, 0, &error, &erroffset, NULL);
    if(!ci->matcher) {
      noitL(data->nlerr, "external pcre /%s/ failed @ %d: %s\n",
            value, erroffset, error);
    }
  }

  noit_check_make_attrs(check, &check_attrs_hash);

  /* Count the args */
  i = 1;
  while(1) {
    char argname[10];
    snprintf(argname, sizeof(argname), "arg%d", i);
    if(noit_hash_retr_str(check->config, argname, strlen(argname),
                          &value) == 0) break;
    i++;
  }
  ci->argcnt = i + 1; /* path, arg0, (i-1 more args) */
  ci->arglens = calloc(ci->argcnt, sizeof(*ci->arglens));
  ci->args = calloc(ci->argcnt, sizeof(*ci->args));

  /* Make the command */
  if(noit_hash_retr_str(check->config, "command", strlen("command"),
                        &value) == 0) {
    value = "/bin/true";
  }
  ci->args[0] = strdup(value);
  ci->arglens[0] = strlen(ci->args[0]) + 1;

  i = 0;
  while(1) {
    char argname[10];
    snprintf(argname, sizeof(argname), "arg%d", i);
    if(noit_hash_retr_str(check->config, argname, strlen(argname),
                          &value) == 0) {
      if(i == 0) {
        /* if we don't have arg0, make it last element of path */
        char *cp = ci->args[0] + strlen(ci->args[0]);
        while(cp > ci->args[0] && *(cp-1) != '/') cp--;
        value = cp;
      }
      else break; /* if we don't have argn, we're done */
    }
    noit_check_interpolate(interp_buff, sizeof(interp_buff), value,
                           &check_attrs_hash, check->config);
    ci->args[i+1] = strdup(interp_buff);
    ci->arglens[i+1] = strlen(ci->args[i+1]) + 1;
    i++;
  }

  /* Make the environment */
  memset(&iter, 0, sizeof(iter));
  ci->envcnt = 0;
  while(noit_hash_next_str(check->config, &iter, &name, &klen, &value))
    if(!strncasecmp(name, "env_", 4))
      ci->envcnt++;
  memset(&iter, 0, sizeof(iter));
  ci->envlens = calloc(ci->envcnt, sizeof(*ci->envlens));
  ci->envs = calloc(ci->envcnt, sizeof(*ci->envs));
  ci->envcnt = 0;
  while(noit_hash_next_str(check->config, &iter, &name, &klen, &value))
    if(!strncasecmp(name, "env_", 4)) {
      snprintf(interp_fmt, sizeof(interp_fmt), "%s=%s", name+4, value);
      noit_check_interpolate(interp_buff, sizeof(interp_buff), interp_fmt,
                             &check_attrs_hash, check->config);
      ci->envs[ci->envcnt] = strdup(interp_buff);
      ci->envlens[ci->envcnt] = strlen(ci->envs[ci->envcnt]) + 1;
      ci->envcnt++;
    }

  noit_hash_destroy(&check_attrs_hash, NULL, NULL);

  noit_hash_store(&data->external_checks,
                  (const char *)&ci->check_no, sizeof(ci->check_no),
                  ci);

  /* Setup a timeout */
  newe = eventer_alloc();
  newe->mask = EVENTER_TIMER;
  gettimeofday(&when, NULL);
  p_int.tv_sec = check->timeout / 1000;
  p_int.tv_usec = (check->timeout % 1000) * 1000;
  add_timeval(when, p_int, &newe->whence);
  ecl = calloc(1, sizeof(*ecl));
  ecl->self = self;
  ecl->check = check;
  newe->closure = ecl;
  newe->callback = external_timeout;
  eventer_add(newe);
  ci->timeout_event = newe;

  /* Setup push */
  newe = eventer_alloc();
  newe->mask = EVENTER_ASYNCH;
  add_timeval(when, p_int, &newe->whence);
  ecl = calloc(1, sizeof(*ecl));
  ecl->self = self;
  ecl->check = check;
  newe->closure = ecl;
  newe->callback = external_enqueue;
  eventer_add(newe);

  return 0;
}
Example #22
0
int
noit_check_schedule_next(noit_module_t *self,
                         struct timeval *last_check, noit_check_t *check,
                         struct timeval *now, dispatch_func_t dispatch,
                         noit_check_t *cause) {
  eventer_t newe;
  struct timeval period, earliest, diff;
  int64_t diffms, periodms, offsetms;
  recur_closure_t *rcl;
  int initial = last_check ? 1 : 0;

  assert(cause == NULL);
  assert(check->fire_event == NULL);
  if(check->period == 0) return 0;

  /* If last_check is not passed, we use the initial_schedule_time;
   * otherwise, we set the initial_schedule_time.
   */
  if(!last_check) last_check = &check->initial_schedule_time;
  else memcpy(&check->initial_schedule_time, last_check, sizeof(*last_check));

  if(NOIT_CHECK_DISABLED(check) || NOIT_CHECK_KILLED(check)) {
    if(!(check->flags & NP_TRANSIENT)) check_slots_dec_tv(last_check);
    memset(&check->initial_schedule_time, 0, sizeof(struct timeval));
    return 0;
  }

  /* If we have an event, we know when we intended it to fire.  This means
   * we should schedule that point + period.
   */
  if(now)
    memcpy(&earliest, now, sizeof(earliest));
  else
    gettimeofday(&earliest, NULL);

  /* If the check is unconfigured and needs resolving, we'll set the
   * period down a bit lower so we can pick up the resolution quickly.
   * The one exception is if this is the initial run.
   */
  if(!initial &&
     !NOIT_CHECK_RESOLVED(check) && NOIT_CHECK_SHOULD_RESOLVE(check) &&
     check->period > 1000) {
    period.tv_sec = 1;
    period.tv_usec = 0;
  }
  else {
    period.tv_sec = check->period / 1000;
    period.tv_usec = (check->period % 1000) * 1000;
  }
  periodms = period.tv_sec * 1000 + period.tv_usec / 1000;

  newe = eventer_alloc();
  /* calculate the difference between the initial schedule time and "now" */
  if(compare_timeval(earliest, *last_check) >= 0) {
    sub_timeval(earliest, *last_check, &diff);
    diffms = (int64_t)diff.tv_sec * 1000 + diff.tv_usec / 1000;
  }
  else {
    noitL(noit_error, "time is going backwards. abort.\n");
    abort();
  }
  /* determine the offset from initial schedule time that would place
   * us at the next period-aligned point past "now" */
  offsetms = ((diffms / periodms) + 1) * periodms;
  diff.tv_sec = offsetms / 1000;
  diff.tv_usec = (offsetms % 1000) * 1000;

  memcpy(&newe->whence, last_check, sizeof(*last_check));
  add_timeval(newe->whence, diff, &newe->whence);

  sub_timeval(newe->whence, earliest, &diff);
  diffms = (int64_t)diff.tv_sec * 1000 + (int)diff.tv_usec / 1000;
  assert(compare_timeval(newe->whence, earliest) > 0);
  newe->mask = EVENTER_TIMER;
  newe->callback = noit_check_recur_handler;
  rcl = calloc(1, sizeof(*rcl));
  rcl->self = self;
  rcl->check = check;
  rcl->cause = cause;
  rcl->dispatch = dispatch;
  newe->closure = rcl;

  eventer_add(newe);
  check->fire_event = newe;
  return diffms;
}
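/*
 * Editor's note: the scheduling math above aligns the next fire time to the
 * grid defined by the initial schedule time and the period (offsetms is the
 * first whole multiple of periodms strictly past "now").  A self-contained
 * sketch of that calculation with hypothetical names; rounding at the
 * millisecond boundary may differ slightly from the code above.
 */
#include <stdint.h>
#include <sys/time.h>

static struct timeval next_aligned_fire(struct timeval anchor,
                                        struct timeval now,
                                        int64_t period_ms)
{
    struct timeval whence;
    int64_t diff_ms, offset_ms;

    /* elapsed time from the anchor to "now", in milliseconds */
    diff_ms = (int64_t)(now.tv_sec - anchor.tv_sec) * 1000 +
              (now.tv_usec - anchor.tv_usec) / 1000;

    /* first multiple of the period strictly after that elapsed time */
    offset_ms = ((diff_ms / period_ms) + 1) * period_ms;

    whence.tv_sec = anchor.tv_sec + offset_ms / 1000;
    whence.tv_usec = anchor.tv_usec + (offset_ms % 1000) * 1000;
    if (whence.tv_usec >= 1000000) {
        whence.tv_usec -= 1000000;
        whence.tv_sec++;
    }
    return whence;
}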