void
noit_jlog_listener_init() {
  xmlNodePtr node;
  eventer_name_callback("log_transit/1.0", noit_jlog_handler);
  mtev_control_dispatch_delegate(mtev_control_dispatch,
                                 NOIT_JLOG_DATA_FEED,
                                 noit_jlog_handler);
  mtev_control_dispatch_delegate(mtev_control_dispatch,
                                 NOIT_JLOG_DATA_TEMP_FEED,
                                 noit_jlog_handler);
  node = mtev_conf_get_section(NULL, "//logs");
  if (node) {
    mtev_conf_get_int(node, "//jlog/max_msg_batch_lines", &MAX_ROWS_AT_ONCE);
    mtev_conf_get_int(node, "//jlog/default_mseconds_between_batches", &DEFAULT_MSECONDS_BETWEEN_BATCHES);
    mtev_conf_get_int(node, "//jlog/default_transient_mseconds_between_batches", &DEFAULT_TRANSIENT_MSECONDS_BETWEEN_BATCHES);
  }
  mtevAssert(mtev_http_rest_register_auth(
    "GET", "/", "^feed$",
    rest_show_feed, mtev_http_rest_client_cert_auth
  ) == 0);
  mtevAssert(mtev_http_rest_register_auth(
    "DELETE", "/feed/", "^(.+)$",
    rest_delete_feed, mtev_http_rest_client_cert_auth
  ) == 0);
  mtevAssert(mtev_http_rest_register_auth(
    "PUT", "/", "^feed$",
    rest_add_feed, mtev_http_rest_client_cert_auth
  ) == 0);
}
Example #2
static void eventer_ports_impl_add(eventer_t e) {
  mtevAssert(e->mask);
  ev_lock_state_t lockstate;
  const char *cbname;
  cbname = eventer_name_for_callback_e(e->callback, e);

  if(e->mask & EVENTER_ASYNCH) {
    mtevL(eventer_deb, "debug: eventer_add asynch (%s)\n", cbname ? cbname : "???");
    eventer_add_asynch(NULL, e);
    return;
  }

  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    mtevL(eventer_deb, "debug: eventer_add recurrent (%s)\n", cbname ? cbname : "???");
    eventer_add_recurrent(e);
    return;
  }

  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }

  /* file descriptor event */
  mtevL(eventer_deb, "debug: eventer_add fd (%s,%d,0x%04x)\n", cbname ? cbname : "???", e->fd, e->mask);
  lockstate = acquire_master_fd(e->fd);
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  master_fds[e->fd].e = e;
  alter_fd(e, e->mask);
  release_master_fd(e->fd, lockstate);
}
Example #3
void mtev_hash_init_locks(mtev_hash_table *h, int size, mtev_hash_lock_mode_t lock_mode) {
  mtev_rand_init();

  if(size < 8) size = 8;

  mtevAssert(ck_hs_init(&h->u.hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC, hs_hash, hs_compare, &my_allocator,
                        size, mtev_rand()));
  mtevAssert(h->u.hs.hf != NULL);

  h->u.locks.locks = calloc(1, sizeof(struct locks_container));

  mtev_hash_set_lock_mode_funcs(h, lock_mode);
}
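A minimal usage sketch of a table initialized this way, assuming the mtev_hash_store/mtev_hash_retrieve signatures visible in the interest-tracking example below and an mtev_hash.h header name; hash_usage_sketch is a hypothetical helper, not part of libmtev:

#include <string.h>
#include <stdio.h>
#include <mtev_hash.h>   /* assumed header name for the mtev_hash_* API */

/* Hypothetical helper: one store/retrieve round trip against a
 * mutex-protected table created with mtev_hash_init_locks(). */
static void hash_usage_sketch(void) {
  mtev_hash_table table;
  void *value = NULL;
  static const char key[] = "example-key";   /* key storage must outlive the table */

  mtev_hash_init_locks(&table, MTEV_HASH_DEFAULT_SIZE, MTEV_HASH_LOCK_MODE_MUTEX);
  if(!mtev_hash_store(&table, key, strlen(key), (void *)"example-value")) {
    /* store fails if the key is already present */
  }
  if(mtev_hash_retrieve(&table, key, strlen(key), &value))
    printf("found: %s\n", (const char *)value);
}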
Example #4
static int
noit_lua_module_config(noit_module_t *mod,
                       mtev_hash_table *options) {
  struct module_conf *mc;
  struct module_tls_conf *mtlsc;
  LMC_DECL(L, mod, object);

  mc = noit_module_get_userdata(mod);
  if(options) {
    mtevAssert(mc->options == NULL);
    mc->options = calloc(1, sizeof(*mc->options));
    mtev_hash_init(mc->options);
    mtev_hash_merge_as_dict(mc->options, options);
  }
  else options = mc->options;
  mtlsc = __get_module_tls_conf(&mod->hdr);
  if(mtlsc->configured) return mtlsc->configured_return;

  SETUP_CALL(L, object, "config", return 0);

  noit_lua_setup_module(L, mod);
  mtev_lua_hash_to_table(L, options);
  lua_pcall(L, 2, 1, 0);

  /* If rv == 0, the caller will free options. We've
   * already freed options, that would be bad. fudge -> 1 */
  RETURN_INT(L, object, "config",
             { mtlsc->configured = 1; mtlsc->configured_return = rv; });
}
Example #5
void
noit_adjust_metric_interest(uuid_t id, const char *metric, short cnt) {
  int thread_id, icnt;
  void *vhash, *vinterests;
  mtev_hash_table *level2;
  caql_cnt_t *interests;

  thread_id = get_my_lane();

  /* Get us our UUID->HASH(METRICS) mapping */
  if(!mtev_hash_retrieve(&id_level, (const char *)id, UUID_SIZE, &vhash)) {
    int found;
    uuid_t *copy = malloc(UUID_SIZE);
    level2 = calloc(1,sizeof(*level2));
    mtev_hash_init_locks(level2, MTEV_HASH_DEFAULT_SIZE, MTEV_HASH_LOCK_MODE_MUTEX);
    uuid_copy(*copy, id);
    if(!mtev_hash_store(&id_level, (const char *)copy, UUID_SIZE, level2)) {
      free(copy);
      free(level2);
    }
    found = mtev_hash_retrieve(&id_level, (const char *)id, UUID_SIZE, &vhash);
    mtevAssert(found);
  }
  level2 = vhash;

  if(!mtev_hash_retrieve(level2, metric, strlen(metric), &vinterests)) {
    int found;
    char *metric_copy;
    metric_copy = strdup(metric);
    vinterests = calloc(nthreads, sizeof(*interests));
    if(!mtev_hash_store(level2, metric_copy, strlen(metric_copy), vinterests)) {
      free(metric_copy);
      free(vinterests);
    }
    found = mtev_hash_retrieve(level2, metric, strlen(metric), &vinterests);
    mtevAssert(found);
  }
  interests = vinterests;
  /* This is fine because thread_id is only ours */
  icnt = interests[thread_id];
  icnt += cnt;
  if(icnt < 0) icnt = 0;
  mtevAssert(icnt <= 0xffff);
  interests[thread_id] = icnt;
}
Example #6
static void eventer_epoll_impl_add(eventer_t e) {
  int rv;
  struct epoll_spec *spec;
  struct epoll_event _ev;
  ev_lock_state_t lockstate;
  mtevAssert(e->mask);

  if(e->mask & EVENTER_ASYNCH) {
    eventer_add_asynch(NULL, e);
    return;
  }

  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    eventer_add_recurrent(e);
    return;
  }

  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }

  spec = eventer_get_spec_for_event(e);
  /* file descriptor event */
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  memset(&_ev, 0, sizeof(_ev));
  _ev.data.fd = e->fd;
  if(e->mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
  if(e->mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
  if(e->mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);

  lockstate = acquire_master_fd(e->fd);
  master_fds[e->fd].e = e;

  mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, e->fd);
  rv = epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, e->fd, &_ev);
  if(rv != 0) {
    mtevFatal(mtev_error, "epoll_ctl(%d,add,%d,%x) -> %d (%d: %s)\n",
          spec->epoll_fd, e->fd, e->mask, rv, errno, strerror(errno));
  }

  release_master_fd(e->fd, lockstate);
}
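For comparison with the wrapped call above, a standalone sketch of the underlying registration using only the standard Linux epoll API; no libmtev types are involved, and epoll_register_sketch is a made-up name for illustration:

#include <string.h>
#include <stdio.h>
#include <sys/epoll.h>

/* Register fd for read/exception events on a fresh epoll instance --
 * roughly the epoll_ctl(EPOLL_CTL_ADD) step performed above. */
static int epoll_register_sketch(int fd) {
  struct epoll_event ev;
  int epfd = epoll_create1(0);
  if(epfd < 0) { perror("epoll_create1"); return -1; }

  memset(&ev, 0, sizeof(ev));
  ev.events = EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP;
  ev.data.fd = fd;
  if(epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    perror("epoll_ctl(EPOLL_CTL_ADD)");
    return -1;
  }
  /* later: EPOLL_CTL_MOD to change the mask, EPOLL_CTL_DEL to deregister */
  return epfd;
}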
Example #7
static uint64_t
metric_value_uint64(noit_metric_value_t *v) {
  if(!v->is_null) {
    switch(v->type) {
      case METRIC_INT32:
        mtevAssert(v->value.v_int32 >= 0);
        return (uint64_t)v->value.v_int32;
      case METRIC_UINT32: return (uint64_t)v->value.v_uint32;
      case METRIC_INT64:
        mtevAssert(v->value.v_int64 >= 0);
        return (uint64_t)v->value.v_int64;
      case METRIC_UINT64: return v->value.v_uint64;
      default: ;
    }
  }
  mtevFatal(mtev_error, "failed metric_value_uint64(%x, null: %d)\n",
            v->type, v->is_null);
}
Example #8
void
mtev_capabilities_listener_init() {
  eventer_name_callback("capabilities_transit/1.0", mtev_capabilities_handler);
  mtev_control_dispatch_delegate(mtev_control_dispatch,
                                 MTEV_CAPABILITIES_SERVICE,
                                 mtev_capabilities_handler);
  mtevAssert(mtev_http_rest_register("GET", "/", "capa(\\.json)?",
                                 mtev_capabilities_rest) == 0);
}
Example #9
static void
register_console_rabbitmq_commands() {
  mtev_console_state_t *tl;
  cmd_info_t *showcmd;

  tl = mtev_console_state_initial();
  showcmd = mtev_console_state_get_cmd(tl, "show");
  mtevAssert(showcmd && showcmd->dstate);
  mtev_console_state_add_cmd(showcmd->dstate,
    NCSCMD("rabbitmq", noit_console_show_rabbitmq, NULL, NULL, NULL));
}
Example #10
void
noit_adjust_checks_interest(short cnt) {
  int thread_id, icnt;

  thread_id = get_my_lane();

  icnt = check_interests[thread_id];
  icnt += cnt;
  if(icnt < 0) icnt = 0;
  mtevAssert(icnt <= 0xffff);
  check_interests[thread_id] = icnt;
}
Example #11
static void
register_console_dns_cache_commands() {
  mtev_console_state_t *tl;
  cmd_info_t *showcmd, *nocmd;

  tl = mtev_console_state_initial();
  showcmd = mtev_console_state_get_cmd(tl, "show");
  mtevAssert(showcmd && showcmd->dstate);

  nocmd = mtev_console_state_get_cmd(tl, "no");
  mtevAssert(nocmd && nocmd->dstate);

  mtev_console_state_add_cmd(showcmd->dstate,
    NCSCMD("dns_cache", noit_console_show_dns_cache, NULL, NULL, NULL));

  mtev_console_state_add_cmd(tl,
    NCSCMD("dns_cache", noit_console_manip_dns_cache, NULL, NULL, NULL));

  mtev_console_state_add_cmd(nocmd->dstate,
    NCSCMD("dns_cache", noit_console_manip_dns_cache, NULL, NULL, (void *)0x1));
}
Example #12
static void eventer_kqueue_impl_add(eventer_t e) {
  mtevAssert(e->mask);
  mtevAssert(eventer_is_loop(e->thr_owner) >= 0);
  ev_lock_state_t lockstate;
  const char *cbname;
  cbname = eventer_name_for_callback_e(e->callback, e);

  if(e->mask & EVENTER_ASYNCH) {
    mtevL(eventer_deb, "debug: eventer_add asynch (%s)\n", cbname ? cbname : "???");
    eventer_add_asynch(NULL, e);
    return;
  }

  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    mtevL(eventer_deb, "debug: eventer_add recurrent (%s)\n", cbname ? cbname : "???");
    eventer_add_recurrent(e);
    return;
  }

  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }

  /* file descriptor event */
  mtevL(eventer_deb, "debug: eventer_add fd (%s,%d,0x%04x)\n", cbname ? cbname : "???", e->fd, e->mask);
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  lockstate = acquire_master_fd(e->fd);
  master_fds[e->fd].e = e;
  if(e->mask & (EVENTER_READ | EVENTER_EXCEPTION))
    ke_change(e->fd, EVFILT_READ, EV_ADD | EV_ENABLE, e);
  if(e->mask & (EVENTER_WRITE))
    ke_change(e->fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, e);
  release_master_fd(e->fd, lockstate);
}
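The raw kqueue analogue of the registration above, as a self-contained sketch; ke_change() in the example batches such changes, while here the change is submitted immediately. kqueue_register_sketch is a made-up name, and the udata field's type varies slightly across BSDs:

#include <stdio.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

/* Enable read events for fd on a kqueue -- the EV_ADD|EV_ENABLE change
 * that ke_change(e->fd, EVFILT_READ, ...) queues in the example above. */
static int kqueue_register_sketch(int fd, void *udata) {
  struct kevent kev;
  int kq = kqueue();
  if(kq < 0) { perror("kqueue"); return -1; }

  EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, udata);
  if(kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {   /* submit the change, fetch nothing */
    perror("kevent");
    return -1;
  }
  return kq;
}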
Example #13
static int
get_my_lane() {
  if(my_lane.fifo == NULL) {
    int new_thread;
    my_lane.fifo = calloc(1, sizeof(ck_fifo_spsc_t));
    ck_fifo_spsc_init(my_lane.fifo, malloc(sizeof(ck_fifo_spsc_entry_t)));
    for(new_thread=0;new_thread<nthreads;new_thread++) {
      if(mtev_atomic_casptr(&thread_queues[new_thread], my_lane.fifo, NULL) == NULL) break;
    }
    mtevAssert(new_thread<nthreads);
    my_lane.id = new_thread;
    mtevL(mtev_debug, "Assigning thread(%p) to %d\n", (void*)(uintptr_t)pthread_self(), my_lane.id);
  }
  return my_lane.id;
}
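get_my_lane() claims the first free slot in thread_queues with a compare-and-swap; the same pattern in a self-contained form using C11 atomics, where NTHREADS, lane_slots, and claim_lane are illustrative stand-ins rather than libmtev names:

#include <assert.h>
#include <stdatomic.h>

#define NTHREADS 8
static _Atomic(void *) lane_slots[NTHREADS];

/* Claim the first NULL slot by CAS, exactly like the
 * mtev_atomic_casptr() loop above; returns the claimed index. */
static int claim_lane(void *my_fifo) {
  for(int i = 0; i < NTHREADS; i++) {
    void *expected = NULL;
    if(atomic_compare_exchange_strong(&lane_slots[i], &expected, my_fifo))
      return i;                    /* slot i was NULL and is now ours */
    /* expected now holds the winner's pointer; try the next slot */
  }
  assert(!"no free lane");         /* mirrors mtevAssert(new_thread<nthreads) */
  return -1;
}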
Example #14
void noit_metric_director_init() {
  nthreads = eventer_loop_concurrency();
  mtevAssert(nthreads > 0);
  thread_queues = calloc(sizeof(*thread_queues),nthreads);
  check_interests = calloc(sizeof(*check_interests),nthreads);
  if(mtev_fq_handle_message_hook_register_available())
    mtev_fq_handle_message_hook_register("metric-director", handle_fq_message, NULL);
  mtev_log_line_hook_register("metric-director", handle_log_line, NULL);

  eventer_t e = eventer_alloc();
  e->mask = EVENTER_TIMER;
  e->callback = prune_old_dedupe_hashes;
  mtev_gettimeofday(&e->whence, NULL);
  e->whence.tv_sec += 2;
  eventer_add_timed(e);
}
Example #15
void
stratcon_datastore_init() {
  static int initialized = 0;
  if(initialized) return;
  initialized = 1;
  stratcon_datastore_core_init();

  stratcon_ingest_sweep_journals(basejpath,
                                 is_raw_ingestion_file,
                                 stratcon_ingest);

  mtevAssert(mtev_http_rest_register_auth(
    "GET", "/noits/", "^config$", rest_get_noit_config,
             mtev_http_rest_client_cert_auth
  ) == 0);
}
Example #16
int mtev_skiplist_remove_compare(mtev_skiplist *sli,
                                 const void *data,
                                 mtev_freefunc_t myfree,
                                 mtev_skiplist_comparator_t comp) {
  mtev_skiplist_node *m;
  mtev_skiplist *sl;
  if(comp==sli->comparek || !sli->index) {
    sl = sli;
  } else {
    mtev_skiplist_find(sli->index, (void *)comp, &m);
    mtevAssert(m);
    sl= (mtev_skiplist *) m->data;
  }
  mtev_skiplisti_find_compare(sl, data, &m, NULL, NULL, sl->comparek);
  if(!m) return 0;
  while(m->previndex) m=m->previndex;
  return mtev_skiplisti_remove(m->sl, m, myfree);
}
Example #17
static int
noit_module_index_func(lua_State *L) {
  noit_module_t **udata, *module;
  const char *k;
  int n;
  n = lua_gettop(L);    /* number of arguments */
  mtevAssert(n == 2);
  if(!luaL_checkudata(L, 1, "noit_module_t")) {
    luaL_error(L, "metatable error, arg1 not a noit_module_t!");
  }
  udata = lua_touserdata(L, 1);
  module = *udata;
  if(!lua_isstring(L, 2)) {
    luaL_error(L, "metatable error, arg2 not a string!");
  }
  k = lua_tostring(L, 2);
  switch(*k) {
    case 'd':
      if(!strcmp(k, "description")) {
        lua_pushlightuserdata(L, module);
        lua_pushcclosure(L, noit_lua_module_set_description, 1);
      }
      else break;
      return 1;
    case 'n':
      if(!strcmp(k, "name")) {
        lua_pushlightuserdata(L, module);
        lua_pushcclosure(L, noit_lua_module_set_name, 1);
      }
      else break;
      return 1;
    case 'x':
      if(!strcmp(k, "xml_description")) {
        lua_pushlightuserdata(L, module);
        lua_pushcclosure(L, noit_lua_module_set_xml_description, 1);
      }
      else break;
      return 1;
    default:
      break;
  }
  luaL_error(L, "noit_module_t no such element: %s", k);
  return 0;
}
Example #18
int
mtev_cht_vlookup_n(mtev_cht_t *cht, const void *key, size_t keylen,
                   int w, mtev_cht_node_t **nodes) {
  int i, l, r, m = 0, rsize = cht->node_cnt * cht->weight;
  int w_out = 0;
  int found[CHT_MAX_W] = {-1};
  uint32_t val, hash;

  if(w > CHT_MAX_W) w = CHT_MAX_W;
  if(w < 0) w = 0;
  if(cht->node_cnt < 1) return -1;
  hash = mtev_cht_hash(cht, key, keylen, CHT_INITVAL);

  /* binary search for the node */
  l = 0;
  r = rsize;
  while(l < r) {
    m = (l + r) / 2;
    val = cht->ring[m].pos;
    if(hash == val) break;
    if(hash < val) r = m;
    else if(l == m) break;
    else l = m;
  }

  /* handle wrap under */
  if(hash < cht->ring[m].pos) {
    mtevAssert(m == 0);
    m = rsize - 1;
  }
  for(i=m;w_out < cht->node_cnt && i < rsize+m;i++) {
    int id = cht->ring[i % rsize].node_idx;
    if(iarrcontains(w_out, found, id) < 0) {
      found[w_out++] = id;
    }
  }
  found[w_out++] = cht->ring[m].node_idx;
  for(i=0;i<w && i<w_out;i++)
    nodes[i] = &cht->nodes[found[i]];
  return i;
}
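The l/r/m loop above is a binary search over the sorted ring followed by wrap-under handling; an equivalent self-contained formulation of that owner selection, illustrative only and not the libmtev API:

#include <stddef.h>
#include <stdint.h>

/* Pick the owner for `hash` on a ring of positions sorted ascending,
 * using the same "predecessor, wrap under" rule as above: the last
 * entry with pos <= hash, or the last ring entry when hash precedes
 * every position. */
static size_t ring_owner_index(const uint32_t *pos, size_t rsize, uint32_t hash) {
  size_t lo = 0, hi = rsize;             /* find the first index with pos > hash */
  while(lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if(pos[mid] <= hash) lo = mid + 1;
    else hi = mid;
  }
  return lo == 0 ? rsize - 1 : lo - 1;   /* lo == 0 means wrap under */
}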
Example #19
void *mtev_skiplist_find_neighbors_compare(mtev_skiplist *sli,
                                           const void *data,
                                           mtev_skiplist_node **iter,
                                           mtev_skiplist_node **prev,
                                           mtev_skiplist_node **next,
                                           mtev_skiplist_comparator_t comp) {
  mtev_skiplist_node *m = NULL;
  mtev_skiplist *sl;
  if(iter) *iter = NULL;
  if(prev) *prev = NULL;
  if(next) *next = NULL;
  if(comp==sli->compare || !sli->index) {
    sl = sli;
  } else {
    mtev_skiplist_find(sli->index, (void *)comp, &m);
    mtevAssert(m);
    sl= (mtev_skiplist *) m->data;
  }
  mtev_skiplisti_find_compare(sl, data, iter, prev, next, sl->comparek);
  return (iter && *iter)?((*iter)->data):NULL;
}
Example #20
static int
lua_general_resume(mtev_lua_resume_info_t *ri, int nargs) {
  const char *err = NULL;
  int status, base, rv = 0;

  mtevAssert(pthread_equal(pthread_self(), ri->bound_thread));

#if LUA_VERSION_NUM >= 502
  status = lua_resume(ri->coro_state, ri->lmc->lua_state, nargs);
#else
  status = lua_resume(ri->coro_state, nargs);
#endif

  switch(status) {
    case 0: break;
    case LUA_YIELD:
      lua_gc(ri->coro_state, LUA_GCCOLLECT, 0);
      return 0;
    default: /* The complicated case */
      mtevL(nlerr, "lua coro resume failed: %d\n", status);
      base = lua_gettop(ri->coro_state);
      if(base>0) {
        mtev_lua_traceback(ri->coro_state);
        base = lua_gettop(ri->coro_state);
        if(lua_isstring(ri->coro_state, base)) {
          err = lua_tostring(ri->coro_state, base);
          mtevL(nlerr, "lua error: %s\n", err);
        }
      }
      tragic_failure(ri->lmc->self);
      rv = -1;
  }

  lua_general_ctx_free(ri);
  return rv;
}
Example #21
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  int added_to_master_fds = 0;
  u_int64_t start, duration;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    master_fds[fd].e = e;
    e->mask = 0;
    added_to_master_fds = 1;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);

  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "epoll: fire on %d/%x to %s(%p)\n",
         fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    } else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(! added_to_master_fds && epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                    spec->epoll_fd, fd, errno, strerror(errno));
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n", e, (int)pthread_self(), (int)tgt);
      }
      else {
        int epoll_cmd = added_to_master_fds ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        if(epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev) != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_MOD, fd, &_ev) failed; "
              "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
              spec->epoll_fd, fd, errno, strerror(errno), cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
Example #22
int mtev_skiplist_remove_node(mtev_skiplist *sl, mtev_skiplist_node *m,
                              mtev_freefunc_t myfree) {
  while(m->previndex) m = m->previndex;
  mtevAssert(sl == m->sl);
  return mtev_skiplisti_remove(sl, m, myfree);
}
Example #23
mtev_skiplist_node *mtev_skiplist_insert_compare(mtev_skiplist *sl,
                                                 const void *data,
                                                 mtev_skiplist_comparator_t comp) {
  mtev_skiplist_node *m, *p, *tmp, *ret = NULL, **stack;
  int nh=1, ch, stacki;
  if(!sl->top) {
    sl->height = 1;
    sl->top = sl->bottom = 
      calloc(1, sizeof(mtev_skiplist_node));
    sl->top->sl = sl;
  }
  if(sl->preheight) {
    while(nh < sl->preheight && get_b_rand()) nh++;
  } else {
    while(nh <= sl->height && get_b_rand()) nh++;
  }
  /* Now we have the new height at which we wish to insert our new node */
  /* Let us make sure that our tree is a least that tall (grow if necessary)*/
  for(;sl->height<nh;sl->height++) {
    sl->top->up = (mtev_skiplist_node *)calloc(1, sizeof(mtev_skiplist_node));
    sl->top->up->down = sl->top;
    sl->top = sl->top->up;
    sl->top->sl = sl;
  }
  ch = sl->height;
  /* Find the node (or node after which we would insert) */
  /* Keep a stack to pop back through for insertion */
  m = sl->top;
  stack = (mtev_skiplist_node **)alloca(sizeof(mtev_skiplist_node *)*(nh));
  stacki=0;
  while(m) {
    int compared=-1;
    if(m->next) compared=comp(data, m->next->data);
    if(compared == 0) {
      return 0;
    }
    if(compared<0) {
      if(ch<=nh) {
	/* push on stack */
	stack[stacki++] = m;
      }
      m = m->down;
      ch--;
    } else {
      m = m->next;
    }
  }
  /* Pop the stack and insert nodes */
  p = NULL;
  for(;stacki>0;stacki--) {
    m = stack[stacki-1];
    tmp = calloc(1, sizeof(*tmp));
    tmp->next = m->next;
    if(m->next) m->next->prev=tmp;
    tmp->prev = m;
    tmp->down = p;
    if(p) p->up=tmp;
    tmp->data = (void *)data;
    tmp->sl = sl;
    m->next = tmp;
    /* This sets ret to the bottom-most node we are inserting */
    if(!p) ret=tmp;
    p = tmp;
  }
  if(sl->index != NULL) {
    /* this is a external insertion, we must insert into each index as well */
    mtev_skiplist_node *p, *ni, *li;
    mtevAssert(ret);
    li=ret;
    for(p = mtev_skiplist_getlist(sl->index); p; mtev_skiplist_next(sl->index, &p)) {
      ni = mtev_skiplist_insert((mtev_skiplist *)p->data, ret->data);
      mtevAssert(ni);
      li->nextindex = ni;
      ni->previndex = li;
      li = ni;
    }
  }
  sl->size++;
  return ret;
}
Example #24
static int
noit_check_index_func(lua_State *L) {
  int n;
  const char *k;
  noit_check_t **udata, *check;
  n = lua_gettop(L);    /* number of arguments */
  mtevAssert(n == 2);
  if(!luaL_checkudata(L, 1, "noit_check_t")) {
    luaL_error(L, "metatable error, arg1 not a noit_check_t!");
  }
  udata = lua_touserdata(L, 1);
  check = *udata;
  if(!lua_isstring(L, 2)) {
    luaL_error(L, "metatable error, arg2 not a string!");
  }
  k = lua_tostring(L, 2);
  switch(*k) {
    case 'a':
      if(!strcmp(k, "available")) {
        lua_pushlightuserdata(L, check);
        lua_pushinteger(L, NP_AVAILABLE);
        lua_pushcclosure(L, noit_lua_set_available, 2);
      }
      else if(!strcmp(k, "availability")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_get_available, 1);
      }
      else break;
      return 1;
    case 'b':
      if(!strcmp(k, "bad")) {
        lua_pushlightuserdata(L, check);
        lua_pushinteger(L, NP_BAD);
        lua_pushcclosure(L, noit_lua_set_state, 2);
      }
      else break;
      return 1;
    case 'c':
      if(!strcmp(k, "config")) mtev_lua_hash_to_table(L, check->config);
      else if(!strcmp(k, "checkid")) {
        char uuid_str[UUID_STR_LEN + 1];
        uuid_unparse_lower(check->checkid, uuid_str);
        lua_pushstring(L, uuid_str);
      }
      else break;
      return 1;
    case 'f':
      if(!strcmp(k, "flags")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_get_flags, 1);
      }
      else break;
      return 1;
    case 'g':
      if(!strcmp(k, "good")) {
        lua_pushlightuserdata(L, check);
        lua_pushinteger(L, NP_GOOD);
        lua_pushcclosure(L, noit_lua_set_state, 2);
      }
      else break;
      return 1;
    case 'i':
      if(!strcmp(k, "interpolate")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_interpolate, 1);
      }
      else if(!strcmp(k, "is_thread_local")) {
        if(check->fire_event &&
           pthread_equal(pthread_self(), check->fire_event->thr_owner)) {
          lua_pushboolean(L, 1);
        }
        else {
          lua_pushboolean(L, 0);
        }
        return 1;
      }

#define IF_METRIC_IMMEDIATE_BLOCK(name,type) \
      if(!strcmp(k, "immediate_" name)) { \
        lua_pushlightuserdata(L, check); \
        lua_pushinteger(L, type); \
        lua_pushcclosure(L, noit_lua_log_immediate_metric, 2); \
      }
      else IF_METRIC_IMMEDIATE_BLOCK("metric", METRIC_GUESS)
      else IF_METRIC_IMMEDIATE_BLOCK("metric_string", METRIC_STRING)
      else IF_METRIC_IMMEDIATE_BLOCK("metric_int32", METRIC_INT32)
      else IF_METRIC_IMMEDIATE_BLOCK("metric_uint32", METRIC_UINT32)
      else IF_METRIC_IMMEDIATE_BLOCK("metric_int64", METRIC_INT64)
      else IF_METRIC_IMMEDIATE_BLOCK("metric_uint64", METRIC_UINT64)
      else IF_METRIC_IMMEDIATE_BLOCK("metric_double", METRIC_DOUBLE)
      else if(!strcmp(k, "immediate_histogram")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_set_histo_metric, 1);
      }
      else break;

      return 1;
    case 'm':
      if(!strcmp(k, "module")) lua_pushstring(L, check->module);

#define IF_METRIC_BLOCK(name,type) \
      if(!strcmp(k, name)) { \
        lua_pushlightuserdata(L, check); \
        lua_pushinteger(L, type); \
        lua_pushcclosure(L, noit_lua_set_metric, 2); \
      }

      else IF_METRIC_BLOCK("metric", METRIC_GUESS)
      else IF_METRIC_BLOCK("metric_string", METRIC_STRING)
      else IF_METRIC_BLOCK("metric_int32", METRIC_INT32)
      else IF_METRIC_BLOCK("metric_uint32", METRIC_UINT32)
      else IF_METRIC_BLOCK("metric_int64", METRIC_INT64)
      else IF_METRIC_BLOCK("metric_uint64", METRIC_UINT64)
      else IF_METRIC_BLOCK("metric_double", METRIC_DOUBLE)
      else if(!strcmp(k, "metric_json")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_set_metric_json, 1);
      }

      else break;
      return 1;
    case 'n':
      if(!strcmp(k, "name")) lua_pushstring(L, check->name);
      else break;
      return 1;
    case 'p':
      if(!strcmp(k, "period")) lua_pushinteger(L, check->period);
      else break;
      return 1;
    case 's':
      if(!strcmp(k, "set_flags")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_set_flags, 1);
      }
      else if(!strcmp(k, "state")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_get_state, 1);
      }
      else if(!strcmp(k, "status")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_set_status, 1);
      }
      else break;
      return 1;
    case 't':
      if(!strcmp(k, "target")) lua_pushstring(L, check->target);
      else if(!strcmp(k, "target_ip")) {
        if(check->target_ip[0] == '\0') lua_pushnil(L);
        else lua_pushstring(L, check->target_ip);
      }
      else if(!strcmp(k, "timeout")) lua_pushinteger(L, check->timeout);
      else break;
      return 1;
    case 'u':
      if(!strcmp(k, "unavailable")) {
        lua_pushlightuserdata(L, check);
        lua_pushinteger(L, NP_UNAVAILABLE);
        lua_pushcclosure(L, noit_lua_set_available, 2);
      }
      else if(!strcmp(k, "unset_flags")) {
        lua_pushlightuserdata(L, check);
        lua_pushcclosure(L, noit_lua_unset_flags, 1);
      }
      else if(!strcmp(k, "uuid")) {
        char uuid_str[UUID_STR_LEN+1];
        uuid_unparse_lower(check->checkid, uuid_str);
        lua_pushstring(L, uuid_str);
        return 1;
      }
      else break;
      return 1;
    default:
      break;
  }
  luaL_error(L, "noit_check_t no such element: %s", k);
  return 0;
}
Example #25
static int eventer_ports_impl_loop(int id) {
  struct timeval __dyna_sleep = { 0, 0 };
  struct ports_spec *spec;
  spec = eventer_get_spec_for_event(NULL);

  while(1) {
    struct timeval __sleeptime;
    struct timespec __ports_sleeptime;
    unsigned int fd_cnt = 0;
    int ret;
    port_event_t pevents[MAX_PORT_EVENTS];

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;
 
    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent();

    /* Now we move on to our fd-based events */
    __ports_sleeptime.tv_sec = __sleeptime.tv_sec;
    __ports_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = 1;

    pevents[0].portev_source = 65535; /* This is impossible */

    ret = port_getn(spec->port_fd, pevents, MAX_PORT_EVENTS, &fd_cnt,
                    &__ports_sleeptime);
    spec->wakeup_notify = 0; /* force unlock */
    /* The timeout case is a tad complex with ports.  -1/ETIME is clearly
     * a timeout.  However, it is possible that we got that and fd_cnt isn't
     * 0, which means we both timed out and got events... WTF?
     */
    if(fd_cnt == 0 ||
       (ret == -1 && errno == ETIME && pevents[0].portev_source == 65535))
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);

    if(ret == -1 && (errno != ETIME && errno != EINTR))
      mtevL(eventer_err, "port_getn: %s\n", strerror(errno));

    if(ret < 0)
      mtevL(eventer_deb, "port_getn: %s\n", strerror(errno));

    mtevL(eventer_deb, "debug: port_getn(%d, [], %d) => %d\n",
          spec->port_fd, fd_cnt, ret);

    if(pevents[0].portev_source == 65535) {
      /* the impossible still remains, which means our fd_cnt _must_ be 0 */
      fd_cnt = 0;
    }

    if(fd_cnt > 0) {
      int idx;
      /* Loop a last time to process */
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      for(idx = 0; idx < fd_cnt; idx++) {
        port_event_t *pe;
        eventer_t e;
        int fd, mask;

        pe = &pevents[idx];
        if(pe->portev_source != PORT_SOURCE_FD) continue;
        fd = (int)pe->portev_object;
        mtevAssert((intptr_t)pe->portev_user == fd);
        e = master_fds[fd].e;

        /* It's possible that someone removed the event and freed it
         * before we got here.... bail out if we're null.
         */
        if (!e) continue;

        mask = 0;
        if(pe->portev_events & (POLLIN | POLLHUP))
          mask |= EVENTER_READ;
        if(pe->portev_events & (POLLOUT))
          mask |= EVENTER_WRITE;
        if(pe->portev_events & (POLLERR | POLLHUP | POLLNVAL))
          mask |= EVENTER_EXCEPTION;

        eventer_ports_impl_trigger(e, mask);
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Example #26
static void
eventer_ports_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  const char *cbname;
  struct timeval __now;
  int fd, newmask;
  uint64_t start, duration;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */

    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    lockstate = acquire_master_fd(fd);
    if (lockstate == EV_ALREADY_OWNED) {
      /* The incoming triggered event is already owned by this thread.
	 This means our floated event completed before the current
	 event handler even exited.  So it retriggered recursively
	 from inside the event handler.

	 Treat this special case the same as a cross thread trigger
	 and just queue this event to be picked up on the next loop
      */
      eventer_cross_thread_trigger(e, mask);
      return;
    }
    release_master_fd(fd, lockstate);
    master_fds[fd].e = e;
    e->mask = 0;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) {
    mtevL(eventer_deb, "Incoming event: %p already owned by this thread\n", e);
    return;
  }
  mtevAssert(lockstate == EV_OWNED);

  eventer_mark_callback_time();
  eventer_gettimeofcallback(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevL(eventer_deb, "ports: fire on %d/%x to %s(%p)\n",
        fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = eventer_run_callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    if(!pthread_equal(pthread_self(), e->thr_owner)) {
      pthread_t tgt = e->thr_owner;
      e->thr_owner = pthread_self();
      alter_fd(e, 0);
      e->thr_owner = tgt;
      alter_fd(e, newmask);
      mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n", e, pthread_self(), tgt);
    }
    else {
      alter_fd(e, newmask);
      /* Set our mask */
      e->mask = newmask;
      mtevL(eventer_deb, "ports: complete on %d/(%x->%x) to %s(%p)\n",
            fd, mask, newmask, cbname?cbname:"???", e->callback);
    }
  }
  else {
    mtevL(eventer_deb, "ports: complete on %d/none to %s(%p)\n",
          fd, cbname?cbname:"???", e->callback);
    /*
     * Long story long:
     *  When integrating with a few external event systems, we find
     *  it difficult to make their use of remove+add as an update
     *  as it can be recurrent in a single handler call and you cannot
     *  remove completely from the event system if you are going to
     *  just update (otherwise the eventer_t in your call stack could
     *  be stale).  What we do is perform a superficial remove, marking
     *  the mask as 0, but not eventer_remove_fd.  Then on an add, if
     *  we already have an event, we just update the mask (as we
     *  have not yet returned to the eventer's loop).
     *  This leaves us in a tricky situation when a remove is called
     *  and the add doesn't roll in, we return 0 (mask == 0) and hit
     *  this spot.  We have intended to remove the event, but it still
     *  resides at master_fds[fd].e -- even after we free it.
     *  So, in the event that we return 0 and that
     *  master_fds[fd].e == the event we're about to free... we NULL
     *  it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
Example #27
static void eventer_kqueue_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  struct timeval __now;
  int oldmask, newmask;
  const char *cbname;
  int fd;
  u_int64_t start, duration;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */

    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    master_fds[fd].e = e;
    e->mask = 0;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);

  mtev_gettimeofday(&__now, NULL);
  /* We're going to lie to ourselves.  You'd think this should be:
   * oldmask = e->mask;  However, we just fired with masks[fd], so
   * kqueue is clearly looking for all of the events in masks[fd].
   * So, we combine them "just to be safe."
   */
  oldmask = e->mask | masks[fd];
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "kqueue: fire on %d/%x to %s(%p)\n",
         fd, masks[fd], cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    if(!pthread_equal(pthread_self(), e->thr_owner)) {
      pthread_t tgt = e->thr_owner;
      e->thr_owner = pthread_self();
      alter_kqueue_mask(e, oldmask, 0);
      e->thr_owner = tgt;
      mtevL(eventer_deb, "moved event[%p] from t@%llx to t@%llx\n", e, (vpsized_int)pthread_self(), (vpsized_int)tgt);
      if(newmask) eventer_cross_thread_trigger(e, newmask & ~(EVENTER_EXCEPTION));
    }
    else {
      if(master_fds[fd].e != e) {
        e = master_fds[fd].e;
        mtevL(eventer_deb, "%strigger complete [event switched] %d : %x->%x\n", cross_thread ? "[X]" : "", e->fd, master_fds[fd].e->mask, newmask);
      } else {
        mtevL(eventer_deb, "%strigger complete %d : %x->%x\n", cross_thread ? "[X]" : "", e->fd, oldmask, newmask);
      }
      alter_kqueue_mask(e, (e->mask == 0 || cross_thread) ? 0 : oldmask, newmask);
      /* Set our mask */
      e->mask = newmask;
    }
  }
  else {
    /*
     * Long story long:
     *  When integrating with a few external event systems, we find
     *  it difficult to make their use of remove+add as an update
     *  as it can be recurrent in a single handler call and you cannot
     *  remove completely from the event system if you are going to
     *  just update (otherwise the eventer_t in your call stack could
     *  be stale).  What we do is perform a superficial remove, marking
     *  the mask as 0, but not eventer_remove_fd.  Then on an add, if
     *  we already have an event, we just update the mask (as we
     *  have not yet returned to the eventer's loop).
     *  This leaves us in a tricky situation when a remove is called
     *  and the add doesn't roll in, we return 0 (mask == 0) and hit
     *  this spot.  We have intended to remove the event, but it still
     *  resides at master_fds[fd].e -- even after we free it.
     *  So, in the event that we return 0 and that
     *  master_fds[fd].e == the event we're about to free... we NULL
     *  it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
Example #28
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP(NULL);

  if(eventer_kqueue_impl_register_wakeup(kqs) == -1) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_loop: could not eventer_kqueue_impl_register_wakeup\n");
  }

  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqs->kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    kqs->wakeup_notify = 0;
    if(fd_cnt > 0 || ke_vec_used)
      mtevLT(eventer_deb, &__now, "[t@%llx] kevent(%d, [...], %d) => %d\n", (vpsized_int)pthread_self(), kqs->kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      mtevLT(eventer_err, &__now, "kevent(s/%d): %s\n", kqs->kqueue_fd, strerror(errno));
    }
    else if(fd_cnt == 0 ||
            (fd_cnt == 1 && ke_vec[0].filter == EVFILT_USER)) {
      /* timeout */
      if(fd_cnt) eventer_kqueue_impl_register_wakeup(kqs);
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) {
          eventer_kqueue_impl_register_wakeup(kqs);
          continue;
        }
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->filter == EVFILT_USER) continue;
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            mtevLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        mtevAssert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Example #29
static void
mtev_cht_calculate_ring(mtev_cht_t *cht) {
  int rsize = cht->node_cnt * cht->weight;
  int i, j, idx = 0;
  int collision = 1;
  uint32_t max;
  struct ring_pos *s, *e;

  if(cht->nbits == 32) max = ~0U;
  else max = (1 << cht->nbits) - 1;

  /* HASH */
  for(i=0;i<cht->node_cnt;i++) {
    cht->nodes[i].owned = 0.0;
    for(j=0;j<cht->weight;j++) {
      cht->ring[idx].node_idx = i;
      cht->ring[idx].vnode = j;
      cht->ring[idx].pos =
        mtev_cht_hash(cht, cht->nodes[i].name, strlen(cht->nodes[i].name), j);
      idx++;
    }
  }

  /* SORT */
  qsort(cht->ring, rsize, sizeof(*cht->ring), ring_node_cmp);

  /* FIX POSSIBLE COLLISIONS */
  cht->collisions = 0;
  while(collision) {
    collision = 0;
    for(i=rsize-1;i>0;i--) {
      s = &cht->ring[i-1];
      e = &cht->ring[i];
      if(s->pos == e->pos && s->pos != 0) {
        i--;
        while(i > 0) {
          s = &cht->ring[i--];
          if(s->pos == e->pos && s->pos > 0) {
            s->pos--;
            collision = 1;
            cht->collisions++;
          }
        }
      }
    }
  }
  /* We could have collision at zero now */
  collision = 1;
  while(collision) {
    collision = 0;
    for(i=0;i<rsize-1;i++) {
      s = &cht->ring[i];
      e = &cht->ring[i+1];
      if(s->pos == e->pos && e->pos != max) {
        i++;
        while(i < rsize) {
          e = &cht->ring[i++];
          if(s->pos == e->pos && e->pos < max) {
            e->pos++;
            collision = 1;
            cht->collisions++;
          }
        }
      }
    }
  }

  /* CALCULATE OWNERSHIP */
  for(i=0;i<rsize-1;i++) {
    s = &cht->ring[i];
    e = &cht->ring[i+1];
    mtevAssert(s->pos <= max);
    cht->nodes[s->node_idx].owned +=
      (double)(e->pos - s->pos)/((double)max + 1.0);
  }
  /* the last element wraps */
  s = &cht->ring[rsize-1];
  e = &cht->ring[0];
  mtevAssert(s->pos <= max);
  cht->nodes[s->node_idx].owned +=
    ((double)(max - s->pos) + 1.0 + (double)(e->pos))/((double)max + 1.0);
}
Example #30
static int
replace_config(mtev_console_closure_t ncct,
               mtev_conf_t_userdata_t *info, const char *name,
               const char *value) {
  int i, cnt, rv = -1, active = 0;
  xmlXPathObjectPtr pobj = NULL;
  xmlXPathContextPtr xpath_ctxt = NULL;
  xmlNodePtr node, confignode;
  char xpath[1024], *path;

  path = info->path;
  if(!strcmp(path, "/")) path = "";

  mtev_conf_xml_xpath(NULL, &xpath_ctxt);

  /* Only if checks will fixate this attribute shall we check for
   * child <check> nodes.
   * NOTE: this returns nothing and "seems" okay if we are _in_
   *       a <check> node.  That case is handled below.
   */
  snprintf(xpath, sizeof(xpath), "/noit/%s//check[@uuid]", path);
  pobj = xmlXPathEval((xmlChar *)xpath, xpath_ctxt);
  if(!pobj || pobj->type != XPATH_NODESET) goto out;
  cnt = xmlXPathNodeSetGetLength(pobj->nodesetval);
  for(i=0; i<cnt; i++) {
    uuid_t checkid;
    node = (mtev_conf_section_t)xmlXPathNodeSetItem(pobj->nodesetval, i);
    if(mtev_conf_get_uuid(node, "@uuid", checkid)) {
      noit_check_t *check;
      check = noit_poller_lookup(checkid);
      if(check && NOIT_CHECK_LIVE(check)) active++;
    }
  }
  if(pobj) xmlXPathFreeObject(pobj);

#ifdef UNSAFE_RECONFIG
  snprintf(xpath, sizeof(xpath), "/noit/%s", path);
  pobj = xmlXPathEval((xmlChar *)xpath, xpath_ctxt);
  if(!pobj || pobj->type != XPATH_NODESET) goto out;
  cnt = xmlXPathNodeSetGetLength(pobj->nodesetval);
  if(cnt != 1) {
    nc_printf(ncct, "Internal error: context node disappeared\n");
    goto out;
  }
  node = (mtev_conf_section_t)xmlXPathNodeSetItem(pobj->nodesetval, 0);
  if(!strcmp((const char *)node->name, "check")) {
    uuid_t checkid;
    /* Detect if we are actually a <check> node and attempting to
     * change something we shouldn't.
     * This is the counterpart noted above.
     */
    if(mtev_conf_get_uuid(node, "@uuid", checkid)) {
      noit_check_t *check;
      check = noit_poller_lookup(checkid);
      if(check && NOIT_CHECK_LIVE(check)) active++;
    }
  }
  if(active) {
    nc_printf(ncct, "Cannot set '%s', it would effect %d live check(s)\n",
              name, active);
    goto out;
  }
  if(pobj) xmlXPathFreeObject(pobj);
#endif

  /* Here we want to remove /noit/path/config/name */
  snprintf(xpath, sizeof(xpath), "/noit/%s/config/%s", path, name);
  pobj = xmlXPathEval((xmlChar *)xpath, xpath_ctxt);
  if(!pobj || pobj->type != XPATH_NODESET) goto out;
  if(xmlXPathNodeSetGetLength(pobj->nodesetval) > 0) {
    xmlNodePtr toremove;
    toremove = xmlXPathNodeSetItem(pobj->nodesetval, 0);
    CONF_REMOVE(toremove);
    xmlUnlinkNode(toremove);
  }
  /* TODO: if there are no more children of config, remove config? */
  if(value) {
    if(pobj) xmlXPathFreeObject(pobj);
    /* Here we create config if needed and place a child node under it */
    snprintf(xpath, sizeof(xpath), "/noit/%s/config", path);
    pobj = xmlXPathEval((xmlChar *)xpath, xpath_ctxt);
    if(!pobj || pobj->type != XPATH_NODESET) goto out;
    if(xmlXPathNodeSetGetLength(pobj->nodesetval) == 0) {
      if(pobj) xmlXPathFreeObject(pobj);
      snprintf(xpath, sizeof(xpath), "/noit/%s", path);
      pobj = xmlXPathEval((xmlChar *)xpath, xpath_ctxt);
      if(!pobj || pobj->type != XPATH_NODESET) goto out;
      if(xmlXPathNodeSetGetLength(pobj->nodesetval) != 1) {
        nc_printf(ncct, "Node disappeared from under you!\n");
        goto out;
      }
      confignode = xmlNewChild(xmlXPathNodeSetItem(pobj->nodesetval, 0),
                               NULL, (xmlChar *)"config", NULL);
      if(confignode == NULL) {
        nc_printf(ncct, "Error creating config child node.\n");
        goto out;
      }
    }
    else confignode = xmlXPathNodeSetItem(pobj->nodesetval, 0);

    mtevAssert(confignode);
    /* Now we create a child */
    xmlNewChild(confignode, NULL, (xmlChar *)name, (xmlChar *)value);
    CONF_DIRTY(confignode);
  }
  mtev_conf_mark_changed();
  rv = 0;
 out:
  if(pobj) xmlXPathFreeObject(pobj);
  return rv;
}