/*
 * stats(CacheAtom) -> {Hits, Misses, Q1Size, Q2Size, IncrQ, Wakeups, DudWakeups}
 *                   | notfound
 *
 * Snapshot the counters of the cache registered under CacheAtom.
 * Queue sizes are read under the cache read lock; the event counters are
 * bumped elsewhere with __sync_add_and_fetch, so they are read here with
 * the matching atomic idiom __sync_fetch_and_add(&x, 0).
 */
static ERL_NIF_TERM
stats(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM atom;
    ERL_NIF_TERM ret, q1s, q2s, incrs, wakeups, duds, hits, misses;
    struct cache *c;

    if (!enif_is_atom(env, argv[0]))
        return enif_make_badarg(env);
    atom = argv[0];

    if ((c = get_cache(atom))) {
        enif_rwlock_rlock(c->cache_lock);
        q1s = enif_make_uint64(env, c->q1.size);
        q2s = enif_make_uint64(env, c->q2.size);
        incrs = enif_make_uint64(env, __sync_fetch_and_add(&(c->incr_count), 0));
        wakeups = enif_make_uint64(env, __sync_fetch_and_add(&(c->wakeups), 0));
        duds = enif_make_uint64(env, __sync_fetch_and_add(&(c->dud_wakeups), 0));
        /* hit/miss are updated atomically elsewhere; the old code read them
         * with a plain load *after* dropping the lock, which is both racy
         * and inconsistent with the other counters above. Read them the
         * same atomic way, inside the critical section. */
        hits = enif_make_uint64(env, __sync_fetch_and_add(&(c->hit), 0));
        misses = enif_make_uint64(env, __sync_fetch_and_add(&(c->miss), 0));
        enif_rwlock_runlock(c->cache_lock);

        ret = enif_make_tuple7(env, hits, misses, q1s, q2s, incrs, wakeups, duds);
        enif_consume_timeslice(env, 10);
        return ret;
    } else {
        return enif_make_atom(env, "notfound");
    }
}
/*
 * Map an Erlang scheduler thread id to its slot in scheduler_ids[].
 * Returns the zero-based index, or -1 if the thread is not registered.
 * The scan is performed under the read side of lookup_lock.
 */
int
get_list_id(ErlNifTid self)
{
    int found = -1;
    unsigned int slot;

    enif_rwlock_rlock(lookup_lock);
    for (slot = 0; slot < schedulers; ++slot) {
        if (scheduler_ids[slot] != self)
            continue;
        found = (int)slot;
        break;
    }
    enif_rwlock_runlock(lookup_lock);

    return found;
}
/*
 * Look up the cache registered under the given atom.
 * Returns the cache pointer, or NULL if no cache is registered.
 * The red-black tree lookup runs under the global atom table read lock.
 */
static struct cache *
get_cache(ERL_NIF_TERM atom)
{
    struct cache *found = NULL;
    struct atom_node key, *match;

    memset(&key, 0, sizeof(key));
    key.atom = atom;

    enif_rwlock_rlock(gbl->atom_lock);
    match = RB_FIND(atom_tree, &(gbl->atom_head), &key);
    if (match != NULL)
        found = match->cache;
    enif_rwlock_runlock(gbl->atom_lock);

    return found;
}
/*
 * robin_q_next(Handle) -> {ok, Element}
 *
 * Return the next element of the queue in round-robin order and advance
 * the cursor.
 *
 * Fix: the cursor (handle->index) is *written* in the critical section,
 * so a read lock is insufficient -- concurrent callers would race on the
 * unsynchronized increment. Take the write lock instead.
 */
static ERL_NIF_TERM robin_q_next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    robin_q_handle *handle = NULL;
    unsigned int index;
    ERL_NIF_TERM result;

    if (enif_get_resource(env, argv[0], robin_q_RESOURCE, (void**)&handle) == 0) {
        return enif_make_badarg(env);
    }

    // CRITICAL SECTION
    // We mutate handle->index here, so this must be the write lock
    // (the previous code took the read lock, racing on index++).
    enif_rwlock_rwlock(handle->lock);
    index = (handle->index++) % handle->size;  // NOTE(review): assumes size > 0 -- confirm at resource creation
    result = enif_make_copy(env, handle->elements[index]);
    enif_rwlock_rwunlock(handle->lock);

    return enif_make_tuple2(env, enif_make_atom(env, "ok"), result);
}
/*
 * OpenSSL locking-callback adapter: translate a CRYPTO_LOCK/CRYPTO_UNLOCK
 * + CRYPTO_READ/CRYPTO_WRITE mode word into the matching ErlNifRWLock
 * operation. Any other combination is a programming error.
 */
static INLINE void locking(int mode, ErlNifRWLock* lock)
{
    if (mode == (CRYPTO_LOCK | CRYPTO_READ)) {
        enif_rwlock_rlock(lock);
    } else if (mode == (CRYPTO_LOCK | CRYPTO_WRITE)) {
        enif_rwlock_rwlock(lock);
    } else if (mode == (CRYPTO_UNLOCK | CRYPTO_READ)) {
        enif_rwlock_runlock(lock);
    } else if (mode == (CRYPTO_UNLOCK | CRYPTO_WRITE)) {
        enif_rwlock_rwunlock(lock);
    } else {
        ASSERT(!"Invalid lock mode");
    }
}
/*
 * get(CacheAtom, KeyBin) -> ValueBin | notfound
 *
 * Look up KeyBin in the cache registered under CacheAtom. On a hit, the
 * value is returned as a resource binary and an "increment" record is
 * queued (bucketed by calling thread id) so a background thread can do
 * the LRU promotion; the background thread is woken once the queue grows
 * past 1024 entries. Entries whose expiry has passed count as misses.
 *
 * Fix: the enif_alloc result was used unchecked; enif_alloc returns NULL
 * on OOM and the memset would have crashed the VM. On allocation failure
 * we now still serve the hit and merely skip the incr-queue bookkeeping.
 */
static ERL_NIF_TERM
get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM atom;
    ErlNifBinary kbin;
    struct cache *c;
    struct cache_node *n;
    struct cache_incr_node *in;
    struct timespec now;
    int incrqs, hashv, bkt;
    ERL_NIF_TERM ret;
    ErlNifTid tid;

    if (!enif_is_atom(env, argv[0]))
        return enif_make_badarg(env);
    atom = argv[0];
    if (!enif_inspect_binary(env, argv[1], &kbin))
        return enif_make_badarg(env);

    if ((c = get_cache(atom))) {
        enif_rwlock_rlock(c->lookup_lock);
        HASH_FIND(hh, c->lookup, kbin.data, kbin.size, n);
        if (!n) {
            enif_rwlock_runlock(c->lookup_lock);
            __sync_add_and_fetch(&c->miss, 1);
            enif_consume_timeslice(env, 10);
            return enif_make_atom(env, "notfound");
        }

        /* a zero tv_sec marks "no expiry"; otherwise a stale entry is a miss */
        if (n->expiry.tv_sec != 0) {
            clock_now(&now);
            if (n->expiry.tv_sec < now.tv_sec) {
                enif_rwlock_runlock(c->lookup_lock);
                __sync_add_and_fetch(&c->miss, 1);
                enif_consume_timeslice(env, 10);
                return enif_make_atom(env, "notfound");
            }
        }

        in = enif_alloc(sizeof(*in));
        if (!in) {
            /* OOM: serve the hit anyway, just without LRU bookkeeping */
            __sync_add_and_fetch(&c->hit, 1);
            ret = enif_make_resource_binary(env, n->val, n->val, n->vsize);
            enif_rwlock_runlock(c->lookup_lock);
            enif_consume_timeslice(env, 20);
            return ret;
        }
        memset(in, 0, sizeof(*in));
        in->node = n;
        __sync_add_and_fetch(&c->hit, 1);

        /* bucket the incr queue by calling thread to reduce mutex contention */
        tid = enif_thread_self();
        HASH_SFH(&tid, sizeof(ErlNifTid), N_INCR_BKT, hashv, bkt);

        enif_mutex_lock(c->incr_lock[bkt]);
        TAILQ_INSERT_TAIL(&(c->incr_head[bkt]), in, entry);
        enif_mutex_unlock(c->incr_lock[bkt]);
        incrqs = __sync_add_and_fetch(&(c->incr_count), 1);

        /* build the return term while still holding the read lock, so the
         * node's value buffer cannot be freed under us */
        ret = enif_make_resource_binary(env, n->val, n->val, n->vsize);
        enif_rwlock_runlock(c->lookup_lock);

        if (incrqs > 1024)
            enif_cond_broadcast(c->check_cond);

        enif_consume_timeslice(env, 20);
        return ret;
    }

    return enif_make_atom(env, "notfound");
}