Example #1
static struct zdoor_result *
zdoor_cb(struct zdoor_cookie *cookie, char *argp, size_t argp_sz)
{
	struct door *d;
	struct req *r;
	ErlNifEnv *env = enif_alloc_env();

	/* we kept the struct door in the biscuit */
	d = (struct door *)cookie->zdc_biscuit;

	/* this request */
	r = req_alloc();

	/* take the rlist lock first, then the req lock */
	enif_rwlock_rwlock(d->rlock);
	enif_mutex_lock(r->lock);

	req_insert(d, r);

	enif_rwlock_rwunlock(d->rlock);

	/* turn the request data into a binary term so it can go in enif_send() */
	ErlNifBinary bin;
	enif_alloc_binary(argp_sz, &bin);
	memcpy(bin.data, argp, argp_sz);
	ERL_NIF_TERM binTerm = enif_make_binary(env, &bin);

	/* send a message back to the session owner */
	enif_send(NULL, &d->owner, env,
		enif_make_tuple3(env,
			enif_make_atom(env, "zdoor"),
			enif_make_resource(env, r),
			binTerm));

	/* now wait until the request has been replied to; loop because
	   enif_cond_wait can wake spuriously (the replier sets r->rep) */
	while (r->rep == NULL)
		enif_cond_wait(r->cond, r->lock);

	/* convert the reply into a zdoor_result */
	/* we have to use naked malloc() since libzdoor will use free() */
	struct zdoor_result *res = malloc(sizeof(struct zdoor_result));
	res->zdr_size = r->replen;
	res->zdr_data = r->rep;

	r->rep = NULL;
	r->replen = 0;

	/* yes, we have to unlock and re-lock to avoid lock inversion here */
	enif_mutex_unlock(r->lock);

	/* remove and free the struct req */
	enif_rwlock_rwlock(d->rlock);
	enif_mutex_lock(r->lock);
	req_remove(d, r);
	enif_rwlock_rwunlock(d->rlock);
	req_free(r);

	enif_free_env(env);

	return res;
}
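The callback above parks the door thread on r->cond until another thread fills in the reply. A minimal sketch of that reply path, with the struct req fields inferred from the code (the function name req_reply and its exact locking are assumptions, not the project's actual API):

struct req {
	ErlNifMutex *lock;
	ErlNifCond *cond;
	char *rep;      /* reply buffer; naked malloc() so libzdoor can free() it */
	size_t replen;
};

/* hypothetical reply entry point, called once the owner process answers */
static void
req_reply(struct req *r, const void *buf, size_t len)
{
	enif_mutex_lock(r->lock);
	r->rep = malloc(len);
	memcpy(r->rep, buf, len);
	r->replen = len;
	enif_cond_signal(r->cond);   /* wakes zdoor_cb out of enif_cond_wait */
	enif_mutex_unlock(r->lock);
}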
Example #2
File: nif.c Project: arekinath/e2qc
static void
unload_cb(ErlNifEnv *env, void *priv_data)
{
	struct atom_node *an;

	enif_rwlock_rwlock(gbl->atom_lock);

	/* when we unload, we want to tell all of the active caches to die,
	   then join() their bg_threads to wait until they're completely gone */
	while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) {
		struct cache *c = an->cache;
		enif_rwlock_rwunlock(gbl->atom_lock);

		enif_mutex_lock(c->ctrl_lock);
		c->flags |= FL_DYING;
		enif_mutex_unlock(c->ctrl_lock);
		enif_cond_broadcast(c->check_cond);

		enif_thread_join(c->bg_thread, NULL);

		enif_rwlock_rwlock(gbl->atom_lock);
	}

	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_rwlock_destroy(gbl->atom_lock);
	enif_clear_env(gbl->atom_env);
	enif_free(gbl);

	gbl = NULL;
}
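Note why atom_lock is dropped before enif_thread_join(): the background thread's own shutdown path (see cache_bg_thread in Example #10) takes atom_lock to unlink the cache from the atom tree, so joining while still holding it would deadlock. The interleaving being avoided, as a sketch:

/* unload_cb                         cache_bg_thread (FL_DYING set)
 * ---------                         ------------------------------
 * enif_rwlock_rwlock(atom_lock)
 * enif_thread_join(bg_thread) ...   enif_rwlock_rwlock(atom_lock)
 *       never returns                     blocks forever
 */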
Example #3
File: nif.c Project: arekinath/e2qc
/* destroy(Cache :: atom()) -- destroys an entire cache
   destroy(Cache :: atom(), Key :: binary()) -- removes an entry
   from a cache */
static ERL_NIF_TERM
destroy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	ERL_NIF_TERM atom;
	struct cache *c;
	ErlNifBinary kbin;
	struct cache_node *n;

	if (!enif_is_atom(env, argv[0]))
		return enif_make_badarg(env);
	atom = argv[0];

	if ((c = get_cache(atom))) {
		if (argc == 2) {
			if (!enif_inspect_binary(env, argv[1], &kbin))
				return enif_make_badarg(env);

			enif_rwlock_rwlock(c->cache_lock);
			enif_rwlock_rwlock(c->lookup_lock);

			HASH_FIND(hh, c->lookup, kbin.data, kbin.size, n);
			if (!n) {
				enif_rwlock_rwunlock(c->lookup_lock);
				enif_rwlock_rwunlock(c->cache_lock);
				return enif_make_atom(env, "notfound");
			}

			enif_mutex_lock(c->ctrl_lock);

			destroy_cache_node(n);

			enif_mutex_unlock(c->ctrl_lock);
			enif_rwlock_rwunlock(c->lookup_lock);
			enif_rwlock_rwunlock(c->cache_lock);

			enif_consume_timeslice(env, 50);

			return enif_make_atom(env, "ok");

		} else {
			enif_mutex_lock(c->ctrl_lock);
			c->flags |= FL_DYING;
			enif_mutex_unlock(c->ctrl_lock);
			enif_cond_broadcast(c->check_cond);

			enif_thread_join(c->bg_thread, NULL);

			enif_consume_timeslice(env, 100);

			return enif_make_atom(env, "ok");
		}
	}

	return enif_make_atom(env, "notfound");
}
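Several of these examples report work done via enif_consume_timeslice() but ignore its return value, which becomes nonzero once the current scheduler timeslice is spent. A hedged sketch of how a longer-running NIF could act on it (the maybe_yield wrapper is hypothetical; enif_consume_timeslice and enif_schedule_nif are the real APIs):

static ERL_NIF_TERM
maybe_yield(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	/* report ~50% of a timeslice consumed; nonzero return = slice exhausted */
	if (enif_consume_timeslice(env, 50))
		return enif_schedule_nif(env, "maybe_yield", 0, maybe_yield, argc, argv);
	return enif_make_atom(env, "ok");
}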
Example #4
    static ERL_NIF_TERM robin_q_set(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
        robin_q_handle *handle = NULL;
        if (enif_get_resource(env, argv[0], robin_q_RESOURCE, (void**)&handle) == 0) {
            return enif_make_badarg(env);
        }

        enif_rwlock_rwlock(handle->lock);
        enif_clear_env(handle->env);
        do_set(handle, argv[1]);
        enif_rwlock_rwunlock(handle->lock);

        return enif_make_atom(env, "ok");
    }
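robin_q_set replaces whatever term the resource currently holds: it clears the handle's private env and copies the new term in under the write lock. A sketch of the pieces it assumes (the lock and env field names come from the example; the value slot and the do_set body are assumptions):

    typedef struct {
        ErlNifRWLock *lock;  /* guards env and value */
        ErlNifEnv *env;      /* process-independent env owning the stored term */
        ERL_NIF_TERM value;  /* hypothetical slot for the current term */
    } robin_q_handle;

    static void do_set(robin_q_handle *handle, ERL_NIF_TERM term) {
        /* copy into the handle's own env so the term outlives the NIF call */
        handle->value = enif_make_copy(handle->env, term);
    }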
Example #5
static INLINE void locking(int mode, ErlNifRWLock* lock)
{
    switch (mode) {
    case CRYPTO_LOCK|CRYPTO_READ:
	enif_rwlock_rlock(lock);
	break;
    case CRYPTO_LOCK|CRYPTO_WRITE:
	enif_rwlock_rwlock(lock);
	break;
    case CRYPTO_UNLOCK|CRYPTO_READ:
	enif_rwlock_runlock(lock);
	break;
    case CRYPTO_UNLOCK|CRYPTO_WRITE:
	enif_rwlock_rwunlock(lock);
	break;
    default:
	ASSERT(!"Invalid lock mode");
    }
}
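This has the shape of an OpenSSL (pre-1.1.0) locking callback adapted to NIF rwlocks, but OpenSSL calls back with a lock index rather than a lock pointer, so a thin wrapper is needed. A sketch, assuming a lock_vec array sized to CRYPTO_num_locks() was allocated at load time:

static ErlNifRWLock **lock_vec;   /* assumed: one rwlock per CRYPTO_num_locks() */

static void locking_function(int mode, int n, const char *file, int line)
{
    (void)file; (void)line;
    locking(mode, lock_vec[n]);
}

/* at load time: CRYPTO_set_locking_callback(locking_function); */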
Example #6
File: nif.c Project: arekinath/e2qc
/* create(Cache :: atom(), MaxSize :: integer(), MinQ1Size :: integer()) */
static ERL_NIF_TERM
create(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	ERL_NIF_TERM atom;
	ErlNifUInt64 max_size, min_q1_size;
	struct cache *c;

	if (!enif_is_atom(env, argv[0]))
		return enif_make_badarg(env);
	atom = argv[0];

	if (!enif_get_uint64(env, argv[1], &max_size))
		return enif_make_badarg(env);
	if (!enif_get_uint64(env, argv[2], &min_q1_size))
		return enif_make_badarg(env);

	if ((c = get_cache(atom))) {
		ERL_NIF_TERM ret = enif_make_atom(env, "already_exists");
		enif_consume_timeslice(env, 5);

		enif_rwlock_rwlock(c->cache_lock);
		/* expansion is safe because we don't have to engage the background
		   thread and won't cause sudden eviction pressure
		   TODO: a nice way to shrink the cache without seizing it up */
		if (c->max_size < max_size && c->min_q1_size < min_q1_size) {
			c->max_size = max_size;
			c->min_q1_size = min_q1_size;
			enif_rwlock_rwunlock(c->cache_lock);

			ret = enif_make_atom(env, "ok");
			enif_consume_timeslice(env, 10);
		} else {
			enif_rwlock_rwunlock(c->cache_lock);
		}

		return ret;
	} else {
		c = new_cache(atom, max_size, min_q1_size);
		enif_consume_timeslice(env, 20);
		return enif_make_atom(env, "ok");
	}
}
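create() and the other e2qc NIFs resolve the cache atom through get_cache(), which isn't shown in these excerpts. A sketch of its likely shape, based on how new_cache and unload_cb use the atom tree (the lookup-key construction and comparator are assumptions):

static struct cache *
get_cache(ERL_NIF_TERM atom)
{
	struct atom_node find, *an;
	struct cache *c = NULL;

	memset(&find, 0, sizeof(find));
	find.atom = atom;

	enif_rwlock_rlock(gbl->atom_lock);   /* read side is enough for a lookup */
	an = RB_FIND(atom_tree, &(gbl->atom_head), &find);
	if (an)
		c = an->cache;
	enif_rwlock_runlock(gbl->atom_lock);

	return c;
}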
Example #7
File: nif.c Project: arekinath/e2qc
static struct cache *
new_cache(ERL_NIF_TERM atom, ErlNifUInt64 max_size, ErlNifUInt64 min_q1_size)
{
	struct cache *c;
	struct atom_node *an;
	int i;

	c = enif_alloc(sizeof(*c));
	memset(c, 0, sizeof(*c));
	c->max_size = max_size;
	c->min_q1_size = min_q1_size;
	c->lookup_lock = enif_rwlock_create("cache->lookup_lock");
	c->cache_lock = enif_rwlock_create("cache->cache_lock");
	c->ctrl_lock = enif_mutex_create("cache->ctrl_lock");
	c->check_cond = enif_cond_create("cache->check_cond");
	TAILQ_INIT(&(c->q1.head));
	TAILQ_INIT(&(c->q2.head));
	for (i = 0; i < N_INCR_BKT; ++i) {
		TAILQ_INIT(&(c->incr_head[i]));
		c->incr_lock[i] = enif_mutex_create("cache->incr_lock");
	}
	RB_INIT(&(c->expiry_head));

	an = enif_alloc(sizeof(*an));
	memset(an, 0, sizeof(*an));
	an->atom = enif_make_copy(gbl->atom_env, atom);
	an->cache = c;

	c->atom_node = an;

	enif_rwlock_rwlock(gbl->atom_lock);
	RB_INSERT(atom_tree, &(gbl->atom_head), an);
	/* start the background thread for the cache. after this, the bg thread now
	   owns the cache and all its data and will free it at exit */
	enif_thread_create("cachethread", &(c->bg_thread), cache_bg_thread, c, NULL);
	enif_rwlock_rwunlock(gbl->atom_lock);

	return c;
}
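The RB_INSERT/RB_FIND calls on atom_tree need a comparator ordering atom_nodes by their atom term; it isn't shown in these excerpts. A sketch using enif_compare (the entry field name and macro wiring are assumptions):

static int
atom_node_cmp(struct atom_node *a, struct atom_node *b)
{
	int c = enif_compare(a->atom, b->atom);   /* total order over Erlang terms */
	return (c < 0) ? -1 : (c > 0 ? 1 : 0);
}
RB_PROTOTYPE(atom_tree, atom_node, entry, atom_node_cmp);
RB_GENERATE(atom_tree, atom_node, entry, atom_node_cmp);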
Example #8
    static ERL_NIF_TERM cqueue_register(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
        ErlNifTid thread_id = enif_thread_self();
        unsigned int i = 0;

        enif_rwlock_rwlock(lookup_lock);
        for (i = 0; i < schedulers; ++i) {
            if (scheduler_ids[i] == thread_id) {
                printf("Scheduler (%p) already registered.\r\n", thread_id);
                break;
            } else if (scheduler_ids[i] == NULL) {
                printf("Registering scheduler (%p) with index %d\r\n", thread_id, i);
                scheduler_ids[i] = thread_id;
                break;
            }
        }
        enif_rwlock_rwunlock(lookup_lock);

        if (i == schedulers) {
            return enif_make_badarg(env);
        } else {
            return enif_make_atom(env, "ok");
        }
    }
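The globals this function walks are presumably set up when the NIF library loads. A sketch of that initialization (the names come from the example; the load body is an assumption), sizing the array from enif_system_info(). Note also that the ==/NULL comparisons above treat ErlNifTid as a pointer-like type, which holds on common platforms but isn't guaranteed by the NIF API:

    static ErlNifRWLock *lookup_lock;
    static ErlNifTid *scheduler_ids;   /* zeroed slots, one per scheduler */
    static unsigned int schedulers;

    static int load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info) {
        ErlNifSysInfo si;
        enif_system_info(&si, sizeof(si));
        schedulers = si.scheduler_threads;
        scheduler_ids = enif_alloc(schedulers * sizeof(ErlNifTid));
        memset(scheduler_ids, 0, schedulers * sizeof(ErlNifTid));
        lookup_lock = enif_rwlock_create("lookup_lock");
        return 0;
    }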
Example #9
File: nif.c Project: arekinath/e2qc
static ERL_NIF_TERM
put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	ERL_NIF_TERM atom;
	ErlNifBinary kbin, vbin;
	struct cache *c;
	struct cache_node *n, *ng;
	ErlNifUInt64 lifetime = 0;

	if (!enif_is_atom(env, argv[0]))
		return enif_make_badarg(env);
	atom = argv[0];

	if (!enif_inspect_binary(env, argv[1], &kbin))
		return enif_make_badarg(env);
	if (!enif_inspect_binary(env, argv[2], &vbin))
		return enif_make_badarg(env);

	if ((c = get_cache(atom))) {
		enif_consume_timeslice(env, 1);

	} else {
		/* if we've been asked to put() in to a cache that doesn't exist yet
		   then we should create it! */
		ErlNifUInt64 max_size, min_q1_size;
		if (!enif_get_uint64(env, argv[3], &max_size))
			return enif_make_badarg(env);
		if (!enif_get_uint64(env, argv[4], &min_q1_size))
			return enif_make_badarg(env);
		c = new_cache(atom, max_size, min_q1_size);
		enif_consume_timeslice(env, 20);
	}

	if (argc > 5)
		if (!enif_get_uint64(env, argv[5], &lifetime))
			return enif_make_badarg(env);

	n = enif_alloc(sizeof(*n));
	memset(n, 0, sizeof(*n));
	n->c = c;
	n->vsize = vbin.size;
	n->ksize = kbin.size;
	n->size = vbin.size + kbin.size;
	n->key = enif_alloc(kbin.size);
	memcpy(n->key, kbin.data, kbin.size);
	n->val = enif_alloc_resource(value_type, vbin.size);
	memcpy(n->val, vbin.data, vbin.size);
	n->q = &(c->q1);
	if (lifetime) {
		clock_now(&(n->expiry));
		n->expiry.tv_sec += lifetime;
	}

	enif_rwlock_rwlock(c->cache_lock);
	enif_rwlock_rwlock(c->lookup_lock);
	HASH_FIND(hh, c->lookup, kbin.data, kbin.size, ng);
	if (ng) {
		enif_mutex_lock(c->ctrl_lock);
		destroy_cache_node(ng);
		enif_mutex_unlock(c->ctrl_lock);
	}
	TAILQ_INSERT_HEAD(&(c->q1.head), n, entry);
	c->q1.size += n->size;
	HASH_ADD_KEYPTR(hh, c->lookup, n->key, n->ksize, n);
	if (lifetime) {
		struct cache_node *rn;
		rn = RB_INSERT(expiry_tree, &(c->expiry_head), n);
		/* it's possible to get two timestamps that are the same; if that happens,
		   just bump our tv_nsec forward by 1 until we're unique */
		while (rn != NULL) {
			++(n->expiry.tv_nsec);
			rn = RB_INSERT(expiry_tree, &(c->expiry_head), n);
		}
	}
	enif_rwlock_rwunlock(c->lookup_lock);
	enif_rwlock_rwunlock(c->cache_lock);

	enif_cond_broadcast(c->check_cond);
	enif_consume_timeslice(env, 50);

	return enif_make_atom(env, "ok");
}
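Expiry stamps come from clock_now(), which isn't shown in these excerpts; a thin wrapper over clock_gettime() is the natural reading (the CLOCK_MONOTONIC choice is an assumption):

static void
clock_now(struct timespec *ts)
{
	/* monotonic time keeps expiry comparisons valid across wall-clock steps */
	clock_gettime(CLOCK_MONOTONIC, ts);
}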
Example #10
File: nif.c Project: arekinath/e2qc
static void *
cache_bg_thread(void *arg)
{
	struct cache *c = (struct cache *)arg;
	int i, dud;

	while (1) {
		enif_mutex_lock(c->ctrl_lock);

		/* if we've been told to die, quit this loop and start cleaning up */
		if (c->flags & FL_DYING) {
			enif_mutex_unlock(c->ctrl_lock);
			break;
		}

		/* sleep until there is work to do */
		enif_cond_wait(c->check_cond, c->ctrl_lock);

		__sync_add_and_fetch(&(c->wakeups), 1);
		dud = 1;

		/* we have to let go of ctrl_lock so we can take cache_lock then
		   ctrl_lock again to get them back in the right order */
		enif_mutex_unlock(c->ctrl_lock);
		enif_rwlock_rwlock(c->cache_lock);
		enif_mutex_lock(c->ctrl_lock);

		/* first process the promotion queue before we do any evicting */
		for (i = 0; i < N_INCR_BKT; ++i) {
			enif_mutex_lock(c->incr_lock[i]);
			while (!TAILQ_EMPTY(&(c->incr_head[i]))) {
				struct cache_incr_node *n;
				n = TAILQ_FIRST(&(c->incr_head[i]));
				TAILQ_REMOVE(&(c->incr_head[i]), n, entry);
				__sync_sub_and_fetch(&(c->incr_count), 1);

				dud = 0;

				/* let go of the ctrl_lock here; we don't need it while we're not
				   looking at the incr_queue, and other threads can use it while we
				   shuffle queue nodes around */
				enif_mutex_unlock(c->incr_lock[i]);
				enif_mutex_unlock(c->ctrl_lock);

				if (n->node->q == &(c->q1)) {
					TAILQ_REMOVE(&(c->q1.head), n->node, entry);
					c->q1.size -= n->node->size;
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
					n->node->q = &(c->q2);
					c->q2.size += n->node->size;

				} else if (n->node->q == &(c->q2)) {
					TAILQ_REMOVE(&(c->q2.head), n->node, entry);
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
				}

				enif_free(n);

				/* take the ctrl_lock back again for the next loop around */
				enif_mutex_lock(c->ctrl_lock);
				enif_mutex_lock(c->incr_lock[i]);
			}
			enif_mutex_unlock(c->incr_lock[i]);
		}

		/* let go of the ctrl_lock here for two reasons:
		   1. avoid lock inversion, because if we have evictions to do we
		      will need to take lookup_lock, and we must take lookup_lock
		      before taking ctrl_lock
		   2. if we don't need to do evictions, we're done with the structures
		      that are behind ctrl_lock so we should give it up for others */
		enif_mutex_unlock(c->ctrl_lock);

		/* do timed evictions -- if anything has expired, nuke it */
		{
			struct cache_node *n;
			if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) {
				struct timespec now;
				clock_now(&now);
				while (n && n->expiry.tv_sec < now.tv_sec) {
					enif_mutex_lock(c->ctrl_lock);
					dud = 0;
					destroy_cache_node(n);
					enif_mutex_unlock(c->ctrl_lock);
					n = RB_MIN(expiry_tree, &(c->expiry_head));
				}
			}
		}

		/* now check if we need to do ordinary size limit evictions */
		if (c->q1.size + c->q2.size > c->max_size) {
			enif_rwlock_rwlock(c->lookup_lock);
			enif_mutex_lock(c->ctrl_lock);

			while ((c->q1.size + c->q2.size > c->max_size) &&
					(c->q1.size > c->min_q1_size)) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q1.head), cache_q);
				destroy_cache_node(n);
			}

			while (c->q1.size + c->q2.size > c->max_size) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q2.head), cache_q);
				destroy_cache_node(n);
			}

			dud = 0;

			enif_mutex_unlock(c->ctrl_lock);
			enif_rwlock_rwunlock(c->lookup_lock);
		}

		if (dud)
			__sync_add_and_fetch(&(c->dud_wakeups), 1);
		/* now let go of the cache_lock that we took right back at the start of
		   this iteration */
		enif_rwlock_rwunlock(c->cache_lock);
	}

	/* first remove us from the atom_tree, so we get no new operations coming in */
	enif_rwlock_rwlock(gbl->atom_lock);
	RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node);
	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_free(c->atom_node);

	/* now take all of our locks, to make sure any pending operations are done */
	enif_rwlock_rwlock(c->cache_lock);
	enif_rwlock_rwlock(c->lookup_lock);
	enif_mutex_lock(c->ctrl_lock);

	c->atom_node = NULL;

	/* free the actual cache queues */
	{
		struct cache_node *n, *nextn;
		nextn = TAILQ_FIRST(&(c->q1.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
		nextn = TAILQ_FIRST(&(c->q2.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
	}

	for (i = 0; i < N_INCR_BKT; ++i)
		enif_mutex_lock(c->incr_lock[i]);

	/* free the incr_queue */
	for (i = 0; i < N_INCR_BKT; ++i) {
		struct cache_incr_node *in, *nextin;
		nextin = TAILQ_FIRST(&(c->incr_head[i]));
		while ((in = nextin)) {
			nextin = TAILQ_NEXT(in, entry);
			TAILQ_REMOVE(&(c->incr_head[i]), in, entry);
			in->node = 0;
			enif_free(in);
		}
		enif_mutex_unlock(c->incr_lock[i]);
		enif_mutex_destroy(c->incr_lock[i]);
	}

	/* unlock and destroy! */
	enif_cond_destroy(c->check_cond);

	enif_mutex_unlock(c->ctrl_lock);
	enif_mutex_destroy(c->ctrl_lock);

	enif_rwlock_rwunlock(c->lookup_lock);
	enif_rwlock_destroy(c->lookup_lock);

	enif_rwlock_rwunlock(c->cache_lock);
	enif_rwlock_destroy(c->cache_lock);

	enif_free(c);

	return 0;
}
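The comments in cache_bg_thread describe a strict lock hierarchy: cache_lock outermost, then lookup_lock, then ctrl_lock innermost; that ordering is why the thread keeps dropping and re-taking ctrl_lock. Distilled into a pair of helpers (the names are hypothetical; the order matches the shutdown path above):

static void
cache_lock_all(struct cache *c)
{
	enif_rwlock_rwlock(c->cache_lock);    /* 1: outermost */
	enif_rwlock_rwlock(c->lookup_lock);   /* 2 */
	enif_mutex_lock(c->ctrl_lock);        /* 3: innermost */
}

static void
cache_unlock_all(struct cache *c)
{
	enif_mutex_unlock(c->ctrl_lock);
	enif_rwlock_rwunlock(c->lookup_lock);
	enif_rwlock_rwunlock(c->cache_lock);
}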
Example #11
/* the async job thread that handles opening/closing of doors */
void *
job_thread(void *arg)
{
	struct zdoor_handle *zhandle;
	int cont = 1;
	int res;

	/* first init the handle */
	zhandle = zdoor_handle_init();

	enif_mutex_lock(gbl.jlock);

	while (cont) {
		struct job *j;
		while (!gbl.jlist)
			enif_cond_wait(gbl.jcond, gbl.jlock);

		j = gbl.jlist;
		while (j) {
			gbl.jlist = j->next;
			enif_mutex_unlock(gbl.jlock);

			if (j->action == ACT_OPEN) {
				enif_rwlock_rwlock(gbl.dlock);
				j->door->next = NULL;
				if (gbl.dlist != NULL)
					j->door->next = gbl.dlist;
				gbl.dlist = j->door;
				enif_rwlock_rwunlock(gbl.dlock);

				res = zdoor_open(zhandle, j->door->zonename, j->door->service, j->door, zdoor_cb);

				ErlNifEnv *env = enif_alloc_env();
				ERL_NIF_TERM ret = enif_make_atom(env, "ok");
				switch (res) {
					case ZDOOR_ERROR:
						ret = enif_make_atom(env, "error");
						break;
					case ZDOOR_NOT_GLOBAL_ZONE:
						ret = enif_make_atom(env, "not_global");
						break;
					case ZDOOR_ZONE_NOT_RUNNING:
						ret = enif_make_atom(env, "not_running");
						break;
					case ZDOOR_ZONE_FORBIDDEN:
						ret = enif_make_atom(env, "eperm");
						break;
					case ZDOOR_ARGS_ERROR:
						ret = enif_make_atom(env, "badarg");
						break;
					case ZDOOR_OUT_OF_MEMORY:
						ret = enif_make_atom(env, "enomem");
						break;
				}
				enif_send(NULL, &j->owner, env,
					enif_make_tuple3(env,
						enif_make_atom(env, "zdoor_job"),
						enif_make_atom(env, "open"),
						ret));
				enif_free_env(env);
			} else if (j->action == ACT_CLOSE) {
				enif_rwlock_rwlock(gbl.dlock);
				enif_rwlock_rwlock(j->door->rlock);

				if (j->door->rlist) {
					enif_rwlock_rwunlock(j->door->rlock);
					enif_rwlock_rwunlock(gbl.dlock);

					ErlNifEnv *env = enif_alloc_env();
					enif_send(NULL, &j->owner, env,
						enif_make_tuple3(env,
							enif_make_atom(env, "zdoor_job"),
							enif_make_atom(env, "close"),
							enif_make_atom(env, "busy")));
					enif_free_env(env);
				} else {
					struct door *d = gbl.dlist;
					if (d == j->door) {
						gbl.dlist = j->door->next;
					} else {
						for (; d; d = d->next) {
							if (d->next == j->door) break;
						}
						if (d)
							d->next = j->door->next;
					}
					enif_rwlock_rwunlock(gbl.dlock);

					zdoor_close(zhandle, j->door->zonename, j->door->service);
					door_free(j->door);

					ErlNifEnv *env = enif_alloc_env();
					enif_send(NULL, &j->owner, env,
						enif_make_tuple3(env,
							enif_make_atom(env, "zdoor_job"),
							enif_make_atom(env, "close"),
							enif_make_atom(env, "ok")));
					enif_free_env(env);
				}
			} else if (j->action == ACT_QUIT) {
				cont = 0;
			}

			enif_free(j);

			enif_mutex_lock(gbl.jlock);
			j = gbl.jlist;
		}
	}

	enif_mutex_unlock(gbl.jlock);

	zdoor_handle_destroy(zhandle);

	return NULL;
}
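job_thread drains a singly linked gbl.jlist under gbl.jlock and sleeps on gbl.jcond while the list is empty. The producer side isn't shown; a minimal sketch of how a NIF would hand it work (the function name is hypothetical; the next field is inferred from the consumer loop):

static void
job_enqueue(struct job *j)
{
	enif_mutex_lock(gbl.jlock);
	j->next = gbl.jlist;           /* push at the head; drain order is LIFO */
	gbl.jlist = j;
	enif_cond_signal(gbl.jcond);   /* wake job_thread if it's in cond_wait */
	enif_mutex_unlock(gbl.jlock);
}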