Example #1
File: nif.c Project: arekinath/e2qc
static void
unload_cb(ErlNifEnv *env, void *priv_data)
{
	struct atom_node *an;

	enif_rwlock_rwlock(gbl->atom_lock);

	/* when we unload, we want to tell all of the active caches to die,
	   then join() their bg_threads to wait until they're completely gone */
	while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) {
		struct cache *c = an->cache;
		enif_rwlock_rwunlock(gbl->atom_lock);

		enif_mutex_lock(c->ctrl_lock);
		c->flags |= FL_DYING;
		enif_mutex_unlock(c->ctrl_lock);
		enif_cond_broadcast(c->check_cond);

		enif_thread_join(c->bg_thread, NULL);

		enif_rwlock_rwlock(gbl->atom_lock);
	}

	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_rwlock_destroy(gbl->atom_lock);
	enif_clear_env(gbl->atom_env);
	enif_free(gbl);

	gbl = NULL;
}
Example #2
// resource destructor: unmap the region, then tear down its rwlock
void emmap_dtor(ErlNifEnv* env, void* arg)
{
  mhandle* handle = (mhandle*)arg;
  emmap_unmap(handle, true);

  // only the destructor destroys the rwlock
  if (handle->rwlock != 0) enif_rwlock_destroy(handle->rwlock);
}
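For context, the rwlock torn down in this destructor has to be created when the handle is set up, and is what serializes access to the mapping. The following is only a sketch of that lifecycle, not code from the project: make_handle and read_guarded are hypothetical names, and every mhandle field other than rwlock is omitted.

/* Hypothetical sketch of the create/use side of the rwlock destroyed above. */
static mhandle* make_handle(ErlNifResourceType* res_type)
{
  mhandle* handle = (mhandle*)enif_alloc_resource(res_type, sizeof(mhandle));
  /* 0 (no lock) is allowed -- emmap_dtor above checks for it before destroying */
  handle->rwlock = enif_rwlock_create((char*)"emmap");
  /* ... set up the mapping itself ... */
  return handle;
}

static void read_guarded(mhandle* handle)
{
  if (handle->rwlock != 0) enif_rwlock_rlock(handle->rwlock);
  /* ... read from the mapped region ... */
  if (handle->rwlock != 0) enif_rwlock_runlock(handle->rwlock);
}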
Example #3
File: nif.c Project: arekinath/e2qc
static void *
cache_bg_thread(void *arg)
{
	struct cache *c = (struct cache *)arg;
	int i, dud;

	while (1) {
		enif_mutex_lock(c->ctrl_lock);

		/* if we've been told to die, quit this loop and start cleaning up */
		if (c->flags & FL_DYING) {
			enif_mutex_unlock(c->ctrl_lock);
			break;
		}

		/* sleep until there is work to do */
		enif_cond_wait(c->check_cond, c->ctrl_lock);

		__sync_add_and_fetch(&(c->wakeups), 1);
		dud = 1;

		/* we have to let go of ctrl_lock so we can take cache_lock then
		   ctrl_lock again to get them back in the right order */
		enif_mutex_unlock(c->ctrl_lock);
		enif_rwlock_rwlock(c->cache_lock);
		enif_mutex_lock(c->ctrl_lock);

		/* first process the promotion queue before we do any evicting */
		for (i = 0; i < N_INCR_BKT; ++i) {
			enif_mutex_lock(c->incr_lock[i]);
			while (!TAILQ_EMPTY(&(c->incr_head[i]))) {
				struct cache_incr_node *n;
				n = TAILQ_FIRST(&(c->incr_head[i]));
				TAILQ_REMOVE(&(c->incr_head[i]), n, entry);
				__sync_sub_and_fetch(&(c->incr_count), 1);

				dud = 0;

				/* let go of the ctrl_lock here, we don't need it when we aren't looking
				   at the incr_queue, and this way other threads can use it while we shuffle
				   queue nodes around */
				enif_mutex_unlock(c->incr_lock[i]);
				enif_mutex_unlock(c->ctrl_lock);

				if (n->node->q == &(c->q1)) {
					TAILQ_REMOVE(&(c->q1.head), n->node, entry);
					c->q1.size -= n->node->size;
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
					n->node->q = &(c->q2);
					c->q2.size += n->node->size;

				} else if (n->node->q == &(c->q2)) {
					TAILQ_REMOVE(&(c->q2.head), n->node, entry);
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
				}

				enif_free(n);

				/* take the ctrl_lock back again for the next loop around */
				enif_mutex_lock(c->ctrl_lock);
				enif_mutex_lock(c->incr_lock[i]);
			}
			enif_mutex_unlock(c->incr_lock[i]);
		}

		/* let go of the ctrl_lock here for two reasons:
		   1. avoid lock inversion, because if we have evictions to do we
		      will need to take lookup_lock, and we must take lookup_lock
		      before taking ctrl_lock
		   2. if we don't need to do evictions, we're done with the structures
		      that are behind ctrl_lock so we should give it up for others */
		enif_mutex_unlock(c->ctrl_lock);

		/* do timed evictions -- if anything has expired, nuke it */
		{
			struct cache_node *n;
			if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) {
				struct timespec now;
				clock_now(&now);
				while (n && n->expiry.tv_sec < now.tv_sec) {
					enif_mutex_lock(c->ctrl_lock);
					dud = 0;
					destroy_cache_node(n);
					enif_mutex_unlock(c->ctrl_lock);
					n = RB_MIN(expiry_tree, &(c->expiry_head));
				}
			}
		}

		/* now check if we need to do ordinary size limit evictions */
		if (c->q1.size + c->q2.size > c->max_size) {
			enif_rwlock_rwlock(c->lookup_lock);
			enif_mutex_lock(c->ctrl_lock);

			while ((c->q1.size + c->q2.size > c->max_size) &&
					(c->q1.size > c->min_q1_size)) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q1.head), cache_q);
				destroy_cache_node(n);
			}

			while (c->q1.size + c->q2.size > c->max_size) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q2.head), cache_q);
				destroy_cache_node(n);
			}

			dud = 0;

			enif_mutex_unlock(c->ctrl_lock);
			enif_rwlock_rwunlock(c->lookup_lock);
		}

		if (dud)
			__sync_add_and_fetch(&(c->dud_wakeups), 1);
		/* now let go of the cache_lock that we took right back at the start of
		   this iteration */
		enif_rwlock_rwunlock(c->cache_lock);
	}

	/* first remove us from the atom_tree, so we get no new operations coming in */
	enif_rwlock_rwlock(gbl->atom_lock);
	RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node);
	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_free(c->atom_node);

	/* now take all of our locks, to make sure any pending operations are done */
	enif_rwlock_rwlock(c->cache_lock);
	enif_rwlock_rwlock(c->lookup_lock);
	enif_mutex_lock(c->ctrl_lock);

	c->atom_node = NULL;

	/* free the actual cache queues */
	{
		struct cache_node *n, *nextn;
		nextn = TAILQ_FIRST(&(c->q1.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
		nextn = TAILQ_FIRST(&(c->q2.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
	}

	for (i = 0; i < N_INCR_BKT; ++i)
		enif_mutex_lock(c->incr_lock[i]);

	/* free the incr_queue */
	for (i = 0; i < N_INCR_BKT; ++i) {
		struct cache_incr_node *in, *nextin;
		nextin = TAILQ_FIRST(&(c->incr_head[i]));
		while ((in = nextin)) {
			nextin = TAILQ_NEXT(in, entry);
			TAILQ_REMOVE(&(c->incr_head[i]), in, entry);
			in->node = 0;
			enif_free(in);
		}
		enif_mutex_unlock(c->incr_lock[i]);
		enif_mutex_destroy(c->incr_lock[i]);
	}

	/* unlock and destroy! */
	enif_cond_destroy(c->check_cond);

	enif_mutex_unlock(c->ctrl_lock);
	enif_mutex_destroy(c->ctrl_lock);

	enif_rwlock_rwunlock(c->lookup_lock);
	enif_rwlock_destroy(c->lookup_lock);

	enif_rwlock_rwunlock(c->cache_lock);
	enif_rwlock_destroy(c->cache_lock);

	enif_free(c);

	return 0;
}
Example #4
static void on_unload(ErlNifEnv* env, void* priv_data) {
    printf("Unloading.\r\n");
    delete scheduler_ids;
    enif_rwlock_destroy(lookup_lock);
}
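The rwlock destroyed here is a module-global created elsewhere, presumably in the load callback. A minimal sketch of what that side could look like, reusing the lookup_lock name from the example above (everything else is an assumption):

// Hypothetical counterpart to on_unload above: create the module-global
// rwlock when the NIF library is loaded.
static ErlNifRWLock* lookup_lock = NULL;

static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) {
    lookup_lock = enif_rwlock_create((char*)"lookup_lock");
    if (lookup_lock == NULL) return -1;  // a non-zero return makes the load fail
    return 0;
}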
Example #5
/* OpenSSL dynamic-lock destroy callback: the dynlock value is a cast ErlNifRWLock */
static void dyn_destroy_function(struct CRYPTO_dynlock_value *ptr, const char *file, int line)
{
    enif_rwlock_destroy((ErlNifRWLock*)ptr);
}
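Since the destroy callback above treats the struct CRYPTO_dynlock_value pointer as nothing more than an ErlNifRWLock behind a cast, the matching create and lock callbacks would look roughly like the sketch below (illustrative only, not the project's actual code):

/* Hypothetical counterparts to dyn_destroy_function above. */
static struct CRYPTO_dynlock_value* dyn_create_function(const char *file, int line)
{
    return (struct CRYPTO_dynlock_value*)enif_rwlock_create((char*)"crypto_dyn");
}

static void dyn_lock_function(int mode, struct CRYPTO_dynlock_value* ptr,
                              const char *file, int line)
{
    ErlNifRWLock* lock = (ErlNifRWLock*)ptr;
    if (mode & CRYPTO_LOCK) {
        if (mode & CRYPTO_WRITE) enif_rwlock_rwlock(lock);
        else                     enif_rwlock_rlock(lock);
    } else {
        if (mode & CRYPTO_WRITE) enif_rwlock_rwunlock(lock);
        else                     enif_rwlock_runlock(lock);
    }
}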