Example #1
void*
queue_pop(queue *queue) {
    qitem *entry;
    void* item;

    enif_mutex_lock(queue->lock);

    /* Wait for an item to become available. */
    while (queue->head == NULL) {
        enif_cond_wait(queue->cond, queue->lock);
    }

    assert(queue->length >= 0 && "Invalid queue size at pop.");

    /* Woke up because queue->head != NULL
       Remove the entry and return the payload. */
    entry = queue->head;
    queue->head = entry->next;
    entry->next = NULL;

    if (queue->head == NULL) {
        assert(queue->tail == entry && "Invalid queue state: Bad tail.");
        queue->tail = NULL;
    }

    queue->length -= 1;

    enif_mutex_unlock(queue->lock);

    item = entry->data;
    enif_free(entry);

    return item;
}
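Not part of the example above: a minimal producer-side sketch, assuming the same queue/qitem layout used in queue_pop (lock, cond, head, tail, length; a data/next pair on each item). The name queue_push is hypothetical; the point is the enif_cond_signal call that wakes a consumer blocked in the enif_cond_wait loop.

int
queue_push(queue *queue, void *item)
{
    /* Hypothetical counterpart to queue_pop above. */
    qitem *entry = (qitem *) enif_alloc(sizeof(qitem));
    if (entry == NULL)
        return 0;

    entry->data = item;
    entry->next = NULL;

    enif_mutex_lock(queue->lock);

    /* Append to the tail of the singly linked list. */
    if (queue->tail != NULL)
        queue->tail->next = entry;
    queue->tail = entry;
    if (queue->head == NULL)
        queue->head = entry;

    queue->length += 1;

    /* Wake one consumer blocked in enif_cond_wait(). */
    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}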
Example #2
void* queue_get(queue_t* queue)
{
    queue_item_t* item;

    enif_mutex_lock(queue->mutex);

    // Block until there's something in the queue
    while (queue->head == NULL) {
        enif_cond_wait(queue->cond, queue->mutex);
    }

    item = queue->head;
    queue->head = queue->head->next;
    item->next = NULL;

    if (queue->head == NULL) {
        queue->tail = NULL;
    }

    enif_mutex_unlock(queue->mutex);

    void* data = item->data;

    enif_free(item);

    return data;
}
Example #3
qitem*
queue_pop(queue *queue)
{
    qitem *entry;

    /* Spin until the queue lock is acquired. */
    while (enif_mutex_trylock(queue->lock) != 0)
    {
    }
    while(queue->head == NULL)
        enif_cond_wait(queue->cond, queue->lock);

    assert(queue->length >= 0 && "Invalid queue size at pop.");

    /* Woke up because queue->head != NULL
     * Remove the entry and return the payload.
     */
    entry = queue->head;
    queue->head = entry->next;
    entry->next = NULL;

    if(queue->head == NULL) {
        assert(queue->tail == entry && "Invalid queue state: Bad tail.");
        queue->tail = NULL;
    }

    queue->length--;

    enif_mutex_unlock(queue->lock);

    return entry;
}
Example #4
ErlNifPid*
queue_pop(queue_t* queue)
{
    qitem_t* item;
    ErlNifPid* ret = NULL;

    enif_mutex_lock(queue->lock);

    while(queue->head == NULL)
    {
        enif_cond_wait(queue->cond, queue->lock);
    }

    item = queue->head;
    queue->head = item->next;
    item->next = NULL;

    if(queue->head == NULL)
    {
        queue->tail = NULL;
    }

    enif_mutex_unlock(queue->lock);

    ret = item->pid;
    enif_free(item);

    return ret;
}
Example #5
static struct zdoor_result *
zdoor_cb(struct zdoor_cookie *cookie, char *argp, size_t argp_sz)
{
	struct door *d;
	struct req *r;
	ErlNifEnv *env = enif_alloc_env();

	/* we kept the struct door in the biscuit */
	d = (struct door *)cookie->zdc_biscuit;

	/* this request */
	r = req_alloc();

	/* take the rlist lock first, then the req lock */
	enif_rwlock_rwlock(d->rlock);
	enif_mutex_lock(r->lock);

	req_insert(d, r);

	enif_rwlock_rwunlock(d->rlock);

	/* make the request into a binary term to put it into enif_send() */
	ErlNifBinary bin;
	enif_alloc_binary(argp_sz, &bin);
	memcpy(bin.data, argp, argp_sz);
	ERL_NIF_TERM binTerm = enif_make_binary(env, &bin);

	/* send a message back to the session owner */
	enif_send(NULL, &d->owner, env,
		enif_make_tuple3(env,
			enif_make_atom(env, "zdoor"),
			enif_make_resource(env, r),
			binTerm));

	/* now wait until the request has been replied to */
	enif_cond_wait(r->cond, r->lock);

	/* convert the reply into a zdoor_result */
	/* we have to use naked malloc() since libzdoor will use free() */
	struct zdoor_result *res = malloc(sizeof(struct zdoor_result));
	res->zdr_size = r->replen;
	res->zdr_data = r->rep;

	r->rep = NULL;
	r->replen = 0;

	/* yes, we have to unlock and re-lock to avoid lock inversion here */
	enif_mutex_unlock(r->lock);

	/* remove and free the struct req */
	enif_rwlock_rwlock(d->rlock);
	enif_mutex_lock(r->lock);
	req_remove(d, r);
	enif_rwlock_rwunlock(d->rlock);
	req_free(r);

	enif_free_env(env);

	return res;
}
Example #6
int wait_pointer(couchfile_modify_request* rq, couchfile_pointer_info *ptr)
{
    if(ptr->writerq_resource == NULL)
        return 0;

    int ret = 0;
    btreenif_state *state = rq->globalstate;

    enif_mutex_lock(state->writer_cond.mtx);

    while(ptr->pointer == 0)
    {
        enif_cond_wait(state->writer_cond.cond, state->writer_cond.mtx);
        if(ptr->pointer == 0 &&
           !enif_send(rq->caller_env, &rq->writer, state->check_env, state->atom_heart))
        {
            // The writer process has died
            ret = ERROR_WRITER_DEAD;
            break;
        }
        enif_clear_env(state->check_env);
    }

    if(ptr->pointer != 0)
    {
        enif_release_resource(ptr->writerq_resource);
    }

    enif_mutex_unlock(state->writer_cond.mtx);

    ptr->writerq_resource = NULL;
    return ret;
}
Example #7
static ERL_NIF_TERM ErlangCall(ErlNifEnv *env, ERL_NIF_TERM fun, ERL_NIF_TERM args) {
  ErlCall *erlCall = CreateCall(fun, args);

  enif_mutex_lock(erlCall->mutex);
  enif_send(env, &server, erlCall->env, erlCall->msg);
  while(!erlCall->complete) {
    enif_cond_wait(erlCall->cond, erlCall->mutex);
  }
  enif_mutex_unlock(erlCall->mutex);

  ERL_NIF_TERM result = enif_make_copy(env, erlCall->result);
  DestroyCall(erlCall);

  return result;
}
Example #8
void* threaded_sender(void *arg)
{
    union { void* vp; struct make_term_info* p; } mti;
    mti.vp = arg;

    enif_mutex_lock(mti.p->mtx);
    while (!mti.p->send_it) {
        enif_cond_wait(mti.p->cond, mti.p->mtx);
    }
    mti.p->send_it = 0;
    enif_mutex_unlock(mti.p->mtx);
    mti.p->send_res = enif_send(NULL, &mti.p->to_pid, mti.p->dst_env, mti.p->blob);
    return NULL;
}
Example #9
int nif_thread_receive(nif_thread_state* st, nif_thread_message** msg)
{
	enif_mutex_lock(st->lock);

	while (TAILQ_EMPTY(st->mailbox))
		enif_cond_wait(st->cond, st->lock);

	*msg = TAILQ_FIRST(st->mailbox);
	TAILQ_REMOVE(st->mailbox, TAILQ_FIRST(st->mailbox), next_entry);

	enif_mutex_unlock(st->lock);

	if ((*msg)->function == NULL)
		return 0;

	return 1;
}
Example #10
void *
queue_receive(queue *queue) {
    void *item;

    enif_mutex_lock(queue->lock);

    /* Wait for an item to become available. */
    while (queue->message == NULL) {
        enif_cond_wait(queue->cond, queue->lock);
    }

    item = queue->message;
    queue->message = NULL;

    enif_mutex_unlock(queue->lock);

    return item;
}
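The single-slot variant above blocks until queue->message is non-NULL. A minimal sender-side sketch, assuming only the fields used in queue_receive (lock, cond, message); the name queue_send is hypothetical:

void
queue_send(queue *queue, void *item)
{
    enif_mutex_lock(queue->lock);

    /* Single-slot mailbox: this sketch does not handle the case where
       a message is already pending; a real implementation would have
       to wait, overwrite, or reject. */
    queue->message = item;

    /* Wake the receiver blocked in queue_receive(). */
    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);
}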
Example #11
static void *
salt_worker_loop(void *arg)
{
	struct salt_pcb 	*sc = arg;
	struct salt_msg 	*sm;
	struct salt_msg 	*tmp;

	/* XXX initialization of libsodium */
	/* XXX send readiness indication to owner */

	/* Pick up next batch of work, react promptly to termination requests. */
 loop:
	enif_mutex_lock(sc->sc_lock);
 wait:
	if (sc->sc_exit_flag) {
		enif_mutex_unlock(sc->sc_lock);
		return (NULL);
	}
	if (sc->sc_req_first == NULL) {
		enif_cond_wait(sc->sc_cond, sc->sc_lock);
		goto wait;
	}

	sm = sc->sc_req_first;
	sc->sc_req_first = NULL;
	sc->sc_req_lastp = &sc->sc_req_first;
	sc->sc_req_npend = 0;
	
	enif_mutex_unlock(sc->sc_lock);

	/* Handle all requests, release when done. */
 next:
	salt_handle_req(sc, sm);
	tmp = sm->msg_next;
	
	enif_free_env(sm->msg_heap);
	enif_free(sm);

	if (tmp == NULL)
		goto loop;

	sm = tmp;
	goto next;
}
Example #12
static int queue_pop_core(queue_ptr queue, void **data, const int wait)
{
	node_ptr node = NULL;
	*data = NULL;

	enif_mutex_lock(queue->lock);

	while (1 == wait && 0 == queue->size)
	{
		enif_cond_wait(queue->cond, queue->lock);
	}

	if(queue->size > 0)
	{
		node = queue->first;

		// remove from queue, if we are shifting up the last one
		// then we are setting the 'first' one to NULL
		queue->first = node->next;

		--queue->size;
		if(0 == queue->size)
		{
			// queue is empty
			queue->last = NULL;
		}
	}	
	enif_mutex_unlock(queue->lock);

	// if we were able to get one, then we set the result and return
	if(NULL != node)
	{
		*data = node->data;
		node_free(node);
		return 1;
	}

	return 0;
}
Example #13
static void *
cache_bg_thread(void *arg)
{
	struct cache *c = (struct cache *)arg;
	int i, dud;

	while (1) {
		enif_mutex_lock(c->ctrl_lock);

		/* if we've been told to die, quit this loop and start cleaning up */
		if (c->flags & FL_DYING) {
			enif_mutex_unlock(c->ctrl_lock);
			break;
		}

		/* sleep until there is work to do */
		enif_cond_wait(c->check_cond, c->ctrl_lock);

		__sync_add_and_fetch(&(c->wakeups), 1);
		dud = 1;

		/* we have to let go of ctrl_lock so we can take cache_lock then
		   ctrl_lock again to get them back in the right order */
		enif_mutex_unlock(c->ctrl_lock);
		enif_rwlock_rwlock(c->cache_lock);
		enif_mutex_lock(c->ctrl_lock);

		/* first process the promotion queue before we do any evicting */
		for (i = 0; i < N_INCR_BKT; ++i) {
			enif_mutex_lock(c->incr_lock[i]);
			while (!TAILQ_EMPTY(&(c->incr_head[i]))) {
				struct cache_incr_node *n;
				n = TAILQ_FIRST(&(c->incr_head[i]));
				TAILQ_REMOVE(&(c->incr_head[i]), n, entry);
				__sync_sub_and_fetch(&(c->incr_count), 1);

				dud = 0;

				/* let go of the ctrl_lock here, we don't need it when we aren't looking
				   at the incr_queue, and this way other threads can use it while we shuffle
				   queue nodes around */
				enif_mutex_unlock(c->incr_lock[i]);
				enif_mutex_unlock(c->ctrl_lock);

				if (n->node->q == &(c->q1)) {
					TAILQ_REMOVE(&(c->q1.head), n->node, entry);
					c->q1.size -= n->node->size;
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
					n->node->q = &(c->q2);
					c->q2.size += n->node->size;

				} else if (n->node->q == &(c->q2)) {
					TAILQ_REMOVE(&(c->q2.head), n->node, entry);
					TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
				}

				enif_free(n);

				/* take the ctrl_lock back again for the next loop around */
				enif_mutex_lock(c->ctrl_lock);
				enif_mutex_lock(c->incr_lock[i]);
			}
			enif_mutex_unlock(c->incr_lock[i]);
		}

		/* let go of the ctrl_lock here for two reasons:
		   1. avoid lock inversion, because if we have evictions to do we
		      will need to take lookup_lock, and we must take lookup_lock
		      before taking ctrl_lock
		   2. if we don't need to do evictions, we're done with the structures
		      that are behind ctrl_lock so we should give it up for others */
		enif_mutex_unlock(c->ctrl_lock);

		/* do timed evictions -- if anything has expired, nuke it */
		{
			struct cache_node *n;
			if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) {
				struct timespec now;
				clock_now(&now);
				while (n && n->expiry.tv_sec < now.tv_sec) {
					enif_mutex_lock(c->ctrl_lock);
					dud = 0;
					destroy_cache_node(n);
					enif_mutex_unlock(c->ctrl_lock);
					n = RB_MIN(expiry_tree, &(c->expiry_head));
				}
			}
		}

		/* now check if we need to do ordinary size limit evictions */
		if (c->q1.size + c->q2.size > c->max_size) {
			enif_rwlock_rwlock(c->lookup_lock);
			enif_mutex_lock(c->ctrl_lock);

			while ((c->q1.size + c->q2.size > c->max_size) &&
					(c->q1.size > c->min_q1_size)) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q1.head), cache_q);
				destroy_cache_node(n);
			}

			while (c->q1.size + c->q2.size > c->max_size) {
				struct cache_node *n;
				n = TAILQ_LAST(&(c->q2.head), cache_q);
				destroy_cache_node(n);
			}

			dud = 0;

			enif_mutex_unlock(c->ctrl_lock);
			enif_rwlock_rwunlock(c->lookup_lock);
		}

		if (dud)
			__sync_add_and_fetch(&(c->dud_wakeups), 1);
		/* now let go of the cache_lock that we took right back at the start of
		   this iteration */
		enif_rwlock_rwunlock(c->cache_lock);
	}

	/* first remove us from the atom_tree, so we get no new operations coming in */
	enif_rwlock_rwlock(gbl->atom_lock);
	RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node);
	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_free(c->atom_node);

	/* now take all of our locks, to make sure any pending operations are done */
	enif_rwlock_rwlock(c->cache_lock);
	enif_rwlock_rwlock(c->lookup_lock);
	enif_mutex_lock(c->ctrl_lock);

	c->atom_node = NULL;

	/* free the actual cache queues */
	{
		struct cache_node *n, *nextn;
		nextn = TAILQ_FIRST(&(c->q1.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
		nextn = TAILQ_FIRST(&(c->q2.head));
		while ((n = nextn)) {
			nextn = TAILQ_NEXT(n, entry);
			destroy_cache_node(n);
		}
	}

	for (i = 0; i < N_INCR_BKT; ++i)
		enif_mutex_lock(c->incr_lock[i]);

	/* free the incr_queue */
	for (i = 0; i < N_INCR_BKT; ++i) {
		struct cache_incr_node *in, *nextin;
		nextin = TAILQ_FIRST(&(c->incr_head[i]));
		while ((in = nextin)) {
			nextin = TAILQ_NEXT(in, entry);
			TAILQ_REMOVE(&(c->incr_head[i]), in, entry);
			in->node = 0;
			enif_free(in);
		}
		enif_mutex_unlock(c->incr_lock[i]);
		enif_mutex_destroy(c->incr_lock[i]);
	}

	/* unlock and destroy! */
	enif_cond_destroy(c->check_cond);

	enif_mutex_unlock(c->ctrl_lock);
	enif_mutex_destroy(c->ctrl_lock);

	enif_rwlock_rwunlock(c->lookup_lock);
	enif_rwlock_destroy(c->lookup_lock);

	enif_rwlock_rwunlock(c->cache_lock);
	enif_rwlock_destroy(c->cache_lock);

	enif_free(c);

	return 0;
}
Example #14
/* the async job thread that handles opening/closing of doors */
void *
job_thread(void *arg)
{
	struct zdoor_handle *zhandle;
	int cont = 1;
	int res;

	/* first init the handle */
	zhandle = zdoor_handle_init();

	enif_mutex_lock(gbl.jlock);

	while (cont) {
		struct job *j;
		while (!gbl.jlist)
			enif_cond_wait(gbl.jcond, gbl.jlock);

		j = gbl.jlist;
		while (j) {
			gbl.jlist = j->next;
			enif_mutex_unlock(gbl.jlock);

			if (j->action == ACT_OPEN) {
				enif_rwlock_rwlock(gbl.dlock);
				j->door->next = NULL;
				if (gbl.dlist != NULL)
					j->door->next = gbl.dlist;
				gbl.dlist = j->door;
				enif_rwlock_rwunlock(gbl.dlock);

				res = zdoor_open(zhandle, j->door->zonename, j->door->service, j->door, zdoor_cb);

				ErlNifEnv *env = enif_alloc_env();
				ERL_NIF_TERM ret = enif_make_atom(env, "ok");
				switch (res) {
					case ZDOOR_ERROR:
						ret = enif_make_atom(env, "error");
						break;
					case ZDOOR_NOT_GLOBAL_ZONE:
						ret = enif_make_atom(env, "not_global");
						break;
					case ZDOOR_ZONE_NOT_RUNNING:
						ret = enif_make_atom(env, "not_running");
						break;
					case ZDOOR_ZONE_FORBIDDEN:
						ret = enif_make_atom(env, "eperm");
						break;
					case ZDOOR_ARGS_ERROR:
						ret = enif_make_atom(env, "badarg");
						break;
					case ZDOOR_OUT_OF_MEMORY:
						ret = enif_make_atom(env, "enomem");
						break;
				}
				enif_send(NULL, &j->owner, env,
					enif_make_tuple3(env,
						enif_make_atom(env, "zdoor_job"),
						enif_make_atom(env, "open"),
						ret));
				enif_free_env(env);
			} else if (j->action == ACT_CLOSE) {
				enif_rwlock_rwlock(gbl.dlock);
				enif_rwlock_rwlock(j->door->rlock);

				if (j->door->rlist) {
					enif_rwlock_rwunlock(j->door->rlock);
					enif_rwlock_rwunlock(gbl.dlock);

					ErlNifEnv *env = enif_alloc_env();
					enif_send(NULL, &j->owner, env,
						enif_make_tuple3(env,
							enif_make_atom(env, "zdoor_job"),
							enif_make_atom(env, "close"),
							enif_make_atom(env, "busy")));
					enif_free_env(env);
				} else {
					struct door *d = gbl.dlist;
					if (d == j->door) {
						gbl.dlist = j->door->next;
					} else {
						for (; d; d = d->next) {
							if (d->next == j->door) break;
						}
						if (d)
							d->next = j->door->next;
					}
					enif_rwlock_rwunlock(gbl.dlock);

					zdoor_close(zhandle, j->door->zonename, j->door->service);
					door_free(j->door);

					ErlNifEnv *env = enif_alloc_env();
					enif_send(NULL, &j->owner, env,
						enif_make_tuple3(env,
							enif_make_atom(env, "zdoor_job"),
							enif_make_atom(env, "close"),
							enif_make_atom(env, "ok")));
					enif_free_env(env);
				}
			} else if (j->action == ACT_QUIT) {
				cont = 0;
			}

			enif_free(j);

			enif_mutex_lock(gbl.jlock);
			j = gbl.jlist;
		}
	}

	enif_mutex_unlock(gbl.jlock);

	zdoor_handle_destroy(zhandle);

	return NULL;
}