static struct zdoor_result *
zdoor_cb(struct zdoor_cookie *cookie, char *argp, size_t argp_sz)
{
    struct door *d;
    struct req *r;
    ErlNifEnv *env = enif_alloc_env();

    /* we kept the struct door in the biscuit */
    d = (struct door *)cookie->zdc_biscuit;

    /* this request */
    r = req_alloc();
    /* take the rlist lock first, then the req lock */
    enif_rwlock_rwlock(d->rlock);
    enif_mutex_lock(r->lock);
    req_insert(d, r);
    enif_rwlock_rwunlock(d->rlock);

    /* make the request into a binary term to put it into enif_send() */
    ErlNifBinary bin;
    enif_alloc_binary(argp_sz, &bin);
    memcpy(bin.data, argp, argp_sz);
    ERL_NIF_TERM binTerm = enif_make_binary(env, &bin);

    /* send a message back to the session owner */
    enif_send(NULL, &d->owner, env,
        enif_make_tuple3(env,
            enif_make_atom(env, "zdoor"),
            enif_make_resource(env, r),
            binTerm));

    /* now wait until the request has been replied to */
    enif_cond_wait(r->cond, r->lock);

    /* convert the reply into a zdoor_result */
    /* we have to use naked malloc() since libzdoor will use free() */
    struct zdoor_result *res = malloc(sizeof(struct zdoor_result));
    res->zdr_size = r->replen;
    res->zdr_data = r->rep;
    r->rep = NULL;
    r->replen = 0;

    /* yes, we have to unlock and re-lock to avoid lock inversion here */
    enif_mutex_unlock(r->lock);

    /* remove and free the struct req */
    enif_rwlock_rwlock(d->rlock);
    enif_mutex_lock(r->lock);
    req_remove(d, r);
    enif_rwlock_rwunlock(d->rlock);
    req_free(r);

    enif_free_env(env);

    return res;
}
/*
 * destroy(Cache :: atom()) -- destroys an entire cache
 * destroy(Cache :: atom(), Key :: binary()) -- removes an entry from a cache
 */
static ERL_NIF_TERM
destroy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM atom;
    struct cache *c;
    ErlNifBinary kbin;
    struct cache_node *n;

    if (!enif_is_atom(env, argv[0]))
        return enif_make_badarg(env);
    atom = argv[0];

    if ((c = get_cache(atom))) {
        if (argc == 2) {
            if (!enif_inspect_binary(env, argv[1], &kbin))
                return enif_make_badarg(env);

            enif_rwlock_rwlock(c->cache_lock);
            enif_rwlock_rwlock(c->lookup_lock);

            HASH_FIND(hh, c->lookup, kbin.data, kbin.size, n);
            if (!n) {
                enif_rwlock_rwunlock(c->lookup_lock);
                enif_rwlock_rwunlock(c->cache_lock);
                return enif_make_atom(env, "notfound");
            }

            enif_mutex_lock(c->ctrl_lock);
            destroy_cache_node(n);
            enif_mutex_unlock(c->ctrl_lock);

            enif_rwlock_rwunlock(c->lookup_lock);
            enif_rwlock_rwunlock(c->cache_lock);

            enif_consume_timeslice(env, 50);

            return enif_make_atom(env, "ok");
        } else {
            enif_mutex_lock(c->ctrl_lock);
            c->flags |= FL_DYING;
            enif_mutex_unlock(c->ctrl_lock);
            enif_cond_broadcast(c->check_cond);

            enif_thread_join(c->bg_thread, NULL);

            enif_consume_timeslice(env, 100);

            return enif_make_atom(env, "ok");
        }
        return enif_make_atom(env, "ok");
    }

    return enif_make_atom(env, "notfound");
}
int
queue_push(queue_t* queue, ErlNifPid* pid)
{
    qitem_t* item = (qitem_t*) enif_alloc(sizeof(qitem_t));
    if (item == NULL)
        return 0;

    item->pid = pid;
    item->next = NULL;

    enif_mutex_lock(queue->lock);

    if (queue->tail != NULL) {
        queue->tail->next = item;
    }

    queue->tail = item;

    if (queue->head == NULL) {
        queue->head = queue->tail;
    }

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}
ErlNifPid*
queue_pop(queue_t* queue)
{
    qitem_t* item;
    ErlNifPid* ret = NULL;

    enif_mutex_lock(queue->lock);

    while (queue->head == NULL) {
        enif_cond_wait(queue->cond, queue->lock);
    }

    item = queue->head;
    queue->head = item->next;
    item->next = NULL;

    if (queue->head == NULL) {
        queue->tail = NULL;
    }

    enif_mutex_unlock(queue->lock);

    ret = item->pid;
    enif_free(item);

    return ret;
}
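/*
 * Not one of the collected snippets: a minimal sketch of the types that the
 * queue_push/queue_pop pair above appears to rely on, namely a singly linked
 * list of ErlNifPid pointers protected by one mutex/condition-variable pair.
 * The real project's definitions may carry additional fields.
 */
typedef struct qitem_t {
    struct qitem_t* next;
    ErlNifPid* pid;
} qitem_t;

typedef struct queue_t {
    ErlNifMutex* lock;   /* guards head and tail                       */
    ErlNifCond* cond;    /* signalled by queue_push, waited on in pop  */
    qitem_t* head;       /* oldest item, NULL when empty               */
    qitem_t* tail;       /* newest item, NULL when empty               */
} queue_t;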
static ERL_NIF_TERM
add(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    PRIVDATA *data = NULL;
    int k = 0;
    int v = 0;
    int nelem = 0;

    data = (PRIVDATA *)enif_priv_data(env);
    nelem = NELEM(data);

    if (!enif_get_int(env, argv[0], &k) ||
        !enif_get_int(env, argv[1], &v))
        return enif_make_badarg(env);

    if ((k < 0) || (k >= nelem))
        return error_tuple(env, "out_of_bounds");

    enif_mutex_lock(data->lock);
    VAL(data, k) += v;
    v = VAL(data, k);
    enif_mutex_unlock(data->lock);

    return enif_make_int(env, v);
}
static void
salt_pcb_free(nif_heap_t *hp, void *obj)
{
    struct salt_pcb *sc = obj;
    struct salt_msg *sm;
    struct salt_msg *tmp;

    /* Signal termination request, join worker thread, release all resources. */
    enif_mutex_lock(sc->sc_lock);
    sc->sc_exit_flag = true;
    enif_cond_signal(sc->sc_cond);
    enif_mutex_unlock(sc->sc_lock);

    (void)enif_thread_join(sc->sc_thread, NULL);

    sm = sc->sc_req_first;
loop:
    if (sm == NULL)
        goto done;
    tmp = sm->msg_next;

    enif_free_env(sm->msg_heap);
    enif_free(sm);

    sm = tmp;
    goto loop;
done:
    enif_mutex_destroy(sc->sc_lock);
    enif_cond_destroy(sc->sc_cond);

    /* Done, PCB itself released by ERTS. */
    return;
}
int
wait_pointer(couchfile_modify_request* rq, couchfile_pointer_info *ptr)
{
    if (ptr->writerq_resource == NULL)
        return 0;

    int ret = 0;
    btreenif_state *state = rq->globalstate;

    enif_mutex_lock(state->writer_cond.mtx);

    while (ptr->pointer == 0) {
        enif_cond_wait(state->writer_cond.cond, state->writer_cond.mtx);
        if (ptr->pointer == 0 &&
            !enif_send(rq->caller_env, &rq->writer, state->check_env,
                       state->atom_heart)) {
            // The writer process has died
            ret = ERROR_WRITER_DEAD;
            break;
        }
        enif_clear_env(state->check_env);
    }

    if (ptr->pointer != 0) {
        enif_release_resource(ptr->writerq_resource);
    }

    enif_mutex_unlock(state->writer_cond.mtx);
    ptr->writerq_resource = NULL;

    return ret;
}
void*
queue_pop(queue *queue)
{
    qitem *entry;
    void* item;

    enif_mutex_lock(queue->lock);

    /* Wait for an item to become available. */
    while (queue->head == NULL) {
        enif_cond_wait(queue->cond, queue->lock);
    }

    assert(queue->length >= 0 && "Invalid queue size at pop.");

    /*
     * Woke up because queue->head != NULL.
     * Remove the entry and return the payload.
     */
    entry = queue->head;
    queue->head = entry->next;
    entry->next = NULL;

    if (queue->head == NULL) {
        assert(queue->tail == entry && "Invalid queue state: Bad tail.");
        queue->tail = NULL;
    }

    queue->length -= 1;

    enif_mutex_unlock(queue->lock);

    item = entry->data;
    enif_free(entry);

    return item;
}
/*
 * To call this you must have all of the cache's locks held
 * (cache_lock, lookup_lock and ctrl_lock)!
 */
static void
destroy_cache_node(struct cache_node *n)
{
    struct cache_incr_node *in, *nextin;
    int i;

    TAILQ_REMOVE(&(n->q->head), n, entry);
    n->q->size -= n->size;
    n->q = NULL;
    HASH_DEL(n->c->lookup, n);
    if (n->expiry.tv_sec != 0)
        RB_REMOVE(expiry_tree, &(n->c->expiry_head), n);

    for (i = 0; i < N_INCR_BKT; ++i) {
        enif_mutex_lock(n->c->incr_lock[i]);
        nextin = TAILQ_FIRST(&(n->c->incr_head[i]));
        while ((in = nextin)) {
            nextin = TAILQ_NEXT(in, entry);
            if (in->node == n) {
                TAILQ_REMOVE(&(n->c->incr_head[i]), in, entry);
                __sync_sub_and_fetch(&(n->c->incr_count), 1);
                in->node = 0;
                enif_free(in);
            }
        }
        enif_mutex_unlock(n->c->incr_lock[i]);
    }

    n->c = NULL;

    enif_free(n->key);
    n->key = NULL;
    enif_release_resource(n->val);
    n->val = NULL;

    enif_free(n);
}
void*
queue_get(queue_t* queue)
{
    queue_item_t* item;

    enif_mutex_lock(queue->mutex);

    // Block until there's something in the queue
    while (queue->head == NULL) {
        enif_cond_wait(queue->cond, queue->mutex);
    }

    item = queue->head;
    queue->head = queue->head->next;
    item->next = NULL;

    if (queue->head == NULL) {
        queue->tail = NULL;
    }

    enif_mutex_unlock(queue->mutex);

    void* data = item->data;
    enif_free(item);

    return data;
}
static void
unload_cb(ErlNifEnv *env, void *priv_data)
{
    struct atom_node *an;

    enif_rwlock_rwlock(gbl->atom_lock);

    /*
     * when we unload, we want to tell all of the active caches to die,
     * then join() their bg_threads to wait until they're completely gone
     */
    while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) {
        struct cache *c = an->cache;
        enif_rwlock_rwunlock(gbl->atom_lock);

        enif_mutex_lock(c->ctrl_lock);
        c->flags |= FL_DYING;
        enif_mutex_unlock(c->ctrl_lock);
        enif_cond_broadcast(c->check_cond);

        enif_thread_join(c->bg_thread, NULL);

        enif_rwlock_rwlock(gbl->atom_lock);
    }

    enif_rwlock_rwunlock(gbl->atom_lock);
    enif_rwlock_destroy(gbl->atom_lock);
    enif_clear_env(gbl->atom_env);

    enif_free(gbl);
    gbl = NULL;
}
int
queue_push(queue *queue, void *item)
{
    qitem *entry = (qitem *) enif_alloc(sizeof(qitem));
    if (entry == NULL)
        return 0;

    entry->data = item;
    entry->next = NULL;

    enif_mutex_lock(queue->lock);

    assert(queue->length >= 0 && "Invalid queue size at push");

    if (queue->tail != NULL) {
        queue->tail->next = entry;
    }

    queue->tail = entry;

    if (queue->head == NULL) {
        queue->head = queue->tail;
    }

    queue->length += 1;

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}
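/*
 * Not part of the original snippets: a hypothetical queue_create counterpart
 * to the queue_push/queue_pop pair above, sketched only from the fields those
 * functions touch (lock, cond, head, tail, length). It relies on the
 * documented enif_mutex_create/enif_cond_create calls; the real project's
 * constructor may differ.
 */
queue *
queue_create(void)
{
    queue *ret = (queue *) enif_alloc(sizeof(queue));
    if (ret == NULL)
        return NULL;

    ret->head = NULL;
    ret->tail = NULL;
    ret->length = 0;

    ret->lock = enif_mutex_create("queue_lock");
    ret->cond = enif_cond_create("queue_cond");
    if (ret->lock == NULL || ret->cond == NULL) {
        /* Roll back whatever was created before failing. */
        if (ret->lock != NULL)
            enif_mutex_destroy(ret->lock);
        if (ret->cond != NULL)
            enif_cond_destroy(ret->cond);
        enif_free(ret);
        return NULL;
    }

    return ret;
}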
int
queue_send(queue *queue, void *item)
{
    enif_mutex_lock(queue->lock);
    assert(queue->message == NULL && "Attempting to send multiple messages.");
    queue->message = item;
    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);
    return 1;
}
int
queue_empty(queue *queue)
{
    int ret;

    enif_mutex_lock(queue->lock);
    ret = (queue->head == NULL);
    enif_mutex_unlock(queue->lock);

    return ret;
}
void
job_insert(struct job *j)
{
    enif_mutex_lock(gbl.jlock);
    if (gbl.jlist)
        j->next = gbl.jlist;
    gbl.jlist = j;
    enif_cond_signal(gbl.jcond);
    enif_mutex_unlock(gbl.jlock);
}
static ErlCall *
FindCall(int id)
{
    enif_mutex_lock(callsMutex);
    ErlCall *erlCall;
    HASH_FIND_INT(calls, &id, erlCall);
    enif_mutex_unlock(callsMutex);

    return erlCall;
}
ERL_NIF_TERM
nif_thread_send(nif_thread_state* st, nif_thread_message* msg)
{
    enif_mutex_lock(st->lock);

    TAILQ_INSERT_TAIL(st->mailbox, msg, next_entry);

    enif_cond_signal(st->cond);
    enif_mutex_unlock(st->lock);

    return atom_ok;
}
int
queue_has_item(queue *queue)
{
    int ret;

    enif_mutex_lock(queue->lock);
    ret = (queue->head != NULL);
    enif_mutex_unlock(queue->lock);

    return ret;
}
PyObject *
pytherl_eval(char *code, char *var_name, ErlNifMutex *mutex)
{
    enif_mutex_lock(mutex);

    if (!Py_IsInitialized()) {
        Py_Initialize();
    }

    PyRun_SimpleString(code);
    enif_mutex_unlock(mutex);

    return pytherl_value(var_name, mutex);
}
static void
DestroyCall(ErlCall *erlCall)
{
    enif_mutex_lock(callsMutex);
    HASH_DEL(calls, erlCall);

    enif_clear_env(erlCall->env);
    enif_free_env(erlCall->env);
    enif_mutex_destroy(erlCall->mutex);
    enif_cond_destroy(erlCall->cond);
    free(erlCall);

    enif_mutex_unlock(callsMutex);
}
PyObject *
pytherl_value(const char *var_name, ErlNifMutex *mutex)
{
    enif_mutex_lock(mutex);

    PyObject *module = PyImport_AddModule("__main__");
    assert(module);

    PyObject *dictionary = PyModule_GetDict(module);
    assert(dictionary);

    PyObject *result = PyDict_GetItemString(dictionary, var_name);
    assert(result);

    enif_mutex_unlock(mutex);

    return result;
}
static ERL_NIF_TERM
nif_mod_call_history(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    PrivData* data = (PrivData*) enif_priv_data(env);
    ERL_NIF_TERM ret;

    if (data->nif_mod == NULL) {
        return enif_make_string(env, "nif_mod pointer is NULL", ERL_NIF_LATIN1);
    }

    enif_mutex_lock(data->nif_mod->mtx);
    ret = make_call_history(env, &data->nif_mod->call_history);
    enif_mutex_unlock(data->nif_mod->mtx);

    return ret;
}
static nif_term_t
salt_enqueue_req(nif_heap_t *hp, struct salt_pcb *sc, nif_pid_t pid,
    nif_term_t ref, uint_t type, uint_t aux)
{
    struct salt_msg *sm;
    const char *err;

    /* Prepare async request for worker thread. */
    sm = enif_alloc(sizeof(*sm));
    if (sm == NULL)
        return (BADARG);
    sm->msg_heap = enif_alloc_env();
    assert(sm->msg_heap != NULL);

    sm->msg_next = NULL;
    sm->msg_from = pid; /* struct copy */
    sm->msg_mref = enif_make_copy(sm->msg_heap, ref);
    sm->msg_type = type;
    sm->msg_aux = aux;

    /* Enqueue request checking for failure scenarios. */
    enif_mutex_lock(sc->sc_lock);
    if (sc->sc_req_npend >= 128) {
        err = "congested";
        goto fail;
    }
    if (sc->sc_exit_flag) {
        /* XXX This should not even be possible, no? */
        err = "exiting";
        goto fail;
    }

    *sc->sc_req_lastp = sm;
    sc->sc_req_lastp = &sm->msg_next;
    sc->sc_req_npend += 1;

    enif_cond_signal(sc->sc_cond);
    enif_mutex_unlock(sc->sc_lock);

    return (enif_make_atom(hp, "enqueued"));

    /* Failure treatment. */
fail:
    enif_mutex_unlock(sc->sc_lock);

    enif_free_env(sm->msg_heap);
    enif_free(sm);

    return (enif_make_atom(hp, err));
}
static ERL_NIF_TERM
ErlangCall(ErlNifEnv *env, ERL_NIF_TERM fun, ERL_NIF_TERM args)
{
    ErlCall *erlCall = CreateCall(fun, args);

    enif_mutex_lock(erlCall->mutex);
    enif_send(env, &server, erlCall->env, erlCall->msg);
    while (!erlCall->complete) {
        enif_cond_wait(erlCall->cond, erlCall->mutex);
    }
    enif_mutex_unlock(erlCall->mutex);

    ERL_NIF_TERM result = enif_make_copy(env, erlCall->result);
    DestroyCall(erlCall);

    return result;
}
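/*
 * Not from the original project: a rough sketch of the ErlCall record that
 * ErlangCall/FindCall/DestroyCall appear to operate on, inferred from the
 * fields they access. It pairs a per-call mutex/condition variable with a
 * uthash handle so pending calls can be looked up by integer id; the real
 * layout and field types may differ.
 */
#include "uthash.h"

typedef struct ErlCall {
    int id;               /* key used by HASH_FIND_INT                  */
    ErlNifEnv *env;       /* process-independent env holding msg/result */
    ERL_NIF_TERM msg;     /* request sent to the server process         */
    ERL_NIF_TERM result;  /* reply copied back into the caller's env    */
    int complete;         /* set once the reply has arrived             */
    ErlNifMutex *mutex;   /* guards complete and result                 */
    ErlNifCond *cond;     /* signalled when the reply arrives           */
    UT_hash_handle hh;    /* uthash bookkeeping for the calls table     */
} ErlCall;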
void*
threaded_sender(void *arg)
{
    union { void* vp; struct make_term_info* p; } mti;
    mti.vp = arg;

    enif_mutex_lock(mti.p->mtx);
    while (!mti.p->send_it) {
        enif_cond_wait(mti.p->cond, mti.p->mtx);
    }
    mti.p->send_it = 0;
    enif_mutex_unlock(mti.p->mtx);

    mti.p->send_res = enif_send(NULL, &mti.p->to_pid, mti.p->dst_env, mti.p->blob);

    return NULL;
}
static ERL_NIF_TERM
join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    union { void* vp; struct make_term_info* p; } mti;
    int err;

    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
        return enif_make_badarg(env);
    }

    enif_mutex_lock(mti.p->mtx);
    mti.p->send_it = 1;
    enif_cond_signal(mti.p->cond);
    enif_mutex_unlock(mti.p->mtx);

    err = enif_thread_join(mti.p->tid, NULL);
    assert(err == 0);

    enif_release_resource(mti.vp);

    return enif_make_tuple2(env, atom_ok, enif_make_int(env, mti.p->send_res));
}
int
nif_thread_receive(nif_thread_state* st, nif_thread_message** msg)
{
    enif_mutex_lock(st->lock);

    while (TAILQ_EMPTY(st->mailbox))
        enif_cond_wait(st->cond, st->lock);

    *msg = TAILQ_FIRST(st->mailbox);
    TAILQ_REMOVE(st->mailbox, TAILQ_FIRST(st->mailbox), next_entry);

    enif_mutex_unlock(st->lock);

    if ((*msg)->function == NULL)
        return 0;

    return 1;
}
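/*
 * Not one of the collected snippets: a sketch of the mailbox types that
 * nif_thread_send/nif_thread_receive seem to assume, built on the BSD
 * <sys/queue.h> TAILQ macros. Only the members actually referenced above are
 * shown, and the function pointer's exact signature is a placeholder.
 */
#include <sys/queue.h>

typedef struct nif_thread_message {
    TAILQ_ENTRY(nif_thread_message) next_entry; /* mailbox linkage     */
    void (*function)(void *arg);                /* NULL means shutdown */
} nif_thread_message;

typedef TAILQ_HEAD(nif_thread_mailbox, nif_thread_message) nif_thread_mailbox;

typedef struct nif_thread_state {
    ErlNifMutex* lock;            /* guards the mailbox       */
    ErlNifCond* cond;             /* signalled on every send  */
    nif_thread_mailbox* mailbox;  /* FIFO of pending messages */
} nif_thread_state;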
void
queue_destroy(queue *queue)
{
    ErlNifMutex *lock;
    ErlNifCond *cond;
    int length;
    qitem *blocks = NULL;

    enif_mutex_lock(queue->lock);

    lock = queue->lock;
    cond = queue->cond;
    length = queue->length;

    queue->lock = NULL;
    queue->cond = NULL;
    queue->head = NULL;
    queue->tail = NULL;
    queue->length = -1;

    while (queue->reuseq != NULL) {
        qitem *tmp = queue->reuseq->next;
        if (tmp != NULL && tmp->env != NULL)
            enif_free_env(tmp->env);
        if (tmp != NULL && tmp->cmd != NULL)
            enif_free(tmp->cmd);
        if (queue->reuseq->blockStart) {
            queue->reuseq->next = blocks;
            blocks = queue->reuseq;
        }
        queue->reuseq = tmp;
    }

    while (blocks != NULL) {
        qitem *tmp = blocks->next;
        enif_free(blocks);
        blocks = tmp;
    }

    enif_mutex_unlock(lock);

    assert(length == 0 && "Attempting to destroy a non-empty queue.");

    enif_cond_destroy(cond);
    enif_mutex_destroy(lock);
    enif_free(queue);
}
void *
queue_receive(queue *queue)
{
    void *item;

    enif_mutex_lock(queue->lock);

    /* Wait for an item to become available. */
    while (queue->message == NULL) {
        enif_cond_wait(queue->cond, queue->lock);
    }

    item = queue->message;
    queue->message = NULL;

    enif_mutex_unlock(queue->lock);

    return item;
}
static void *
salt_worker_loop(void *arg)
{
    struct salt_pcb *sc = arg;
    struct salt_msg *sm;
    struct salt_msg *tmp;

    /* XXX initialization of libsodium */
    /* XXX send readiness indication to owner */

    /* Pick up next batch of work, react promptly to termination requests. */
loop:
    enif_mutex_lock(sc->sc_lock);
wait:
    if (sc->sc_exit_flag) {
        enif_mutex_unlock(sc->sc_lock);
        return (NULL);
    }

    if (sc->sc_req_first == NULL) {
        enif_cond_wait(sc->sc_cond, sc->sc_lock);
        goto wait;
    }

    sm = sc->sc_req_first;
    sc->sc_req_first = NULL;
    sc->sc_req_lastp = &sc->sc_req_first;
    sc->sc_req_npend = 0;

    enif_mutex_unlock(sc->sc_lock);

    /* Handle all requests, release when done. */
next:
    salt_handle_req(sc, sm);
    tmp = sm->msg_next;

    enif_free_env(sm->msg_heap);
    enif_free(sm);

    if (tmp == NULL)
        goto loop;

    sm = tmp;
    goto next;
}