/* Block until an item is available, then dequeue it and return its pid.
 * The qitem wrapper node is freed here; ownership of the ErlNifPid
 * passes to the caller. */
ErlNifPid* queue_pop(queue_t* queue)
{
    qitem_t* node;
    ErlNifPid* pid;

    enif_mutex_lock(queue->lock);

    /* Re-check the predicate after every wakeup (spurious wakeups). */
    while (queue->head == NULL)
        enif_cond_wait(queue->cond, queue->lock);

    node = queue->head;
    queue->head = node->next;
    node->next = NULL;
    if (queue->head == NULL)
        queue->tail = NULL;

    enif_mutex_unlock(queue->lock);

    pid = node->pid;
    enif_free(node);
    return pid;
}
/* Append a pid to the queue's tail and wake one waiting consumer.
 * Returns 1 on success, 0 if the node allocation fails. */
int queue_push(queue_t* queue, ErlNifPid* pid)
{
    qitem_t* node = (qitem_t*) enif_alloc(sizeof(qitem_t));

    if (node == NULL)
        return 0;

    node->pid = pid;
    node->next = NULL;

    enif_mutex_lock(queue->lock);
    if (queue->tail == NULL)
        queue->head = node;      /* empty queue: node is both ends */
    else
        queue->tail->next = node;
    queue->tail = node;
    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);
    return 1;
}
/* NIF: add(K, V) -> integer().
 * Atomically adds V to slot K of the private counter array and returns
 * the new slot value; badarg on non-integers, {error, out_of_bounds}
 * when K is outside [0, nelem). */
static ERL_NIF_TERM add(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    PRIVDATA *data = (PRIVDATA *)enif_priv_data(env);
    int nelem = NELEM(data);
    int k = 0;
    int v = 0;
    int result;

    if (!enif_get_int(env, argv[0], &k) || !enif_get_int(env, argv[1], &v))
        return enif_make_badarg(env);

    if (k < 0 || k >= nelem)
        return error_tuple(env, "out_of_bounds");

    /* Serialize the read-modify-write of the shared slot. */
    enif_mutex_lock(data->lock);
    VAL(data, k) += v;
    result = VAL(data, k);
    enif_mutex_unlock(data->lock);

    return enif_make_int(env, result);
}
static struct zdoor_result * zdoor_cb(struct zdoor_cookie *cookie, char *argp, size_t argp_sz) { struct door *d; struct req *r; ErlNifEnv *env = enif_alloc_env(); /* we kept the struct door in the biscuit */ d = (struct door *)cookie->zdc_biscuit; /* this request */ r = req_alloc(); /* take the rlist lock first, then the req lock */ enif_rwlock_rwlock(d->rlock); enif_mutex_lock(r->lock); req_insert(d, r); enif_rwlock_rwunlock(d->rlock); /* make the request into a binary term to put it into enif_send() */ ErlNifBinary bin; enif_alloc_binary(argp_sz, &bin); memcpy(bin.data, argp, argp_sz); ERL_NIF_TERM binTerm = enif_make_binary(env, &bin); /* send a message back to the session owner */ enif_send(NULL, &d->owner, env, enif_make_tuple3(env, enif_make_atom(env, "zdoor"), enif_make_resource(env, r), binTerm)); /* now wait until the request has been replied to */ enif_cond_wait(r->cond, r->lock); /* convert the reply into a zdoor_result */ /* we have to use naked malloc() since libzdoor will use free() */ struct zdoor_result *res = malloc(sizeof(struct zdoor_result)); res->zdr_size = r->replen; res->zdr_data = r->rep; r->rep = NULL; r->replen = 0; /* yes, we have to unlock and re-lock to avoid lock inversion here */ enif_mutex_unlock(r->lock); /* remove and free the struct req */ enif_rwlock_rwlock(d->rlock); enif_mutex_lock(r->lock); req_remove(d, r); enif_rwlock_rwunlock(d->rlock); req_free(r); enif_free_env(env); return res; }
/* Enqueue an opaque item at the tail, bump the length counter, and wake
 * one waiting consumer. Returns 1 on success, 0 on allocation failure. */
int queue_push(queue_ptr queue, void* item)
{
    qitem_ptr node = (qitem_ptr) enif_alloc(sizeof(struct qitem_t));

    if (node == NULL)
        return 0;

    node->data = item;
    node->next = NULL;

    enif_mutex_lock(queue->lock);
    assert(queue->length >= 0 && "Invalid queue size at push");
    if (queue->tail == NULL)
        queue->head = node;      /* empty queue: node is both ends */
    else
        queue->tail->next = node;
    queue->tail = node;
    queue->length += 1;
    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);
    return 1;
}
/* Block until the writer thread has filled in ptr->pointer, pinging the
 * writer Erlang process with a heartbeat after each wakeup so a dead
 * writer is detected instead of waiting forever.
 * Returns 0 on success (or when no write is pending), or
 * ERROR_WRITER_DEAD if the heartbeat send fails. */
int wait_pointer(couchfile_modify_request* rq, couchfile_pointer_info *ptr)
{
    /* Nothing queued for this pointer; nothing to wait on. */
    if(ptr->writerq_resource == NULL)
        return 0;

    int ret = 0;
    btreenif_state *state = rq->globalstate;

    enif_mutex_lock(state->writer_cond.mtx);
    while(ptr->pointer == 0)
    {
        enif_cond_wait(state->writer_cond.cond, state->writer_cond.mtx);
        /* Still not written: probe the writer process. enif_send()
         * returning false means the destination pid is no longer alive. */
        if(ptr->pointer == 0
           && !enif_send(rq->caller_env, &rq->writer,
                         state->check_env, state->atom_heart))
        {
            //The writer process has died
            ret = ERROR_WRITER_DEAD;
            break;
        }
        /* check_env is reused for every heartbeat; clear it after each
         * successful send. */
        enif_clear_env(state->check_env);
    }
    /* Only drop our reference once the write actually completed. */
    if(ptr->pointer != 0)
    {
        enif_release_resource(ptr->writerq_resource);
    }
    enif_mutex_unlock(state->writer_cond.mtx);
    ptr->writerq_resource = NULL;
    return ret;
}
/* Deposit a single item into the one-slot mailbox and signal the
 * receiver. The slot must be empty (asserted). Always returns 1. */
int queue_send(queue *queue, void *item)
{
    enif_mutex_lock(queue->lock);

    /* One message in flight at a time; the receiver clears the slot. */
    assert(queue->message == NULL && "Attempting to send multiple messages.");
    queue->message = item;

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);
    return 1;
}
static void * salt_worker_loop(void *arg) { struct salt_pcb *sc = arg; struct salt_msg *sm; struct salt_msg *tmp; /* XXX initialization of libsodium */ /* XXX send readiness indication to owner */ /* Pick up next batch of work, react promptly to termination requests. */ loop: enif_mutex_lock(sc->sc_lock); wait: if (sc->sc_exit_flag) { enif_mutex_unlock(sc->sc_lock); return (NULL); } if (sc->sc_req_first == NULL) { enif_cond_wait(sc->sc_cond, sc->sc_lock); goto wait; } sm = sc->sc_req_first; sc->sc_req_first = NULL; sc->sc_req_lastp = &sc->sc_req_first; sc->sc_req_npend = 0; enif_mutex_unlock(sc->sc_lock); /* Handle all requests, release when done. */ next: salt_handle_req(sc, sm); tmp = sm->msg_next; enif_free_env(sm->msg_heap); enif_free(sm); if (tmp == NULL) goto loop; sm = tmp; goto next; }
/* Return a queue item to the free list for later reuse.
 * BUGFIX: previously spun in an empty busy-wait loop on
 * enif_mutex_trylock(), burning CPU whenever the lock was contended.
 * A plain blocking enif_mutex_lock() acquires the same lock with the
 * same end state and lets the scheduler run other threads. */
void queue_recycle(queue *queue, qitem *entry)
{
    enif_mutex_lock(queue->lock);
    entry->next = queue->reuseq;
    queue->reuseq = entry;
    enif_mutex_unlock(queue->lock);
}
/* Report whether the queue has no items (1 = empty, 0 = non-empty).
 * The answer may be stale by the time the caller acts on it. */
int queue_empty(queue *queue)
{
    int empty;

    enif_mutex_lock(queue->lock);
    empty = queue->head == NULL ? 1 : 0;
    enif_mutex_unlock(queue->lock);
    return empty;
}
/* Return the current number of items in the queue.
 * BUGFIX: previously spun in an empty busy-wait loop on
 * enif_mutex_trylock(); a blocking enif_mutex_lock() is the correct,
 * CPU-friendly way to wait for the lock. */
int queue_size(queue *queue)
{
    int r;

    enif_mutex_lock(queue->lock);
    r = queue->length;
    enif_mutex_unlock(queue->lock);
    return r;
}
/* Look up a pending call by id in the global hash table.
 * Returns NULL when no entry with that id exists.
 * NOTE(review): the pointer is returned after the table lock is
 * dropped; callers must ensure the entry cannot be destroyed
 * concurrently -- TODO confirm. */
static ErlCall *FindCall(int id)
{
    ErlCall *found;

    enif_mutex_lock(callsMutex);
    HASH_FIND_INT(calls, &id, found);
    enif_mutex_unlock(callsMutex);

    return found;
}
/* Push a job onto the head of the global job list and wake one worker.
 * BUGFIX: j->next was only assigned when the list was non-empty, so a
 * job inserted into an empty list kept whatever value j->next already
 * held (garbage if the caller did not pre-zero it). Linking
 * unconditionally is equivalent for the non-empty case and
 * NULL-terminates the list for the empty case. */
void job_insert(struct job *j)
{
    enif_mutex_lock(gbl.jlock);
    j->next = gbl.jlist;
    gbl.jlist = j;
    enif_cond_signal(gbl.jcond);
    enif_mutex_unlock(gbl.jlock);
}
/* Run a chunk of Python source under the shared interpreter mutex, then
 * fetch and return the value bound to var_name.
 * The interpreter is initialized lazily on first use. */
PyObject *pytherl_eval(char *code, char *var_name, ErlNifMutex *mutex)
{
    enif_mutex_lock(mutex);
    if (!Py_IsInitialized())
        Py_Initialize();
    PyRun_SimpleString(code);
    enif_mutex_unlock(mutex);

    /* pytherl_value() re-acquires the mutex itself. */
    return pytherl_value(var_name, mutex);
}
/* Report whether at least one item is queued (1 = yes, 0 = no). */
int queue_has_item(queue *queue)
{
    int nonempty;

    enif_mutex_lock(queue->lock);
    nonempty = queue->head != NULL ? 1 : 0;
    enif_mutex_unlock(queue->lock);
    return nonempty;
}
/* Append a message to the worker thread's mailbox and wake the worker.
 * Always returns atom_ok. */
ERL_NIF_TERM nif_thread_send(nif_thread_state* st, nif_thread_message* msg)
{
    enif_mutex_lock(st->lock);

    TAILQ_INSERT_TAIL(st->mailbox, msg, next_entry);
    enif_cond_signal(st->cond);

    enif_mutex_unlock(st->lock);
    return atom_ok;
}
/* NIF: return the recorded call history of the loaded nif_mod library,
 * or a diagnostic string when no library pointer is set. */
static ERL_NIF_TERM nif_mod_call_history(ErlNifEnv* env, int argc,
                                         const ERL_NIF_TERM argv[])
{
    PrivData* priv = (PrivData*) enif_priv_data(env);
    ERL_NIF_TERM history;

    if (priv->nif_mod == NULL)
        return enif_make_string(env,"nif_mod pointer is NULL", ERL_NIF_LATIN1);

    /* The history list is shared with the library; read it under its
     * mutex. */
    enif_mutex_lock(priv->nif_mod->mtx);
    history = make_call_history(env, &priv->nif_mod->call_history);
    enif_mutex_unlock(priv->nif_mod->mtx);

    return history;
}
/* Fetch the value bound to var_name in Python's __main__ module dict,
 * holding the shared interpreter mutex for the duration of the lookup.
 * NOTE(review): PyDict_GetItemString returns a BORROWED reference; it
 * is handed to the caller without an incref and after the mutex is
 * released, so the binding must outlive the caller's use of the
 * result -- TODO confirm callers' expectations.
 * NOTE(review): the asserts compile away under NDEBUG, in which case a
 * missing module or variable yields a NULL return used downstream. */
PyObject *pytherl_value(const char *var_name, ErlNifMutex *mutex)
{
    enif_mutex_lock(mutex);
    PyObject *module = PyImport_AddModule("__main__");
    assert(module);
    PyObject *dictionary = PyModule_GetDict(module);
    assert(dictionary);
    PyObject *result = PyDict_GetItemString(dictionary, var_name);
    assert(result);
    enif_mutex_unlock(mutex);
    return result;
};
/* Unlink a completed call from the global table and release everything
 * it owned: its env, its mutex/condvar, and the struct itself. */
static void DestroyCall(ErlCall *erlCall)
{
    enif_mutex_lock(callsMutex);

    HASH_DEL(calls, erlCall);

    /* Tear down the call's private resources. */
    enif_clear_env(erlCall->env);
    enif_free_env(erlCall->env);
    enif_mutex_destroy(erlCall->mutex);
    enif_cond_destroy(erlCall->cond);
    free(erlCall);

    enif_mutex_unlock(callsMutex);
}
/* Sender thread body: block until the send_it flag is raised, then
 * deliver the prepared blob to the destination pid. The send result is
 * stored in the shared make_term_info; always returns NULL. */
void* threaded_sender(void *arg)
{
    union {
        void* vp;
        struct make_term_info* p;
    } mti;

    mti.vp = arg;

    enif_mutex_lock(mti.p->mtx);
    /* Predicate loop guards against spurious wakeups. */
    while (!mti.p->send_it)
        enif_cond_wait(mti.p->cond, mti.p->mtx);
    mti.p->send_it = 0;
    enif_mutex_unlock(mti.p->mtx);

    mti.p->send_res = enif_send(NULL, &mti.p->to_pid, mti.p->dst_env,
                                mti.p->blob);
    return NULL;
}
/* Synchronously invoke fun(args) in the Erlang server process: register
 * the call, send it, and sleep on its condvar until the server marks it
 * complete. Returns a copy of the result made in the caller's env. */
static ERL_NIF_TERM ErlangCall(ErlNifEnv *env, ERL_NIF_TERM fun,
                               ERL_NIF_TERM args)
{
    ErlCall *call = CreateCall(fun, args);

    enif_mutex_lock(call->mutex);
    enif_send(env, &server, call->env, call->msg);
    /* Predicate loop: tolerate spurious wakeups. */
    while (!call->complete)
        enif_cond_wait(call->cond, call->mutex);
    enif_mutex_unlock(call->mutex);

    /* Copy the result out before the call (and its env) is destroyed. */
    ERL_NIF_TERM result = enif_make_copy(env, call->result);
    DestroyCall(call);
    return result;
}
/* NIF: release the sender thread blocked in threaded_sender(), join it,
 * and return {ok, SendResult}. */
static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc,
                                     const ERL_NIF_TERM argv[])
{
    union {
        void* vp;
        struct make_term_info* p;
    } mti;
    int err;

    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp))
        return enif_make_badarg(env);

    /* Raise the flag under the mutex so the sender cannot miss it. */
    enif_mutex_lock(mti.p->mtx);
    mti.p->send_it = 1;
    enif_cond_signal(mti.p->cond);
    enif_mutex_unlock(mti.p->mtx);

    err = enif_thread_join(mti.p->tid, NULL);
    assert(err == 0);

    enif_release_resource(mti.vp);
    return enif_make_tuple2(env, atom_ok, enif_make_int(env, mti.p->send_res));
}
/* Block until a message arrives in the worker mailbox, pop it into
 * *msg, and report whether it is a real request (1) or a shutdown
 * marker (0, indicated by a NULL function pointer). */
int nif_thread_receive(nif_thread_state* st, nif_thread_message** msg)
{
    nif_thread_message* head;

    enif_mutex_lock(st->lock);
    while (TAILQ_EMPTY(st->mailbox))
        enif_cond_wait(st->cond, st->lock);
    head = TAILQ_FIRST(st->mailbox);
    TAILQ_REMOVE(st->mailbox, head, next_entry);
    enif_mutex_unlock(st->lock);

    *msg = head;
    return head->function == NULL ? 0 : 1;
}
/* Tear down a queue: release every recycled item's env/cmd, free the
 * backing block allocations, then destroy the lock, condvar and the
 * queue struct itself. The queue must already be empty (asserted).
 *
 * BUGFIX: the recycle-list walk used to free the env/cmd of the NEXT
 * node (tmp) instead of the node being visited, so the first node's env
 * and cmd were leaked. Each node now releases its own resources. */
void queue_destroy(queue *queue)
{
    ErlNifMutex *lock;
    ErlNifCond *cond;
    int length;
    qitem *blocks = NULL;

    enif_mutex_lock(queue->lock);
    lock = queue->lock;
    cond = queue->cond;
    length = queue->length;

    /* Poison the struct so any late user fails fast. */
    queue->lock = NULL;
    queue->cond = NULL;
    queue->head = NULL;
    queue->tail = NULL;
    queue->length = -1;

    while (queue->reuseq != NULL) {
        qitem *cur = queue->reuseq;
        queue->reuseq = cur->next;
        if (cur->env != NULL)
            enif_free_env(cur->env);
        if (cur->cmd != NULL)
            enif_free(cur->cmd);
        /* Items are carved out of larger block allocations; only the
         * item that starts a block is individually freeable. */
        if (cur->blockStart) {
            cur->next = blocks;
            blocks = cur;
        }
    }
    while (blocks != NULL) {
        qitem *tmp = blocks->next;
        enif_free(blocks);
        blocks = tmp;
    }
    enif_mutex_unlock(lock);

    assert(length == 0 && "Attempting to destroy a non-empty queue.");
    enif_cond_destroy(cond);
    enif_mutex_destroy(lock);
    enif_free(queue);
}
/* Block until queue_send() deposits a message into the one-slot
 * mailbox, then take ownership of it and return it. */
void * queue_receive(queue *queue)
{
    void *msg;

    enif_mutex_lock(queue->lock);

    /* Wait for an item to become available. */
    while (queue->message == NULL)
        enif_cond_wait(queue->cond, queue->lock);

    msg = queue->message;
    queue->message = NULL;

    enif_mutex_unlock(queue->lock);
    return msg;
}
/* Allocate and register a new pending call for fun(args). The call's
 * env holds copies of the terms plus the {Id, Fun, Args} message later
 * sent to the server. Returns the registered call, or NULL on
 * allocation failure.
 * BUGFIX: the malloc() and enif_alloc_env() results were previously
 * used unchecked; both are now verified (the id counter is only
 * consumed once both allocations have succeeded). */
static ErlCall *CreateCall(ERL_NIF_TERM fun, ERL_NIF_TERM args)
{
    enif_mutex_lock(callsMutex);

    ErlCall *erlCall = (ErlCall *)malloc(sizeof(ErlCall));
    if (erlCall == NULL) {
        enif_mutex_unlock(callsMutex);
        return NULL;
    }
    erlCall->env = enif_alloc_env();
    if (erlCall->env == NULL) {
        free(erlCall);
        enif_mutex_unlock(callsMutex);
        return NULL;
    }

    erlCall->id = id++;
    ERL_NIF_TERM msgFun = enif_make_copy(erlCall->env, fun);
    ERL_NIF_TERM msgArgs = enif_make_copy(erlCall->env, args);
    ERL_NIF_TERM msgId = enif_make_int(erlCall->env, erlCall->id);
    erlCall->msg = enif_make_tuple3(erlCall->env, msgId, msgFun, msgArgs);

    erlCall->cond = enif_cond_create("erlcl_cond");
    erlCall->mutex = enif_mutex_create("erlcl_mutex");
    erlCall->complete = 0;

    HASH_ADD_INT(calls, id, erlCall);
    enif_mutex_unlock(callsMutex);
    return erlCall;
}
/* Call mod.fun(args) in the embedded Python interpreter and return the
 * result via pytherl_result().
 *
 * BUGFIX: the command buffer was sized as 2*(arg_size + strlen(fun)),
 * which ignored strlen(mod) and the fixed format text, so a long module
 * name could overflow it via sprintf(). The buffer is now sized from
 * every component and snprintf() bounds each write.
 * NOTE(review): assumes arg_size >= strlen(args) as in the original
 * sizing -- TODO confirm with callers. */
PyObject *pytherl_call(char *mod, char *fun, char *args,
                       int arg_size, ErlNifMutex *mutex)
{
    /* "import %s" and "__pytherl_result = %s(%s)" plus NUL: 32 bytes of
     * slack comfortably covers the fixed text of both commands. */
    size_t cmd_size = strlen(mod) + strlen(fun) + (size_t)arg_size + 32;
    char *command = (char *)calloc(cmd_size, sizeof(char));
    assert(command);

    enif_mutex_lock(mutex);
    if (!Py_IsInitialized())
        Py_Initialize();

    snprintf(command, cmd_size, "import %s", mod);
    PyRun_SimpleString(command);

    snprintf(command, cmd_size, "__pytherl_result = %s(%s)", fun, args);
    PyRun_SimpleString(command);
    enif_mutex_unlock(mutex);

    free(command);
    return pytherl_result(mutex);
}
/* Append a data pointer to the queue's tail and wake one waiting
 * consumer.
 * NOTE(review): the enif_alloc() result is used unchecked, so OOM
 * dereferences NULL; the void return type leaves no error channel --
 * confirm desired policy before changing this. */
void queue_put(queue_t* queue, void *data)
{
    queue_item_t* node = (queue_item_t*)enif_alloc(sizeof(queue_item_t));
    node->next = NULL;
    node->data = data;

    enif_mutex_lock(queue->mutex);
    if (queue->tail == NULL)
        queue->head = node;      /* empty queue: node is both ends */
    else
        queue->tail->next = node;
    queue->tail = node;
    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->mutex);
}
/* Record one call (name + optional raw argument blob) in the library's
 * shared call-history list.
 * The CallInfo is allocated as one chunk: the struct plus trailing
 * bytes for the NUL-terminated function name followed by the argument
 * blob; call->arg points into that trailing storage. Assumes CallInfo
 * ends in a char array that func_name indexes into -- TODO confirm
 * against the struct definition.
 * NOTE(review): the enif_alloc() result is used unchecked. */
static void add_call_with_arg(ErlNifEnv* env, NifModPrivData* data,
                              const char* func_name, const char* arg,
                              int arg_sz)
{
    CallInfo* call = (CallInfo*)enif_alloc(sizeof(CallInfo)+strlen(func_name) + arg_sz);
    strcpy(call->func_name, func_name);
    call->lib_ver = NIF_LIB_VER;
    /* Monotonic counters shared across library instances. */
    call->static_cntA = ++static_cntA;
    call->static_cntB = ++static_cntB;
    call->arg_sz = arg_sz;
    if (arg != NULL) {
        /* The blob is stored right after the name's NUL terminator. */
        call->arg = call->func_name + strlen(func_name) + 1;
        memcpy(call->arg, arg, arg_sz);
    }
    else {
        call->arg = NULL;
    }
    /* Push onto the shared history list under the library mutex. */
    enif_mutex_lock(data->mtx);
    call->next = data->call_history;
    data->call_history = call;
    enif_mutex_unlock(data->mtx);
}
/* 1: password, 2: dictpath */
/* NIF: run cracklib's FascistCheck() on a password against the given
 * dictionary path. Returns ok when the password passes, an error tuple
 * carrying cracklib's diagnostic when it fails, badarg for non-iolist
 * arguments, or enomem when the binaries cannot be grown.
 * FascistCheck runs under priv->lock -- presumably because the library
 * is not thread-safe; TODO confirm.
 * NOTE(review): if the second enif_realloc_binary() fails, the passwd
 * binary is neither zeroized nor released on that early return -- TODO
 * confirm whether that gap matters here.
 * NOTE(review): a plain memset() of secret data can in principle be
 * optimized away; an explicit_bzero-style call would be more robust. */
static ERL_NIF_TERM nif_check(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    PRIV *priv = NULL;
    ErlNifBinary passwd;
    ErlNifBinary path;
    char *err = NULL;

    priv = (PRIV *)enif_priv_data(env);

    if (!enif_inspect_iolist_as_binary(env, argv[0], &passwd))
        return enif_make_badarg(env);
    if (!enif_inspect_iolist_as_binary(env, argv[1], &path))
        return enif_make_badarg(env);

    /* NULL terminate strings */
    if (!enif_realloc_binary(&passwd, passwd.size+1))
        return atom_enomem;
    if (!enif_realloc_binary(&path, path.size+1))
        return atom_enomem;

    /* passwd.size is now equal to old passwd.size+1 */
    passwd.data[passwd.size-1] = '\0';
    path.data[path.size-1] = '\0';

    enif_mutex_lock(priv->lock);
    err = (char *)FascistCheck((char *)passwd.data, (char *)path.data);
    enif_mutex_unlock(priv->lock);

    /* Scrub the password copy before releasing it. */
    (void)memset(passwd.data, '\0', passwd.size);
    enif_release_binary(&passwd);
    enif_release_binary(&path);

    return ( (err == NULL) ? atom_ok : error_tuple(env, err));
}