static void* _reading_thread (void* arg) { CAN_handle* handle = arg; ErlNifEnv* env = enif_alloc_env(); //ERL_NIF_TERM device = enif_make_int(env, handle->device); handle->threaded = 1; while (handle->threaded) { int status; ERL_NIF_TERM msg = _receive_can_messages(env, handle, handle->chunk_size, handle->timeout); if (!enif_get_int(env, msg, &status)) { enif_send(env, &handle->receiver, env, enif_make_tuple3(env, can_atom, handle->devpath_bin, msg)); enif_clear_env(env); } else if (status == 0) { enif_clear_env(env); } else break; } enif_free_env(env); return 0; }
/* Report a pcap error to the owning Erlang process as
 * {ewpcap_error, Ref, Error}.  If the owner cannot be reached (send
 * fails), stop the capture loop.  No-op when no pcap handle is open. */
void ewpcap_error(EWPCAP_STATE *ep, char *msg)
{
    ERL_NIF_TERM report;

    if (ep->p == NULL)
        return;

    /* {ewpcap_error, Ref, Error} */
    report = enif_make_tuple3(ep->env,
            atom_ewpcap_error,
            enif_make_copy(ep->env, ep->ref),
            enif_make_string(ep->env, msg, ERL_NIF_LATIN1));

    if (enif_send(NULL, &ep->pid, ep->env, report) == 0)
        pcap_breakloop(ep->p);

    enif_clear_env(ep->env);
}
/*
 * NIF unload callback: shut down every active cache, then tear down the
 * global state.
 *
 * Each cache is marked FL_DYING under its ctrl_lock, its background
 * thread is woken via check_cond and then joined.  The global atom_lock
 * is released while signalling/joining — presumably because the dying
 * bg_thread needs atom_lock to unlink its node from the tree (TODO
 * confirm) — which is also why the loop re-takes the lock and re-reads
 * RB_MIN each iteration instead of iterating the tree normally.
 */
static void unload_cb(ErlNifEnv *env, void *priv_data)
{
    struct atom_node *an;

    enif_rwlock_rwlock(gbl->atom_lock);
    /* when we unload, we want to tell all of the active caches to die,
       then join() their bg_threads to wait until they're completely gone */
    while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) {
        struct cache *c = an->cache;

        /* drop the global lock before touching the cache; re-taken below */
        enif_rwlock_rwunlock(gbl->atom_lock);

        enif_mutex_lock(c->ctrl_lock);
        c->flags |= FL_DYING;               /* ask the bg thread to exit */
        enif_mutex_unlock(c->ctrl_lock);
        enif_cond_broadcast(c->check_cond); /* wake it so it sees the flag */

        enif_thread_join(c->bg_thread, NULL);

        enif_rwlock_rwlock(gbl->atom_lock);
    }
    enif_rwlock_rwunlock(gbl->atom_lock);

    /* all caches are gone; destroy the global structures */
    enif_rwlock_destroy(gbl->atom_lock);
    enif_clear_env(gbl->atom_env);
    enif_free(gbl);
    gbl = NULL;
}
/*
 * Block until the writer process has filled in ptr->pointer for a queued
 * write, then release the write-queue resource.
 *
 * Returns 0 on success, ERROR_WRITER_DEAD if the writer process can no
 * longer receive messages, and 0 immediately when there is no outstanding
 * write (writerq_resource == NULL).
 *
 * While waiting, each wakeup that still finds pointer == 0 sends a
 * heartbeat atom to the writer; a failed send means the writer died and
 * we bail out (leaving writerq_resource un-released in that case —
 * NOTE(review): presumably the writer owned it; confirm against callers).
 */
int wait_pointer(couchfile_modify_request* rq, couchfile_pointer_info *ptr)
{
    if(ptr->writerq_resource == NULL)
        return 0;

    int ret = 0;
    btreenif_state *state = rq->globalstate;

    enif_mutex_lock(state->writer_cond.mtx);
    while(ptr->pointer == 0)
    {
        enif_cond_wait(state->writer_cond.cond, state->writer_cond.mtx);
        if(ptr->pointer == 0 &&
           !enif_send(rq->caller_env, &rq->writer,
                      state->check_env, state->atom_heart))
        {
            //The writer process has died
            ret = ERROR_WRITER_DEAD;
            break;
        }
        /* a successful enif_send already cleared check_env; clearing
           again is harmless */
        enif_clear_env(state->check_env);
    }

    if(ptr->pointer != 0)
    {
        enif_release_resource(ptr->writerq_resource);
    }

    enif_mutex_unlock(state->writer_cond.mtx);
    ptr->writerq_resource = NULL;
    return ret;
}
/* Unlink a call record from the global hash table and free everything it
 * owns (env, mutex, cond, the record itself).  callsMutex guards the
 * table for the whole teardown. */
static void DestroyCall(ErlCall *erlCall)
{
    ErlNifEnv *callEnv = erlCall->env;

    enif_mutex_lock(callsMutex);
    HASH_DEL(calls, erlCall);

    enif_clear_env(callEnv);
    enif_free_env(callEnv);
    enif_mutex_destroy(erlCall->mutex);
    enif_cond_destroy(erlCall->cond);
    free(erlCall);
    enif_mutex_unlock(callsMutex);
}
/* NIF: replace the queue contents of the handle in argv[0] with the term
 * in argv[1].  Takes the handle's write lock, drops every term owned by
 * the handle's private env, then stores the new value.  Returns 'ok', or
 * badarg if argv[0] is not a robin_q resource. */
static ERL_NIF_TERM robin_q_set(ErlNifEnv *env, int argc,
                                const ERL_NIF_TERM argv[])
{
    robin_q_handle *handle;

    if (!enif_get_resource(env, argv[0], robin_q_RESOURCE, (void **)&handle))
        return enif_make_badarg(env);

    enif_rwlock_rwlock(handle->lock);
    enif_clear_env(handle->env);
    do_set(handle, argv[1]);
    enif_rwlock_rwunlock(handle->lock);

    return enif_make_atom(env, "ok");
}
/* NIF: reset the message-env resource in argv[0] — clear its destination
 * env, drop the reuse counters, and reinitialize blob to the empty list.
 * Returns 'ok', or badarg if argv[0] is not a msgenv resource. */
static ERL_NIF_TERM clear_msgenv(ErlNifEnv *env, int argc,
                                 const ERL_NIF_TERM argv[])
{
    /* union lets enif_get_resource write a void* while we use the
       typed pointer */
    union {
        void *vp;
        struct make_term_info *p;
    } mti;

    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp))
        return enif_make_badarg(env);

    struct make_term_info *info = mti.p;
    enif_clear_env(info->dst_env);
    info->reuse_pull = 0;
    info->reuse_push = 0;
    info->blob = enif_make_list(info->dst_env, 0);
    return atom_ok;
}
/* Execute one queued thread message.  A message with from_pid == NULL is
 * a cast (fire-and-forget); otherwise it is a call whose result is sent
 * back to the caller as {'_nif_thread_ret_', Result}.  The message is
 * freed in both cases. */
void nif_thread_handle(ErlNifEnv* env, nif_thread_state* st,
                       nif_thread_message* msg)
{
    if (msg->from_pid != NULL) {
        ERL_NIF_TERM (*call)(ErlNifEnv*, nif_thread_arg*) = msg->function;
        ERL_NIF_TERM result = call(env, msg->args);

        enif_send(NULL, msg->from_pid, env,
                  enif_make_tuple2(env, atom__nif_thread_ret_, result));
        enif_clear_env(env);
    } else {
        void (*cast)(nif_thread_arg*) = msg->function;
        cast(msg->args);
    }

    nif_thread_message_free(msg);
}
/* Thread main loop: pop subscriber pids off the state queue and send each
 * one a random integer.  A NULL pid from the queue terminates the thread. */
static void* thr_main(void* obj)
{
    state_t* state = (state_t*) obj;
    ErlNifEnv* env = enif_alloc_env();
    ErlNifPid* pid;

    for (;;) {
        pid = queue_pop(state->queue);
        if (pid == NULL)
            break;

        enif_send(NULL, pid, env, enif_make_int64(env, random()));
        enif_free(pid);
        enif_clear_env(env);
    }

    return NULL;
}
static void* worker(void *obj) { handle_t* handle = (handle_t*)obj; task_t* task; ErlNifEnv* env = enif_alloc_env(); while ((task = (task_t*)queue_get(handle->queue)) != NULL) { ERL_NIF_TERM result = handle->calltable[task->cmd](env, handle, task->args); enif_send(NULL, task->pid, env, result); enif_free(task->pid); enif_free(task->args); enif_free(task); enif_clear_env(env); } return NULL; }
/*
 * pcap capture callback: copy one captured packet into an Erlang binary
 * and send it to the owning process as
 * {ewpcap, Ref, DatalinkType, {MegaSecs, Secs, MicroSecs}, ActualLen, Packet}.
 *
 * On allocation failure, or if the owner cannot receive the message,
 * the capture loop is stopped via pcap_breakloop.  Ownership of the
 * binary transfers to ep->env through enif_make_binary.
 */
void ewpcap_send(u_char *user, const struct pcap_pkthdr *h,
                 const u_char *bytes)
{
    EWPCAP_STATE *ep = (EWPCAP_STATE *)user;
    ErlNifBinary buf = {0};
    int rv = 0;

    /* XXX no way to indicate an error? */
    if (ep->p == NULL)
        return;

    /* copy only the captured portion (caplen), not the full wire length */
    if (!enif_alloc_binary(h->caplen, &buf)) {
        pcap_breakloop(ep->p);
        return;
    }

    (void)memcpy(buf.data, bytes, buf.size);

    /* {ewpcap, Ref, DatalinkType, Time, ActualLength, Packet} */
    rv = enif_send(
            NULL,
            &ep->pid,
            ep->env,
            enif_make_tuple6(ep->env,
                atom_ewpcap,
                enif_make_copy(ep->env, ep->ref),
                enif_make_int(ep->env, ep->datalink),
                /* erlang:now/0-style {MegaSecs, Secs, MicroSecs} triple.
                 * NOTE(review): the abs() on MegaSecs looks odd — tv_sec
                 * should already be non-negative; confirm intent. */
                enif_make_tuple3(ep->env,
                    enif_make_ulong(ep->env, abs(h->ts.tv_sec / 1000000)),
                    enif_make_ulong(ep->env, h->ts.tv_sec % 1000000),
                    enif_make_ulong(ep->env, h->ts.tv_usec)
                ),
                enif_make_ulong(ep->env, h->len),
                enif_make_binary(ep->env, &buf)
            )
         );

    /* send failed: owner is gone, stop capturing */
    if (!rv)
        pcap_breakloop(ep->p);

    enif_clear_env(ep->env);
}
/* Free a process-independent environment created by enif_alloc_env().
 * Clearing first releases every term and off-heap object the env owns;
 * the env structure itself is then returned to the ERTS allocator. */
void enif_free_env(ErlNifEnv* env)
{
    enif_clear_env(env);
    erts_free(ERTS_ALC_T_NIF, env);
}