static ERL_NIF_TERM dirty_sleeper(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid pid;

    assert(ERL_NIF_THR_DIRTY_CPU_SCHEDULER == enif_thread_type()
           || ERL_NIF_THR_DIRTY_IO_SCHEDULER == enif_thread_type());

    /* If we get a pid argument, it indicates a process involved in the
       test wants a message from us. Prior to the sleep we send a 'ready'
       message, and then after the sleep, send a 'done' message. */
    if (argc == 1 && enif_get_local_pid(env, argv[0], &pid))
        enif_send(env, &pid, NULL, enif_make_atom(env, "ready"));

#ifdef __WIN32__
    Sleep(2000);
#else
    sleep(2);
#endif

    if (argc == 1)
        enif_send(env, &pid, NULL, enif_make_atom(env, "done"));

    return enif_make_atom(env, "ok");
}
static struct zdoor_result *
zdoor_cb(struct zdoor_cookie *cookie, char *argp, size_t argp_sz)
{
    struct door *d;
    struct req *r;
    ErlNifEnv *env = enif_alloc_env();

    /* we kept the struct door in the biscuit */
    d = (struct door *)cookie->zdc_biscuit;

    /* this request */
    r = req_alloc();
    /* take the rlist lock first, then the req lock */
    enif_rwlock_rwlock(d->rlock);
    enif_mutex_lock(r->lock);
    req_insert(d, r);
    enif_rwlock_rwunlock(d->rlock);

    /* make the request into a binary term to put it into enif_send() */
    ErlNifBinary bin;
    enif_alloc_binary(argp_sz, &bin);
    memcpy(bin.data, argp, argp_sz);
    ERL_NIF_TERM binTerm = enif_make_binary(env, &bin);

    /* send a message back to the session owner */
    enif_send(NULL, &d->owner, env,
        enif_make_tuple3(env,
            enif_make_atom(env, "zdoor"),
            enif_make_resource(env, r),
            binTerm));

    /* now wait until the request has been replied to */
    enif_cond_wait(r->cond, r->lock);

    /* convert the reply into a zdoor_result */
    /* we have to use naked malloc() since libzdoor will use free() */
    struct zdoor_result *res = malloc(sizeof(struct zdoor_result));
    res->zdr_size = r->replen;
    res->zdr_data = r->rep;
    r->rep = NULL;
    r->replen = 0;

    /* yes, we have to unlock and re-lock to avoid lock inversion here */
    enif_mutex_unlock(r->lock);

    /* remove and free the struct req */
    enif_rwlock_rwlock(d->rlock);
    enif_mutex_lock(r->lock);
    req_remove(d, r);
    enif_rwlock_rwunlock(d->rlock);
    req_free(r);

    enif_free_env(env);

    return res;
}
static void* _reading_thread(void* arg)
{
    CAN_handle* handle = arg;
    ErlNifEnv* env = enif_alloc_env();

    handle->threaded = 1;
    while (handle->threaded) {
        int status;
        ERL_NIF_TERM msg = _receive_can_messages(env, handle, handle->chunk_size, handle->timeout);
        if (!enif_get_int(env, msg, &status)) {
            /* msg is not an integer status, so it carries received frames:
               forward them to the receiver process. The caller environment
               must be NULL when sending from a created thread. */
            enif_send(NULL, &handle->receiver, env,
                      enif_make_tuple3(env, can_atom, handle->devpath_bin, msg));
            enif_clear_env(env);
        } else if (status == 0) {
            enif_clear_env(env);
        } else {
            break;
        }
    }
    enif_free_env(env);
    return 0;
}
void* worker(void *arg)
{
    ctx_t *ctx;
    task_t *task;
    ERL_NIF_TERM result;

    ctx = static_cast<ctx_t*>(arg);

    while (true) {
        task = static_cast<task_t*>(async_queue_pop(ctx->queue));

        if (task->type == COMPRESS) {
            result = compress(task);
        } else if (task->type == DECOMPRESS) {
            result = decompress(task);
        } else if (task->type == SHUTDOWN) {
            break;
        } else {
            errx(1, "Unexpected task type: %i", task->type);
        }

        enif_send(NULL, &task->pid, task->env, result);
        cleanup_task(&task);
    }

    /* free the shutdown task that broke the loop */
    cleanup_task(&task);

    return NULL;
}
void ewpcap_error(EWPCAP_STATE *ep, char *msg)
{
    int rv = 0;

    if (ep->p == NULL)
        return;

    /* {ewpcap_error, Ref, Error} */
    rv = enif_send(
        NULL,
        &ep->pid,
        ep->env,
        enif_make_tuple3(ep->env,
            atom_ewpcap_error,
            enif_make_copy(ep->env, ep->ref),
            enif_make_string(ep->env, msg, ERL_NIF_LATIN1)
        )
    );

    /* the owner process is gone: stop the capture loop */
    if (!rv)
        pcap_breakloop(ep->p);

    enif_clear_env(ep->env);
}
int wait_pointer(couchfile_modify_request* rq, couchfile_pointer_info *ptr)
{
    if (ptr->writerq_resource == NULL)
        return 0;

    int ret = 0;
    btreenif_state *state = rq->globalstate;

    enif_mutex_lock(state->writer_cond.mtx);
    while (ptr->pointer == 0) {
        enif_cond_wait(state->writer_cond.cond, state->writer_cond.mtx);
        if (ptr->pointer == 0 &&
            !enif_send(rq->caller_env, &rq->writer, state->check_env, state->atom_heart)) {
            /* the writer process has died */
            ret = ERROR_WRITER_DEAD;
            break;
        }
        enif_clear_env(state->check_env);
    }

    if (ptr->pointer != 0) {
        enif_release_resource(ptr->writerq_resource);
    }

    enif_mutex_unlock(state->writer_cond.mtx);
    ptr->writerq_resource = NULL;
    return ret;
}
static int prefix_cb(void *data, const unsigned char *k, uint32_t k_len, void *val)
{
    callback_data *cb_data = data;
    art_elem_struct *elem = val;
    ErlNifBinary key, value;

    /* k_len includes a trailing byte that is not part of the key */
    enif_alloc_binary(k_len - 1, &key);
    memcpy(key.data, k, k_len - 1);

    enif_alloc_binary(elem->size, &value);
    memcpy(value.data, elem->data, elem->size);

    ErlNifEnv *msg_env = enif_alloc_env();
    if (msg_env == NULL)
        return mk_error(cb_data->env, "env_alloc_error");

    ERL_NIF_TERM caller_ref = enif_make_copy(msg_env, cb_data->caller_ref);
    ERL_NIF_TERM res = enif_make_tuple2(msg_env, caller_ref,
        enif_make_tuple2(msg_env,
            enif_make_binary(msg_env, &key),
            enif_make_binary(msg_env, &value)));

    if (!enif_send(cb_data->env, &cb_data->pid, msg_env, res)) {
        enif_free_env(msg_env);
        return -1;
    }

    enif_free_env(msg_env);
    return 0;
}
static ERL_NIF_TERM send_list_seq(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid to;
    ERL_NIF_TERM msg;
    ErlNifEnv* msg_env;
    int i, res;

    if (!enif_get_int(env, argv[0], &i)) {
        return enif_make_badarg(env);
    }
    if (argv[1] == atom_self) {
        enif_self(env, &to);
    } else if (!enif_get_local_pid(env, argv[1], &to)) {
        return enif_make_badarg(env);
    }

    msg_env = enif_alloc_env();
    msg = enif_make_list(msg_env, 0);
    for ( ; i > 0; i--) {
        msg = enif_make_list_cell(msg_env, enif_make_int(msg_env, i), msg);
    }

    res = enif_send(env, &to, msg_env, msg);
    enif_free_env(msg_env);

    return enif_make_tuple2(env, atom_ok, enif_make_int(env, res));
}
static void *
worker_run(void *arg)
{
    worker_t *w = (worker_t *) arg;
    msg_t *msg;
    int continue_running = 1;
    ERL_NIF_TERM answer;

    w->alive = 1;

    while (continue_running) {
        msg = queue_pop(w->q);

        if (msg->type == msg_stop) {
            continue_running = 0;
        } else {
            answer = make_answer(msg, evaluate_msg(msg, w));
            enif_send(NULL, &(msg->pid), msg->env, answer);
        }

        if (msg->res != NULL) {
            enif_release_resource(msg->res);
        }
        msg_destroy(msg);
    }

    w->alive = 0;

    return NULL;
}
static void generate_keypair_from_seed(brine_task_s *task)
{
    ErlNifEnv *env = task->env;
    ERL_NIF_TERM result;
    brine_keypair_s *keypair = (brine_keypair_s *) enif_alloc_resource(brine_keypair_resource,
                                                                       sizeof(brine_keypair_s));
    ErlNifBinary seed;

    if (!keypair) {
        result = BRINE_ERROR_NO_MEMORY;
    } else {
        if (!enif_inspect_binary(env, task->options.generate.seed, &seed)) {
            result = BRINE_ATOM_ERROR;
        } else if (!brine_init_keypair_from_seed(keypair, seed.data, seed.size)) {
            result = BRINE_ATOM_ERROR;
        } else {
            result = enif_make_tuple2(env,
                                      enif_make_copy(env, BRINE_ATOM_OK),
                                      make_keypair_record(env, keypair));
        }
        enif_release_resource(keypair);
    }

    enif_send(NULL, &task->owner, task->env, enif_make_tuple2(env, task->ref, result));
}
void send_result(vm_t & vm, std::string const& type, result_t const& result)
{
    boost::shared_ptr<ErlNifEnv> env(enif_alloc_env(), enif_free_env);
    erlcpp::tuple_t packet(2);
    packet[0] = erlcpp::atom_t(type);
    packet[1] = result;
    enif_send(NULL, vm.erl_pid().ptr(), env.get(), erlcpp::to_erl(env.get(), packet));
}
void send_result_caller(vm_t & vm, std::string const& type, result_t const& result, erlcpp::lpid_t const& caller)
{
    boost::shared_ptr<ErlNifEnv> env(enif_alloc_env(), enif_free_env);
    erlcpp::tuple_t packet(3);
    packet[0] = erlcpp::atom_t(type);
    packet[1] = result;
    packet[2] = caller;
    enif_send(NULL, caller.ptr(), env.get(), erlcpp::to_erl(env.get(), packet));
}
static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifEnv* menv;
    ErlNifPid pid;
    int ret;

    if (!enif_get_local_pid(env, argv[0], &pid)) {
        return enif_make_badarg(env);
    }
    menv = enif_alloc_env();
    ret = enif_send(env, &pid, menv, enif_make_copy(menv, argv[1]));
    enif_free_env(menv);
    return enif_make_int(env, ret);
}
static int whereis_send_internal(
    ErlNifEnv* env, int type, whereis_term_data_t* to, ERL_NIF_TERM msg)
{
    if (type == WHEREIS_LOOKUP_PID)
        return enif_send(env, &to->pid, NULL, msg)
            ? WHEREIS_SUCCESS : WHEREIS_ERROR_SEND;

    if (type == WHEREIS_LOOKUP_PORT)
        return enif_port_command(env, &to->port, NULL, msg)
            ? WHEREIS_SUCCESS : WHEREIS_ERROR_SEND;

    return WHEREIS_ERROR_TYPE;
}
static ERL_NIF_TERM dirty_call_while_terminated_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid self;
    ERL_NIF_TERM result, self_term;
    ErlNifPid to;
    ErlNifEnv* menv;
    int res;

    if (!enif_get_local_pid(env, argv[0], &to))
        return enif_make_badarg(env);

    if (!enif_self(env, &self))
        return enif_make_badarg(env);

    self_term = enif_make_pid(env, &self);

    /* terms must not cross environments, so copy self_term into menv */
    menv = enif_alloc_env();
    result = enif_make_tuple2(menv, enif_make_atom(menv, "dirty_alive"),
                              enif_make_copy(menv, self_term));
    res = enif_send(env, &to, menv, result);
    enif_free_env(menv);
    if (!res)
        return enif_make_badarg(env);

    /* Wait until we have been killed */
    while (enif_is_process_alive(env, &self))
        ;

    result = enif_make_tuple2(env,
                              enif_make_atom(env, "dirty_dead"),
                              self_term);
    res = enif_send(env, &to, NULL, result);

#ifdef __WIN32__
    Sleep(1000);
#else
    sleep(1);
#endif

    return enif_make_atom(env, "ok");
}
static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    union { void* vp; struct make_term_info* p; } mti;
    ErlNifPid to;
    ERL_NIF_TERM copy;
    int res;

    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
        || !enif_get_local_pid(env, argv[1], &to)) {
        return enif_make_badarg(env);
    }
    copy = enif_make_copy(env, mti.p->blob);
    res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
    return enif_make_tuple3(env, atom_ok, enif_make_int(env, res), copy);
}
static void *
esqlite_connection_run(void *arg)
{
    esqlite_connection *db = (esqlite_connection *) arg;
    esqlite_command *cmd;
    int continue_running = 1;

    while (continue_running) {
        cmd = queue_pop(db->commands);

        if (cmd->type == cmd_stop) {
            continue_running = 0;
        } else if (cmd->type == cmd_notification) {
            enif_send(NULL, &db->notification_pid, cmd->env, cmd->arg);
        } else {
            enif_send(NULL, &cmd->pid, cmd->env, make_answer(cmd, evaluate_command(cmd, db)));
        }

        command_destroy(cmd);
    }

    return NULL;
}
void* threaded_sender(void *arg)
{
    union { void* vp; struct make_term_info* p; } mti;

    mti.vp = arg;

    /* wait until we are told to send */
    enif_mutex_lock(mti.p->mtx);
    while (!mti.p->send_it) {
        enif_cond_wait(mti.p->cond, mti.p->mtx);
    }
    mti.p->send_it = 0;
    enif_mutex_unlock(mti.p->mtx);

    mti.p->send_res = enif_send(NULL, &mti.p->to_pid, mti.p->dst_env, mti.p->blob);
    return NULL;
}
static ERL_NIF_TERM ErlangCall(ErlNifEnv *env, ERL_NIF_TERM fun, ERL_NIF_TERM args)
{
    ErlCall *erlCall = CreateCall(fun, args);

    enif_mutex_lock(erlCall->mutex);
    enif_send(env, &server, erlCall->env, erlCall->msg);
    /* block until the server marks the call complete */
    while (!erlCall->complete) {
        enif_cond_wait(erlCall->cond, erlCall->mutex);
    }
    enif_mutex_unlock(erlCall->mutex);

    ERL_NIF_TERM result = enif_make_copy(env, erlCall->result);
    DestroyCall(erlCall);
    return result;
}
static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM result;
    ErlNifPid pid;
    int res;

    if (!enif_get_local_pid(env, argv[0], &pid))
        return enif_make_badarg(env);

    result = enif_make_tuple2(env,
                              enif_make_atom(env, "ok"),
                              enif_make_pid(env, &pid));
    res = enif_send(env, &pid, NULL, result);

    if (!res)
        return enif_make_badarg(env);
    else
        return result;
}
static void owner_death_callback(ErlNifEnv* env, void* obj, ErlNifPid* pid, ErlNifMonitor* mon)
{
    efile_data_t *d = (efile_data_t*)obj;

    (void)env;
    (void)pid;
    (void)mon;

    for (;;) {
        enum efile_state_t previous_state;

        previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
            EFILE_STATE_CLOSED, EFILE_STATE_IDLE);

        switch (previous_state) {
        case EFILE_STATE_IDLE:
            {
                /* We cannot close the file here as that could block a normal
                 * scheduler, so we tell erts_prim_file to do it for us.
                 *
                 * This can in turn become a bottleneck (especially in cases
                 * like NFS failure), but it's less problematic than blocking
                 * thread progress. */
                ERL_NIF_TERM message, file_ref;

                file_ref = enif_make_resource(env, d);
                message = enif_make_tuple2(env, am_close, file_ref);

                if (!enif_send(env, &erts_prim_file_pid, NULL, message)) {
                    ERTS_INTERNAL_ERROR("Failed to defer prim_file close.");
                }

                return;
            }
        case EFILE_STATE_CLOSE_PENDING:
        case EFILE_STATE_CLOSED:
            /* We're either already closed or managed to mark ourselves for
             * closure in the previous iteration. */
            return;
        case EFILE_STATE_BUSY:
            /* Schedule ourselves to be closed once the current operation
             * finishes, retrying the [IDLE -> CLOSED] transition in case we
             * narrowly passed the [BUSY -> IDLE] one. */
            erts_atomic32_cmpxchg_nob(&d->state,
                EFILE_STATE_CLOSE_PENDING, EFILE_STATE_BUSY);
            break;
        }
    }
}
void nif_write(couchfile_modify_request *rq, couchfile_pointer_info *dst,
               nif_writerq *wrq, ssize_t size)
{
    dst->writerq_resource = wrq;
    dst->pointer = 0;
    wrq->ptr = dst;

    ErlNifEnv* msg_env = enif_alloc_env();
    ERL_NIF_TERM msg_term = enif_make_tuple4(msg_env,
        get_atom(msg_env, "append_bin_btnif"),
        get_atom(msg_env, "snappy"), /* compression type */
        enif_make_resource(msg_env, wrq),
        enif_make_resource_binary(msg_env, wrq, &wrq->buf, size));

    enif_send(rq->caller_env, &rq->writer, msg_env, msg_term);
    enif_free_env(msg_env);
    enif_release_resource(wrq);
}
static ERL_NIF_TERM elibart_prefix_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    art_tree* t;
    ErlNifBinary key;
    callback_data cb_data;

    /* extract arguments: art_tree, key, caller ref, pid */
    if (argc != 4)
        return enif_make_badarg(env);

    if (!enif_get_resource(env, argv[0], elibart_RESOURCE, (void**) &t))
        return enif_make_badarg(env);

    if (!enif_inspect_binary(env, argv[1], &key))
        return enif_make_badarg(env);

    cb_data.env = env;

    if (!enif_is_pid(env, argv[3]))
        return mk_error(env, "not_a_pid");

    if (!enif_get_local_pid(env, argv[3], &cb_data.pid))
        return mk_error(env, "not_a_local_pid");

    cb_data.caller_ref = argv[2];

    /* TODO: this should run in a worker thread since it's a long operation (?) */
    if (art_iter_prefix(t, key.data, key.size, prefix_cb, &cb_data))
        return mk_error(env, "art_prefix_search");

    ErlNifEnv *msg_env = enif_alloc_env();
    if (msg_env == NULL)
        return mk_error(env, "env_alloc_error");

    ERL_NIF_TERM caller_ref = enif_make_copy(msg_env, argv[2]);
    ERL_NIF_TERM res = enif_make_tuple2(msg_env, caller_ref, mk_atom(msg_env, "ok"));

    if (!enif_send(env, &cb_data.pid, msg_env, res)) {
        enif_free_env(msg_env);
        return mk_error(env, "art_prefix_search");
    }

    enif_free_env(msg_env);
    return mk_atom(env, "ok");
}
static ERL_NIF_TERM send_new_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid to;
    ERL_NIF_TERM msg, copy;
    ErlNifEnv* msg_env;
    int res;

    if (!enif_get_local_pid(env, argv[0], &to)) {
        return enif_make_badarg(env);
    }
    msg_env = enif_alloc_env();
    msg = make_blob(env, msg_env, argv[1]);
    copy = make_blob(env, env, argv[1]);
    res = enif_send(env, &to, msg_env, msg);
    enif_free_env(msg_env);
    return enif_make_tuple3(env, atom_ok, enif_make_int(env, res), copy);
}
static void salt_reply_error(struct salt_msg *sm, const char *why)
{
    nif_heap_t *hp = sm->msg_heap;
    nif_term_t tag;
    nif_term_t rsn;
    nif_term_t res;
    nif_term_t msg;

    /* From_pid ! {Mref, {error, Rsn}} */
    tag = enif_make_atom(hp, "error");
    rsn = enif_make_atom(hp, why);
    res = enif_make_tuple2(hp, tag, rsn);
    msg = enif_make_tuple2(hp, sm->msg_mref, res);

    (void)enif_send(NULL, &sm->msg_from, hp, msg);
}
void nif_thread_handle(ErlNifEnv* env, nif_thread_state* st, nif_thread_message* msg)
{
    if (msg->from_pid == NULL) {
        /* cast: no reply expected */
        void (*cast)(nif_thread_arg*) = msg->function;
        cast(msg->args);
    } else {
        /* call: send the return value back to the caller */
        ERL_NIF_TERM (*call)(ErlNifEnv*, nif_thread_arg*) = msg->function;
        ERL_NIF_TERM ret = call(env, msg->args);

        enif_send(NULL, msg->from_pid, env,
            enif_make_tuple2(env, atom__nif_thread_ret_, ret));

        enif_clear_env(env);
    }

    nif_thread_message_free(msg);
}
static void keypair_to_binary(brine_task_s *task)
{
    ErlNifEnv *env = task->env;
    brine_keypair_s *keypair = task->options.signature.keys;
    ErlNifBinary blob;
    ERL_NIF_TERM result;

    if (!enif_alloc_binary(BRINE_BLOB_SZ, &blob)) {
        result = BRINE_ERROR_NO_MEMORY;
    } else {
        brine_serialize_keypair(keypair, blob.data, blob.size);
        /* enif_make_binary takes ownership of blob, so it must not be
           released separately afterwards */
        result = enif_make_tuple2(env,
                                  enif_make_copy(env, BRINE_ATOM_OK),
                                  enif_make_binary(env, &blob));
    }
    enif_release_resource((void *) keypair);
    enif_send(NULL, &task->owner, task->env, enif_make_tuple2(env, task->ref, result));
}
static void salt_reply_bytes(struct salt_msg *sm, nif_bin_t *bs)
{
    nif_heap_t *hp = sm->msg_heap;
    nif_term_t tag;
    nif_term_t res;
    nif_term_t msg;
    nif_term_t bb;

    /* From_pid ! {Mref, {ok, Bytes}} */
    bb = enif_make_binary(hp, bs);
    tag = enif_make_atom(hp, "ok");
    res = enif_make_tuple2(hp, tag, bb);
    msg = enif_make_tuple2(hp, sm->msg_mref, res);

    (void)enif_send(NULL, &sm->msg_from, hp, msg);
}
static void* thr_main(void* obj)
{
    state_t* state = (state_t*) obj;
    ErlNifEnv* env = enif_alloc_env();
    ErlNifPid* pid;
    ERL_NIF_TERM msg;

    /* pop subscriber pids off the queue and send each one a random number */
    while ((pid = queue_pop(state->queue)) != NULL) {
        msg = enif_make_int64(env, random());
        enif_send(NULL, pid, env, msg);
        enif_free(pid);
        enif_clear_env(env);
    }

    return NULL;
}
static void* worker(void *obj)
{
    handle_t* handle = (handle_t*)obj;
    task_t* task;
    ErlNifEnv* env = enif_alloc_env();

    /* dispatch each queued task through the call table and send
       the result back to the requesting process */
    while ((task = (task_t*)queue_get(handle->queue)) != NULL) {
        ERL_NIF_TERM result = handle->calltable[task->cmd](env, handle, task->args);
        enif_send(NULL, task->pid, env, result);
        enif_free(task->pid);
        enif_free(task->args);
        enif_free(task);
        enif_clear_env(env);
    }

    return NULL;
}