/*
 * Pop a reusable queue item, allocating a fresh block of BLOCK_SIZE items
 * when the reuse list is empty.  The first item of a new block is handed to
 * the caller (marked blockStart so the base pointer can be freed later);
 * the remaining items are pushed onto the reuse list.
 * Returns an item whose ->env is a valid NIF environment and ->next is NULL.
 */
qitem* queue_get_item(queue *queue)
{
    qitem *entry = NULL;
    int i;

    /* BUG FIX: the original spun on enif_mutex_trylock() in an empty loop,
     * burning a CPU while the lock was contended.  enif_mutex_lock() blocks
     * properly and acquires the same lock. */
    enif_mutex_lock(queue->lock);
    if (queue->reuseq != NULL)
    {
        /* Reuse list non-empty: unlink its head. */
        entry = queue->reuseq;
        queue->reuseq = queue->reuseq->next;
        entry->next = NULL;
    }
    else
    {
        /* Allocate a whole block at once; items are recycled rather than
         * individually freed, so only the block's first item gets
         * blockStart = 1 to identify the allocation's base pointer. */
        entry = enif_alloc(sizeof(qitem)*BLOCK_SIZE);
        memset(entry, 0, sizeof(qitem)*BLOCK_SIZE);
        for (i = 1; i < BLOCK_SIZE; i++)
        {
            entry[i].env = enif_alloc_env();
            entry[i].next = queue->reuseq;
            queue->reuseq = &entry[i];
        }
        entry->env = enif_alloc_env();
        entry->blockStart = 1;
    }
    enif_mutex_unlock(queue->lock);
    return entry;
}
/*
 * libzdoor callback: runs on a zdoor service thread (not an Erlang
 * scheduler thread).  Forwards the door payload to the Erlang session
 * owner as {zdoor, ReqResource, PayloadBin}, blocks until the request is
 * replied to, then returns the reply to libzdoor.
 *
 * Lock ordering: d->rlock (rwlock over the request list) is always taken
 * before r->lock, which is why the teardown below unlocks r->lock and
 * re-acquires both in order.
 *
 * NOTE(review): enif_cond_wait() is not wrapped in a predicate loop; a
 * spurious wakeup would read r->rep/r->replen before the reply is set —
 * confirm the signaller guarantees the reply is present.
 */
static struct zdoor_result * zdoor_cb(struct zdoor_cookie *cookie, char *argp, size_t argp_sz)
{
	struct door *d;
	struct req *r;
	ErlNifEnv *env = enif_alloc_env();

	/* we kept the struct door in the biscuit */
	d = (struct door *)cookie->zdc_biscuit;

	/* this request */
	r = req_alloc();

	/* take the rlist lock first, then the req lock */
	enif_rwlock_rwlock(d->rlock);
	enif_mutex_lock(r->lock);
	req_insert(d, r);
	enif_rwlock_rwunlock(d->rlock);

	/* make the request into a binary term to put it into enif_send() */
	ErlNifBinary bin;
	enif_alloc_binary(argp_sz, &bin);
	memcpy(bin.data, argp, argp_sz);
	ERL_NIF_TERM binTerm = enif_make_binary(env, &bin);

	/* send a message back to the session owner */
	enif_send(NULL, &d->owner, env,
	    enif_make_tuple3(env,
	        enif_make_atom(env, "zdoor"),
	        enif_make_resource(env, r),
	        binTerm));

	/* now wait until the request has been replied to */
	enif_cond_wait(r->cond, r->lock);

	/* convert the reply into a zdoor_result */
	/* we have to use naked malloc() since libzdoor will use free() */
	struct zdoor_result *res = malloc(sizeof(struct zdoor_result));
	res->zdr_size = r->replen;
	res->zdr_data = r->rep;

	/* ownership of the reply buffer moves to res; clear it on the req */
	r->rep = NULL;
	r->replen = 0;

	/* yes, we have to unlock and re-lock to avoid lock inversion here */
	enif_mutex_unlock(r->lock);

	/* remove and free the struct req */
	enif_rwlock_rwlock(d->rlock);
	enif_mutex_lock(r->lock);
	req_remove(d, r);
	enif_rwlock_rwunlock(d->rlock);
	req_free(r);

	enif_free_env(env);
	return res;
}
static int prefix_cb(void *data, const unsigned char *k, uint32_t k_len, void *val) { callback_data *cb_data = data; art_elem_struct *elem = val; ErlNifBinary key, value; enif_alloc_binary(k_len - 1, &key); memcpy(key.data, k, k_len - 1); enif_alloc_binary(elem->size, &value); memcpy(value.data, elem->data, elem->size); ErlNifEnv *msg_env = enif_alloc_env(); if(msg_env == NULL) return mk_error(cb_data->env, "env_alloc_error");; ERL_NIF_TERM caller_ref = enif_make_copy(msg_env, cb_data->caller_ref); ERL_NIF_TERM res = enif_make_tuple2(msg_env, caller_ref, enif_make_tuple2(msg_env, enif_make_binary(msg_env, &key), enif_make_binary(msg_env, &value))); if(!enif_send(cb_data->env, &cb_data->pid, msg_env, res)) { enif_free(msg_env); return -1; } enif_free(msg_env); return 0; }
/* OGRGeometryH OGR_F_GetGeometryRef(OGRFeatureH hFeat)

NIF: f_get_geometry_ref(Feature) -> {ok, Geometry} | undefined.
Wraps the feature's geometry handle in a resource whose private env keeps
a copy of the feature term, so the feature is not garbage collected while
the geometry is alive.

{ok, DataSource} = lgeo_ogr:open("test/polygon.shp"),
{ok, Layer} = lgeo_ogr:ds_get_layer(DataSource, 0),
{ok, Feature} = lgeo_ogr:l_get_feature(Layer, 0),
{ok, Geometry} = lgeo_ogr:f_get_geometry_ref(Feature).

*/
static ERL_NIF_TERM
f_get_geometry_ref(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    EnvFeature_t **feature;

    if (argc != 1)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], OGR_F_RESOURCE, (void**)&feature))
        return enif_make_badarg(env);

    OGRGeometryH geom = OGR_F_GetGeometryRef((*feature)->obj);
    if (geom == NULL)
        return enif_make_atom(env, "undefined");

    /* Resource holds a pointer to a separately allocated wrapper struct. */
    EnvGeometry_t **geometry =
        enif_alloc_resource(OGR_G_RESOURCE, sizeof(EnvGeometry_t*));
    ErlNifEnv *geometry_env = enif_alloc_env();
    *geometry = (EnvGeometry_t*) enif_alloc(sizeof(EnvGeometry_t));
    (*geometry)->env = geometry_env;
    (*geometry)->obj = geom;

    /* Save copy of feature so it is not garbage collected. */
    enif_make_copy(geometry_env, argv[0]);

    ERL_NIF_TERM eterm = enif_make_resource(env, geometry);
    enif_release_resource(geometry);
    return enif_make_tuple2(env, enif_make_atom(env, "ok"), eterm);
}
/* OGRFeatureDefnH OGR_L_GetLayerDefn(OGRLayerH hLayer)

NIF: l_get_layer_defn(Layer) -> {ok, FeatureDefn}.
Wraps the layer's feature definition in a resource whose private env keeps
a copy of the layer term, so the layer is not garbage collected while the
definition is alive.

{ok, DataSource} = lgeo_ogr:open("test/polygon.shp"),
{ok, Layer} = lgeo_ogr:ds_get_layer(DataSource, 0),
{ok, FeatureDefn} = lgeo_ogr:l_get_layer_defn(Layer).

*/
static ERL_NIF_TERM
l_get_layer_defn(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    EnvLayer_t **layer;

    if (argc != 1)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], OGR_L_RESOURCE, (void**)&layer))
        return enif_make_badarg(env);

    OGRFeatureDefnH feat_defn = OGR_L_GetLayerDefn((*layer)->obj);

    /* Resource holds a pointer to a separately allocated wrapper struct. */
    EnvFeatureDefn_t **feature_defn =
        enif_alloc_resource(OGR_FD_RESOURCE, sizeof(EnvFeatureDefn_t*));
    ErlNifEnv *feature_defn_env = enif_alloc_env();
    *feature_defn = (EnvFeatureDefn_t*) enif_alloc(sizeof(EnvFeatureDefn_t));
    (*feature_defn)->env = feature_defn_env;
    (*feature_defn)->obj = feat_defn;

    /* Save copy of layer so it is not garbage collected. */
    enif_make_copy(feature_defn_env, argv[0]);

    ERL_NIF_TERM eterm = enif_make_resource(env, feature_defn);
    enif_release_resource(feature_defn);
    return enif_make_tuple2(env, enif_make_atom(env, "ok"), eterm);
}
static void* _reading_thread (void* arg) { CAN_handle* handle = arg; ErlNifEnv* env = enif_alloc_env(); //ERL_NIF_TERM device = enif_make_int(env, handle->device); handle->threaded = 1; while (handle->threaded) { int status; ERL_NIF_TERM msg = _receive_can_messages(env, handle, handle->chunk_size, handle->timeout); if (!enif_get_int(env, msg, &status)) { enif_send(env, &handle->receiver, env, enif_make_tuple3(env, can_atom, handle->devpath_bin, msg)); enif_clear_env(env); } else if (status == 0) { enif_clear_env(env); } else break; } enif_free_env(env); return 0; }
/*
 * Build a task of the given type for the given pid: copies orig_term and
 * ref into the task's private env and captures the binary payload of the
 * copied term in task->data.  On env-allocation or binary-inspection
 * failure the task is torn down via cleanup_task() and its (cleared)
 * value is returned.
 */
task_t* init_task(task_type_t type, ERL_NIF_TERM ref, ErlNifPid pid, ERL_NIF_TERM orig_term)
{
    task_t *task = init_empty_task(type);
    task->pid = pid;

    task->env = enif_alloc_env();
    if (task->env == NULL) {
        cleanup_task(&task);
        return task;
    }

    ERL_NIF_TERM copied = enif_make_copy(task->env, orig_term);
    if (!enif_inspect_binary(task->env, copied, &task->data)) {
        cleanup_task(&task);
        return task;
    }

    task->ref = enif_make_copy(task->env, ref);
    return task;
}
/** * Alloc atoms */ int i18n_atom_load(ErlNifEnv* /*env*/, void ** /*priv_data*/, ERL_NIF_TERM /*load_info*/) { global_atom_env = enif_alloc_env(); ATOM_TRUE = enif_make_atom(global_atom_env, "true"); ATOM_FALSE = enif_make_atom(global_atom_env, "false"); ATOM_EQUAL = enif_make_atom(global_atom_env, "equal"); ATOM_GREATER = enif_make_atom(global_atom_env, "greater"); ATOM_LESS = enif_make_atom(global_atom_env, "less"); ATOM_OK = enif_make_atom(global_atom_env, "ok"); ATOM_COUNT = enif_make_atom(global_atom_env, "count"); ATOM_RESOURCE = enif_make_atom(global_atom_env, "resource"); ATOM_SEARCH = enif_make_atom(global_atom_env, "search"); #if U_IS_BIG_ENDIAN ATOM_ENDIAN = enif_make_atom(global_atom_env, "big"); #else ATOM_ENDIAN = enif_make_atom(global_atom_env, "little"); #endif ATOM_ICU_VERSION = enif_make_atom(global_atom_env, U_ICU_VERSION); ATOM_UNICODE_VERSION = enif_make_atom(global_atom_env, U_UNICODE_VERSION); return 0; }
/*
 * NIF: send_list_seq(N, To) -> {ok, SendResult}.
 * Builds the list [1..N] in a private env and sends it to To; the atom
 * 'self' addresses the calling process.  SendResult is enif_send()'s
 * integer return value.
 */
static ERL_NIF_TERM send_list_seq(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid to;
    ErlNifEnv* msg_env;
    ERL_NIF_TERM list;
    int n, sent;

    if (!enif_get_int(env, argv[0], &n)) {
        return enif_make_badarg(env);
    }
    if (argv[1] == atom_self) {
        enif_self(env, &to);
    } else if (!enif_get_local_pid(env, argv[1], &to)) {
        return enif_make_badarg(env);
    }

    /* Build [1..n] by consing from the tail. */
    msg_env = enif_alloc_env();
    list = enif_make_list(msg_env, 0);
    while (n > 0) {
        list = enif_make_list_cell(msg_env, enif_make_int(msg_env, n), list);
        n--;
    }

    sent = enif_send(env, &to, msg_env, list);
    enif_free_env(msg_env);
    return enif_make_tuple2(env, atom_ok, enif_make_int(env, sent));
}
void * ewpcap_loop(void *arg) { EWPCAP_STATE *ep = arg; int rv = 0; ep->env = enif_alloc_env(); if (ep->env == NULL) goto ERR; rv = pcap_loop(ep->p, -1 /* loop forever */, ewpcap_send, (u_char *)ep); switch (rv) { case -2: /* break requested using pcap_breakloop */ break; case -1: /* pcap_loop error: the pcap handle may not be valid at this point, so we do not return an error */ break; default: break; } ERR: /* env is freed in resource cleanup */ return NULL; }
// Deliver a 2-tuple {Type, Result} to the VM's owner process.  The env is
// owned by a shared_ptr with enif_free_env as deleter, so it is released
// on every exit path.
void send_result(vm_t & vm, std::string const& type, result_t const& result)
{
    boost::shared_ptr<ErlNifEnv> env(enif_alloc_env(), enif_free_env);

    erlcpp::tuple_t packet(2);
    packet[0] = erlcpp::atom_t(type);
    packet[1] = result;

    ERL_NIF_TERM term = erlcpp::to_erl(env.get(), packet);
    enif_send(NULL, vm.erl_pid().ptr(), env.get(), term);
}
// Deliver a 3-tuple {Type, Result, Caller} directly to the caller process.
// The env is owned by a shared_ptr with enif_free_env as deleter, so it is
// released on every exit path.
void send_result_caller(vm_t & vm, std::string const& type, result_t const& result, erlcpp::lpid_t const& caller)
{
    boost::shared_ptr<ErlNifEnv> env(enif_alloc_env(), enif_free_env);

    erlcpp::tuple_t packet(3);
    packet[0] = erlcpp::atom_t(type);
    packet[1] = result;
    packet[2] = caller;

    ERL_NIF_TERM term = erlcpp::to_erl(env.get(), packet);
    enif_send(NULL, caller.ptr(), env.get(), term);
}
// Called only once per thread. static void populate(intq *q) { int i; qitem *entry = calloc(1,sizeof(qitem)*BLOCK_SIZE); #ifndef _TESTAPP_ entry[0].env = enif_alloc_env(); #endif entry[0].blockStart = 1; entry[0].home = q; qpush(q, &entry[0]); for (i = 1; i < BLOCK_SIZE; i++) { #ifndef _TESTAPP_ entry[i].env = enif_alloc_env(); #endif entry[i].home = q; qpush(q, &entry[i]); } tls_qsize += BLOCK_SIZE; }
/*
 * NIF: robin_q_new(Initial) -> {ok, Handle}.
 * Allocates the queue resource with its own env and rwlock, stores the
 * initial value via do_set(), and hands ownership of the resource to the
 * returned term.
 */
static ERL_NIF_TERM robin_q_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    robin_q_handle* handle =
        (robin_q_handle*)enif_alloc_resource(robin_q_RESOURCE,
                                             sizeof(robin_q_handle));

    handle->env  = enif_alloc_env();
    handle->lock = enif_rwlock_create("robin_q");
    do_set(handle, argv[0]);

    ERL_NIF_TERM result = enif_make_resource(env, handle);
    /* The term now keeps the resource alive; drop our reference. */
    enif_release_resource(handle);

    return enif_make_tuple2(env, enif_make_atom(env, "ok"), result);
}
/*
 * NIF: send_term(Pid, Term) -> integer().
 * Copies Term into a private env, sends it to Pid and returns
 * enif_send()'s integer result.
 */
static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid pid;

    if (!enif_get_local_pid(env, argv[0], &pid)) {
        return enif_make_badarg(env);
    }

    ErlNifEnv* menv = enif_alloc_env();
    int ret = enif_send(env, &pid, menv, enif_make_copy(menv, argv[1]));
    enif_free_env(menv);

    return enif_make_int(env, ret);
}
/*
 * Main worker-thread loop: receives messages from the thread state's
 * queue and dispatches each until nif_thread_receive() reports shutdown.
 * The env is reused across messages and freed on exit.
 */
void* nif_main_thread(void* obj)
{
    nif_thread_state* st = (nif_thread_state*)obj;
    ErlNifEnv* env = enif_alloc_env();

    for (;;) {
        nif_thread_message* msg;
        if (!nif_thread_receive(st, &msg))
            break;
        nif_thread_handle(env, st, msg);
    }

    enif_free_env(env);
    return NULL;
}
/*
 * Return the lines accumulated by the parser so far, raising an exception
 * if the parser previously recorded an allocation failure or if the
 * caller's env is not the parser's env (should never happen).
 *
 * NOTE(review): the env created by enif_alloc_env() is handed straight to
 * copy_current_line() and never freed here — presumably ownership moves
 * into the parser; confirm it is released elsewhere or this leaks one env
 * per call.
 */
static ERL_NIF_TERM return_so_far(ErlNifEnv *env, ecsv_parser_t *parser)
{
    if (parser->err)
        return enif_raise_exception(env, atoms.insufficient_memory);
    if (env != parser->env) {
        // it should never happen
        return enif_raise_exception(env, enif_make_atom(env, "env_mismatch"));
    }
    copy_current_line(enif_alloc_env(), parser);
    return parser->lines;
}
/*
 * Queue an asynchronous request for the salt worker thread.
 * Builds a salt_msg holding the caller's pid and a copy of its ref, then
 * appends it to the pcb's request list under sc_lock and signals the
 * worker.  Returns the atom 'enqueued' on success, 'congested' when 128
 * or more requests are already pending, 'exiting' if the pcb is shutting
 * down, or BADARG on allocation failure.
 */
static nif_term_t
salt_enqueue_req(nif_heap_t *hp, struct salt_pcb *sc, nif_pid_t pid, nif_term_t ref, uint_t type, uint_t aux)
{
	struct salt_msg *sm;
	const char *err;

	/* Prepare async request for worker thread. */
	sm = enif_alloc(sizeof(*sm));
	if (sm == NULL)
		return (BADARG);

	/* BUG FIX: the env allocation was only assert()ed, which compiles
	 * away under NDEBUG and would leave a NULL msg_heap to be
	 * dereferenced below.  Fail explicitly instead. */
	sm->msg_heap = enif_alloc_env();
	if (sm->msg_heap == NULL) {
		enif_free(sm);
		return (BADARG);
	}

	sm->msg_next = NULL;
	sm->msg_from = pid; /* struct copy */
	sm->msg_mref = enif_make_copy(sm->msg_heap, ref);
	sm->msg_type = type;
	sm->msg_aux = aux;

	/* Enqueue request checking for failure scenarios. */
	enif_mutex_lock(sc->sc_lock);
	if (sc->sc_req_npend >= 128) {
		err = "congested";
		goto fail;
	}
	if (sc->sc_exit_flag) {
		/* XXX This should not even be possible, no? */
		err = "exiting";
		goto fail;
	}

	*sc->sc_req_lastp = sm;
	sc->sc_req_lastp = &sm->msg_next;
	sc->sc_req_npend += 1;

	enif_cond_signal(sc->sc_cond);
	enif_mutex_unlock(sc->sc_lock);

	return (enif_make_atom(hp, "enqueued"));

	/* Failure treatment. */
 fail:
	enif_mutex_unlock(sc->sc_lock);
	enif_free_env(sm->msg_heap);
	enif_free(sm);

	return (enif_make_atom(hp, err));
}
/*
 * Hand a pending write off to the Erlang writer process.  Links the
 * destination pointer and write-request resource to each other, then
 * sends {append_bin_btnif, snappy, WrqResource, DataBin} to rq->writer.
 * Our reference to wrq is released after the send; the resource binary
 * keeps it alive until the writer is done.
 */
void nif_write(couchfile_modify_request *rq, couchfile_pointer_info *dst,
               nif_writerq* wrq, ssize_t size)
{
    dst->writerq_resource = wrq;
    dst->pointer = 0;
    wrq->ptr = dst;

    ErlNifEnv* msg_env = enif_alloc_env();
    ERL_NIF_TERM msg_term = enif_make_tuple4(msg_env,
        get_atom(msg_env, "append_bin_btnif"),
        get_atom(msg_env, "snappy"), //COMPRESSION TYPE
        enif_make_resource(msg_env, wrq),
        enif_make_resource_binary(msg_env, wrq, &wrq->buf, size));

    enif_send(rq->caller_env, &rq->writer, msg_env, msg_term);
    enif_free_env(msg_env);
    enif_release_resource(wrq);
}
/*
 * NIF load hook: allocates the global state (atom tree, its rwlock and
 * atom env) and registers the "value" resource type.
 * Returns 0 on success, -1 on allocation/registration failure (a nonzero
 * return makes the VM refuse to load the NIF).
 */
static int load_cb(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
{
    ErlNifResourceFlags tried;

    /* BUG FIX: allocation results were previously dereferenced without
     * any checks; report failure to the loader instead of crashing. */
    gbl = enif_alloc(sizeof(*gbl));
    if (gbl == NULL)
        return -1;
    memset(gbl, 0, sizeof(*gbl));

    RB_INIT(&(gbl->atom_head));
    gbl->atom_lock = enif_rwlock_create("gbl->atom_lock");
    gbl->atom_env = enif_alloc_env();
    if (gbl->atom_lock == NULL || gbl->atom_env == NULL)
        return -1;

    value_type = enif_open_resource_type(env, NULL, "value", NULL,
        ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER, &tried);
    if (value_type == NULL)
        return -1;

    return 0;
}
/*
 * NIF: prefix_search(Tree, Key, CallerRef, Pid).
 * Iterates every tree entry whose key starts with Key; prefix_cb streams
 * each match to Pid, then a final {CallerRef, ok} message is sent.
 * Returns 'ok', a badarg, or an {error, Reason} tuple.
 */
static ERL_NIF_TERM elibart_prefix_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    art_tree* t;
    ErlNifBinary key;
    callback_data cb_data;

    /* extract arguments: art_tree, key, caller ref, pid */
    if (argc != 4)
        return enif_make_badarg(env);
    if (!enif_get_resource(env, argv[0], elibart_RESOURCE, (void**) &t))
        return enif_make_badarg(env);
    if (!enif_inspect_binary(env, argv[1], &key))
        return enif_make_badarg(env);

    cb_data.env = env;
    if (!enif_is_pid(env, argv[3]))
        return mk_error(env, "not_a_pid");
    if (!enif_get_local_pid(env, argv[3], &cb_data.pid))
        return mk_error(env, "not_a_local_pid");
    cb_data.caller_ref = argv[2];

    // TODO this should be a worker thread since it's a long opearation (?)
    if (art_iter_prefix(t, key.data, key.size, prefix_cb, &cb_data))
        return mk_error(env, "art_prefix_search");

    /* Tell the consumer the stream is complete: {CallerRef, ok}. */
    ErlNifEnv *msg_env = enif_alloc_env();
    if (msg_env == NULL)
        return mk_error(env, "env_alloc_error");

    ERL_NIF_TERM caller_ref = enif_make_copy(msg_env, argv[2]);
    ERL_NIF_TERM res = enif_make_tuple2(msg_env, caller_ref, mk_atom(msg_env, "ok"));

    if (!enif_send(env, &cb_data.pid, msg_env, res))
    {
        /* BUG FIX: environments must be released with enif_free_env(),
         * not enif_free(). */
        enif_free_env(msg_env);
        return mk_error(env, "art_prefix_search");
    }

    enif_free_env(msg_env);
    return mk_atom(env, "ok");
}
/*
 * NIF: send_new_blob(Pid, Term) -> {ok, SendResult, LocalCopy}.
 * Builds one blob in a private env and sends it to Pid, and builds a
 * second identical blob in the caller's env which is returned so the
 * caller can compare the two.
 */
static ERL_NIF_TERM send_new_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid to;

    if (!enif_get_local_pid(env, argv[0], &to)) {
        return enif_make_badarg(env);
    }

    ErlNifEnv* msg_env = enif_alloc_env();
    ERL_NIF_TERM msg  = make_blob(env, msg_env, argv[1]);
    ERL_NIF_TERM copy = make_blob(env, env, argv[1]);

    int res = enif_send(env, &to, msg_env, msg);
    enif_free_env(msg_env);

    return enif_make_tuple3(env, atom_ok, enif_make_int(env, res), copy);
}
/*
 * Push an Erlang term onto the Lua stack as a TYPE_ERL_REF userdata.
 * The term is copied into a freshly allocated private env owned by the
 * userdata, so it stays valid for as long as Lua holds the reference.
 * Leaves exactly one new value on the Lua stack.
 */
static void push_nif_ref(lua_State* lua, ERL_NIF_TERM message, ErlNifEnv* env)
{
    (void)env; // unused

    const int top = lua_gettop(lua);
    luaL_checkstack(lua, 2, ERROR_STACK_MESSAGE);

    erlref_ptr ref = (erlref_ptr) lua_newuserdata(lua, sizeof(struct erlref));
    memset(ref, 0, sizeof(struct erlref));
    ref->env = enif_alloc_env();
    ref->reference = enif_make_copy(ref->env, message);

    /* Attach the erlref metatable so Lua can finalize the env. */
    luaL_getmetatable(lua, TYPE_ERL_REF);
    lua_setmetatable(lua, -2);

    assert(lua_gettop(lua) == top + 1);
}
static void* worker(void *obj) { handle_t* handle = (handle_t*)obj; task_t* task; ErlNifEnv* env = enif_alloc_env(); while ((task = (task_t*)queue_get(handle->queue)) != NULL) { ERL_NIF_TERM result = handle->calltable[task->cmd](env, handle, task->args); enif_send(NULL, task->pid, env, result); enif_free(task->pid); enif_free(task->args); enif_free(task); enif_clear_env(env); } return NULL; }
/*
 * Worker-thread loop: pops subscriber pids from the state's queue until a
 * NULL pid signals shutdown, sending each one a random int64.  The env is
 * cleared and reused between sends.
 */
static void* thr_main(void* obj)
{
    state_t* state = (state_t*) obj;
    ErlNifEnv* env = enif_alloc_env();

    for (;;) {
        ErlNifPid* pid = queue_pop(state->queue);
        if (pid == NULL)
            break;

        ERL_NIF_TERM msg = enif_make_int64(env, random());
        enif_send(NULL, pid, env, msg);

        enif_free(pid);
        enif_clear_env(env);
    }

    return NULL;
}
/**
 * NIF load hook for the string module: registers the iterator resource
 * type, allocates the env that owns the cached locale list, and builds
 * that list from ICU's available break iterators.
 * Returns 0 on success, 1 if the resource type cannot be opened.
 */
int i18n_string_load(ErlNifEnv *env, void ** /*priv_data*/,
                     ERL_NIF_TERM /*load_info*/)
{
    ErlNifResourceFlags flags = (ErlNifResourceFlags)(
        ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);

    iterator_type = enif_open_resource_type(env, NULL, "iterator_type",
        iterator_dtor, flags, NULL);
    if (iterator_type == NULL)
        return 1;

    global_string_env = enif_alloc_env();
    available_locales = generate_available(global_string_env,
        ubrk_getAvailable, ubrk_countAvailable());
    return 0;
}
/*
 * SpiderMonkey native: erlang-side send.  Converts argv[0] to an Erlang
 * term, sends it to the current job's pid, then blocks on the job queue
 * for the response and converts it back to a jsval in *rval.
 * Returns JS_FALSE (with a reported error) on bad arity, a dead pid, or
 * a close request; JS_TRUE on success.
 */
static JSBool
jserl_send(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
{
    vm_ptr vm = (vm_ptr) JS_GetContextPrivate(cx);
    ErlNifEnv* env;
    job_ptr job;
    ENTERM mesg;

    /* BUG FIX: argc is unsigned, so the original `argc < 0` check was
     * always false and argv[0] could be read with no arguments passed.
     * Require at least one argument. */
    if(argc < 1)
    {
        return JS_FALSE;
    }

    assert(vm != NULL && "Context has no vm.");

    env = enif_alloc_env();
    mesg = vm_mk_message(env, to_erl(env, cx, argv[0]));

    // If pid is not alive, raise an error.
    // XXX: Can I make this uncatchable?
    if(!enif_send(NULL, &(vm->curr_job->pid), env, mesg))
    {
        JS_ReportError(cx, "Context closing.");
        return JS_FALSE;
    }

    job = queue_receive(vm->jobs);
    if(job->type == job_close)
    {
        // XXX: Can I make this uncatchable?
        job_destroy(job);
        JS_ReportError(cx, "Context closing.");
        return JS_FALSE;
    }

    assert(job->type == job_response && "Invalid message response.");
    *rval = to_js(job->env, cx, job->args);
    job_destroy(job);

    return JS_TRUE;
}
static esqlite_command * command_create() { esqlite_command *cmd = (esqlite_command *) enif_alloc(sizeof(esqlite_command)); if(cmd == NULL) return NULL; cmd->env = enif_alloc_env(); if(cmd->env == NULL) { command_destroy(cmd); return NULL; } cmd->type = cmd_unknown; cmd->ref = 0; cmd->arg = 0; cmd->stmt = NULL; return cmd; }
/*
 * Register a new pending call under callsMutex: builds the
 * {Id, Fun, Args} message in the call's private env, creates its
 * completion cond/mutex, and inserts it into the global calls hash.
 * Returns the new record, or NULL on allocation failure.
 */
static ErlCall *CreateCall(ERL_NIF_TERM fun, ERL_NIF_TERM args)
{
    enif_mutex_lock(callsMutex);

    /* BUG FIX: malloc()/enif_alloc_env() results were dereferenced
     * unchecked while holding callsMutex; unlock and bail out instead. */
    ErlCall *erlCall = malloc(sizeof(ErlCall));
    if (erlCall == NULL) {
        enif_mutex_unlock(callsMutex);
        return NULL;
    }

    erlCall->id = id++;
    erlCall->env = enif_alloc_env();
    if (erlCall->env == NULL) {
        free(erlCall);
        enif_mutex_unlock(callsMutex);
        return NULL;
    }

    ERL_NIF_TERM msgFun  = enif_make_copy(erlCall->env, fun);
    ERL_NIF_TERM msgArgs = enif_make_copy(erlCall->env, args);
    ERL_NIF_TERM msgId   = enif_make_int(erlCall->env, erlCall->id);
    erlCall->msg = enif_make_tuple3(erlCall->env, msgId, msgFun, msgArgs);

    erlCall->cond = enif_cond_create("erlcl_cond");
    erlCall->mutex = enif_mutex_create("erlcl_mutex");
    erlCall->complete = 0;

    HASH_ADD_INT(calls, id, erlCall);

    enif_mutex_unlock(callsMutex);
    return erlCall;
}
/* extern GEOSSTRtree GEOS_DLL *GEOSSTRtree_create(size_t nodeCapacity);

NIF: strtree_create() -> Tree resource.
Creates a GEOS STR-tree with the default node capacity (10) and wraps it,
together with a private NIF env, in a resource term.

GeosSTRtree = lgeo_geos_index:strtree_create().
<<>>

*/
static ERL_NIF_TERM
strtree_create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Resource holds a pointer to a separately allocated wrapper struct. */
    GeosSTRtree_t **tree =
        enif_alloc_resource(GEOSSTRTREE_RESOURCE, sizeof(GeosSTRtree_t*));

    size_t nodeCapacity = 10;
    GEOSSTRtree *rtree = GEOSSTRtree_create(nodeCapacity);
    ErlNifEnv *tree_env = enif_alloc_env();

    *tree = (GeosSTRtree_t*) enif_alloc(sizeof(GeosSTRtree_t));
    (*tree)->env = tree_env;
    (*tree)->tree = rtree;

    ERL_NIF_TERM eterm = enif_make_resource(env, tree);
    enif_release_resource(tree);
    return eterm;
}