/*
 * Allocate and initialize a cache registered under `atom`, bounded by
 * `max_size` with q1 held at no less than `min_q1_size`.
 *
 * Ownership: after enif_thread_create() succeeds, the background thread
 * owns the cache and all of its data and frees them at exit (per the
 * inline comment below) — the caller must not free the returned pointer.
 *
 * NOTE(review): enif_alloc() results are not checked here; presumably the
 * surrounding project treats allocation failure as fatal — confirm.
 */
static struct cache * new_cache(ERL_NIF_TERM atom, int max_size, int min_q1_size)
{
	struct cache *c;
	struct atom_node *an;
	int i;

	/* zero the whole struct so all queues/counters start empty */
	c = enif_alloc(sizeof(*c));
	memset(c, 0, sizeof(*c));
	c->max_size = max_size;
	c->min_q1_size = min_q1_size;

	/* per-cache locks: two rwlocks plus a mutex/cond pair used to
	   signal the background thread */
	c->lookup_lock = enif_rwlock_create("cache->lookup_lock");
	c->cache_lock = enif_rwlock_create("cache->cache_lock");
	c->ctrl_lock = enif_mutex_create("cache->ctrl_lock");
	c->check_cond = enif_cond_create("cache->check_cond");

	/* the two LRU-style queues start empty */
	TAILQ_INIT(&(c->q1.head));
	TAILQ_INIT(&(c->q2.head));

	/* N_INCR_BKT independent buckets, each with its own mutex —
	   presumably to reduce contention on increment operations */
	for (i = 0; i < N_INCR_BKT; ++i) {
		TAILQ_INIT(&(c->incr_head[i]));
		c->incr_lock[i] = enif_mutex_create("cache->incr_lock");
	}
	RB_INIT(&(c->expiry_head));

	/* atom_node links the Erlang atom (copied into the global atom env
	   so it outlives this call) to this cache in the global atom tree */
	an = enif_alloc(sizeof(*an));
	memset(an, 0, sizeof(*an));
	an->atom = enif_make_copy(gbl->atom_env, atom);
	an->cache = c;
	c->atom_node = an;

	/* insert into the global registry and start the worker while still
	   holding atom_lock, so no lookup can observe a cache without its
	   background thread */
	enif_rwlock_rwlock(gbl->atom_lock);
	RB_INSERT(atom_tree, &(gbl->atom_head), an);
	/* start the background thread for the cache. after this, the bg
	   thread now owns the cache and all its data and will free it at exit */
	enif_thread_create("cachethread", &(c->bg_thread), cache_bg_thread, c, NULL);
	enif_rwlock_rwunlock(gbl->atom_lock);

	return c;
}
/*
 * robin_q:new/1 NIF — allocate a robin_q resource, seed it from argv[0]
 * via do_set(), and return {ok, Resource}.
 *
 * The resource is released immediately after enif_make_resource(), so its
 * lifetime is governed entirely by the Erlang term from then on.
 */
static ERL_NIF_TERM robin_q_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    robin_q_handle* h;
    ERL_NIF_TERM res_term;

    h = (robin_q_handle*)enif_alloc_resource(robin_q_RESOURCE,
                                             sizeof(robin_q_handle));
    /* private env holds copies of the queued terms across NIF calls */
    h->env = enif_alloc_env();
    h->lock = enif_rwlock_create("robin_q");
    do_set(h, argv[0]);

    res_term = enif_make_resource(env, h);
    enif_release_resource(h);
    return enif_make_tuple2(env, enif_make_atom(env, "ok"), res_term);
}
/*
 * NIF load callback: allocate the global state, its registry lock and
 * atom env, and open the "value" resource type.
 *
 * Returns 0 on success, -1 on failure (a non-zero return makes the VM
 * refuse to load the NIF library).
 *
 * Fix: the original ignored the results of enif_alloc(),
 * enif_rwlock_create(), enif_alloc_env() and enif_open_resource_type();
 * a NULL value_type would have caused crashes on first resource
 * allocation. All are now checked and failure is reported to the VM.
 */
static int load_cb(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
{
	ErlNifResourceFlags tried;

	gbl = enif_alloc(sizeof(*gbl));
	if (gbl == NULL)
		return -1;
	memset(gbl, 0, sizeof(*gbl));
	RB_INIT(&(gbl->atom_head));

	gbl->atom_lock = enif_rwlock_create("gbl->atom_lock");
	gbl->atom_env = enif_alloc_env();
	if (gbl->atom_lock == NULL || gbl->atom_env == NULL)
		return -1;

	value_type = enif_open_resource_type(env, NULL, "value", NULL,
	    ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER, &tried);
	if (value_type == NULL)
		return -1;

	return 0;
}
/*
 * NIF load callback: register the cqueue resource type, size the
 * per-scheduler tid table from the VM's scheduler count, and create the
 * global lookup rwlock. Returns 0 on success, -1 if the resource type
 * cannot be opened.
 */
static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    const ErlNifResourceFlags flags =
        (ErlNifResourceFlags)(ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);

    ErlNifResourceType* resource_type = enif_open_resource_type(
        env, NULL, "cqueue_resource", &cqueue_resource_cleanup, flags, NULL);
    if (resource_type == NULL) {
        return -1;
    }
    cqueue_RESOURCE = resource_type;

    /* one slot per VM scheduler thread; all slots start unclaimed */
    ErlNifSysInfo info;
    enif_system_info(&info, sizeof(ErlNifSysInfo));
    schedulers = info.scheduler_threads;

    scheduler_ids = new ErlNifTid[schedulers];
    for (unsigned int idx = 0; idx < schedulers; ++idx) {
        scheduler_ids[idx] = NULL;
    }

    lookup_lock = enif_rwlock_create("cqueue_lookup_lock");
    return 0;
}
DLLEXPORT struct crypto_callbacks* get_crypto_callbacks(int nlocks) { static int is_initialized = 0; static struct crypto_callbacks the_struct = { sizeof(struct crypto_callbacks), &crypto_alloc, &crypto_realloc, &crypto_free, #ifdef OPENSSL_THREADS &locking_function, &id_function, &dyn_create_function, &dyn_lock_function, &dyn_destroy_function #endif /* OPENSSL_THREADS */ }; if (!is_initialized) { #ifdef OPENSSL_THREADS if (nlocks > 0) { int i; lock_vec = enif_alloc(nlocks*sizeof(*lock_vec)); if (lock_vec==NULL) return NULL; memset(lock_vec, 0, nlocks*sizeof(*lock_vec)); for (i=nlocks-1; i>=0; --i) { lock_vec[i] = enif_rwlock_create("crypto_stat"); if (lock_vec[i]==NULL) return NULL; } } #endif is_initialized = 1; } return &the_struct; }
/* Dynamic locking, not used by current openssl version (0.9.8) */ static struct CRYPTO_dynlock_value* dyn_create_function(const char *file, int line) { return (struct CRYPTO_dynlock_value*) enif_rwlock_create("crypto_dyn"); }
/*
 * emmap:open/4 NIF — open the file named by argv[0], mmap `len` bytes at
 * `offset` with protection/flags decoded from argv[3], and return
 * {ok, Handle} or {error, Errno} / badarg.
 *
 * The file descriptor is only needed to establish the mapping; it is
 * closed before returning on every path (the mapping survives close).
 *
 * Fix: the original leaked `fd` when mmap() failed — the error return
 * happened before close(fd). We now save errno, close the descriptor,
 * and then build the error tuple (close() may clobber errno).
 */
static ERL_NIF_TERM emmap_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
  int flags;
  int prot;
  bool direct, lock;
  unsigned long int len;
  unsigned long int offset;
  char buf[1024];

#ifndef NDEBUG
  /* the casts below assume size_t and long int are the same width */
  if ( sizeof(long int) != sizeof(size_t) ) { abort(); }
#endif

  if (argc == 4
      && enif_get_string(env, argv[0], buf, 1024, ERL_NIF_LATIN1)
      && enif_get_ulong(env, argv[1], &offset)
      && enif_get_ulong(env, argv[2], &len)
      && decode_flags(env, argv[3], &prot, &flags, &direct, &lock))
    {
      /* open read-write only when the mapping needs PROT_WRITE */
      int mode = (((prot & PROT_WRITE)==PROT_WRITE) ? O_RDWR : O_RDONLY);
      int fd = open(buf, mode);
      if (fd < 0) {
        return make_error_tuple(env, errno);
      }

      void * res = mmap(0, (size_t) len, prot, flags, fd, (size_t) offset);
      if (res == MAP_FAILED) {
        int err = errno;   /* close() may overwrite errno */
        close(fd);
        return make_error_tuple(env, err);
      }
      close(fd);

      mhandle* handle = (mhandle*)enif_alloc_resource_compat(env, MMAP_RESOURCE,
                                                             sizeof(mhandle));
      if (lock)
        handle->rwlock = enif_rwlock_create((char*)"mmap");
      else
        handle->rwlock = 0;
      handle->prot = prot;
      handle->mem = res;
      handle->len = len;
      handle->closed = false;
      handle->direct = direct;
      handle->position = 0;

      ERL_NIF_TERM resource = enif_make_resource(env, handle);
      enif_release_resource_compat(env, handle);
      return enif_make_tuple2(env, enif_make_atom(env, "ok"), resource);
    }
  else
    {
      return enif_make_badarg(env);
    }
}