/*
 * Resource destructor for a salt PCB: ask the worker thread to exit,
 * join it, drain any requests it left queued, then tear down the
 * synchronization primitives.  The PCB memory itself is owned by ERTS.
 */
static void
salt_pcb_free(nif_heap_t *hp, void *obj)
{
	struct salt_pcb *sc = obj;
	struct salt_msg *sm;
	struct salt_msg *tmp;

	/* Signal termination request, join worker thread, release all resources. */
	enif_mutex_lock(sc->sc_lock);
	sc->sc_exit_flag = true;
	enif_cond_signal(sc->sc_cond);
	enif_mutex_unlock(sc->sc_lock);

	(void)enif_thread_join(sc->sc_thread, NULL);

	/* Worker is gone: free every request still on the list.  No lock is
	 * needed anymore since we are the only thread touching the PCB. */
	sm = sc->sc_req_first;
	while (sm != NULL) {
		tmp = sm->msg_next;
		enif_free_env(sm->msg_heap);
		enif_free(sm);
		sm = tmp;
	}

	enif_mutex_destroy(sc->sc_lock);
	enif_cond_destroy(sc->sc_cond);

	/* Done, PCB itself released by ERTS. */
}
/*
 * NIF unload callback: shut down every active cache, then release the
 * global state.  For each cache we set FL_DYING, wake its background
 * thread, and join it before moving to the next one.
 */
static void
unload_cb(ErlNifEnv *env, void *priv_data)
{
	struct atom_node *an;

	enif_rwlock_rwlock(gbl->atom_lock);

	/* when we unload, we want to tell all of the active caches to die,
	 * then join() their bg_threads to wait until they're completely gone */
	while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) {
		struct cache *c = an->cache;

		/* Drop atom_lock while we signal and join; holding a rwlock
		 * across a thread join would risk deadlock with the bg thread.
		 * NOTE(review): this assumes the dying bg thread removes its
		 * atom_node from the tree before exiting, so RB_MIN makes
		 * progress each iteration — confirm against the bg thread code. */
		enif_rwlock_rwunlock(gbl->atom_lock);

		enif_mutex_lock(c->ctrl_lock);
		c->flags |= FL_DYING;
		enif_mutex_unlock(c->ctrl_lock);
		enif_cond_broadcast(c->check_cond);

		enif_thread_join(c->bg_thread, NULL);

		/* Re-take the lock before the next RB_MIN lookup. */
		enif_rwlock_rwlock(gbl->atom_lock);
	}
	enif_rwlock_rwunlock(gbl->atom_lock);

	/* All caches are gone; release the global bookkeeping. */
	enif_rwlock_destroy(gbl->atom_lock);
	enif_clear_env(gbl->atom_env);
	enif_free(gbl);
	gbl = NULL;
}
/*
 * Resource destructor for a sqlite connection: tell the worker thread to
 * stop, join it, then free the command queue and close the database.
 */
static void
destruct_esqlite_connection(ErlNifEnv *env, void *arg)
{
    esqlite_connection *db = (esqlite_connection *) arg;
    esqlite_command *cmd = command_create();

    /* Send the stop command.  Guard against allocation failure: the
     * original code dereferenced cmd unconditionally, crashing on OOM. */
    if (cmd) {
        cmd->type = cmd_stop;
        queue_push(db->commands, cmd);
        queue_send(db->commands, cmd);
    }

    /* Wait for the thread to finish */
    enif_thread_join(db->tid, NULL);
    enif_thread_opts_destroy(db->opts);

    /* The thread has finished... now remove the command queue, and close
     * the database (if it was still open). */
    queue_destroy(db->commands);

    if (db->db)
        sqlite3_close(db->db);
}
/*
 * Spawn a thread that sends the blob stored in the resource to the pid in
 * argv[1].  If argv[2] is 'join', join the thread synchronously and return
 * {ok, SendRes, Copy}; otherwise keep a reference on the resource (for the
 * running thread) and return {ok, Copy}.
 */
static ERL_NIF_TERM send_blob_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    union { void* vp; struct make_term_info* p; }mti;
    ERL_NIF_TERM copy;
    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
	|| !enif_get_local_pid(env,argv[1], &mti.p->to_pid)) {
	return enif_make_badarg(env);
    }
    /* Copy the blob into the caller's env so it survives the resource. */
    copy = enif_make_copy(env, mti.p->blob);

    mti.p->send_it = enif_is_identical(argv[2],atom_join);
    if (enif_thread_create("nif_SUITE:send_from_thread", &mti.p->tid,
			   threaded_sender, mti.p, NULL) != 0) {
	return enif_make_badarg(env);
    }
    if (enif_is_identical(argv[2],atom_join)) {
	/* Synchronous case: wait for the sender thread before reading
	 * its result field. */
	int err = enif_thread_join(mti.p->tid, NULL);
	assert(err == 0);
	return enif_make_tuple3(env, atom_ok, enif_make_int(env, mti.p->send_res), copy);
    }
    else {
	/* Asynchronous case: the extra reference keeps the resource alive
	 * for the detached thread; a later join_send_thread releases it.
	 * NOTE(review): keep_resource happens after thread_create — looks
	 * safe only because the caller still holds a reference via argv[0];
	 * confirm the resource cannot be GC'd between the two calls. */
	enif_keep_resource(mti.vp);
	return enif_make_tuple2(env, atom_ok, copy);
    }
}
/*
 * destroy(Cache :: atom()) -- destroys an entire cache
 * destroy(Cache :: atom(), Key :: binary()) -- removes an entry from a cache
 */
static ERL_NIF_TERM
destroy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	ERL_NIF_TERM atom;
	struct cache *c;
	ErlNifBinary kbin;
	struct cache_node *n;

	if (!enif_is_atom(env, argv[0]))
		return enif_make_badarg(env);
	atom = argv[0];

	if ((c = get_cache(atom))) {
		if (argc == 2) {
			/* Two-argument form: evict a single key. */
			if (!enif_inspect_binary(env, argv[1], &kbin))
				return enif_make_badarg(env);

			enif_rwlock_rwlock(c->cache_lock);
			enif_rwlock_rwlock(c->lookup_lock);

			HASH_FIND(hh, c->lookup, kbin.data, kbin.size, n);
			if (!n) {
				enif_rwlock_rwunlock(c->lookup_lock);
				enif_rwlock_rwunlock(c->cache_lock);
				return enif_make_atom(env, "notfound");
			}

			enif_mutex_lock(c->ctrl_lock);
			destroy_cache_node(n);
			enif_mutex_unlock(c->ctrl_lock);

			enif_rwlock_rwunlock(c->lookup_lock);
			enif_rwlock_rwunlock(c->cache_lock);

			enif_consume_timeslice(env, 50);
			return enif_make_atom(env, "ok");
		} else {
			/* One-argument form: mark the whole cache dying and
			 * wait for its background thread to clean up. */
			enif_mutex_lock(c->ctrl_lock);
			c->flags |= FL_DYING;
			enif_mutex_unlock(c->ctrl_lock);
			enif_cond_broadcast(c->check_cond);

			enif_thread_join(c->bg_thread, NULL);

			enif_consume_timeslice(env, 100);
			return enif_make_atom(env, "ok");
		}
		/* (removed: unreachable duplicate "ok" return — both branches
		 * above already return) */
	}
	return enif_make_atom(env, "notfound");
}
/*
 * Resource destructor for an ALSA capture handle: stop the capture thread
 * (if running) by clearing its run flag, then join it.
 */
static void
alsa_destructor(ErlNifEnv *env, void *obj)
{
	AudioCapture *capture = (AudioCapture *)obj;

	if (capture->thread_started) {
		/* The capture loop polls thread_started; clearing it asks the
		 * thread to exit before we join. */
		capture->thread_started = 0;
		enif_thread_join(capture->tid, NULL);
	}
	/* (removed: leftover debug fprintf of pcm_name to stderr) */
}
/*
 * Tear down a worker: enqueue a stop message, join the thread, then free
 * its thread options and queue.  (Name typo "woker_destory" is part of the
 * public interface and is kept for callers.)
 */
void
woker_destory(worker_t *w)
{
	msg_t *stop_msg = msg_create();

	stop_msg->type = msg_stop;
	queue_push(w->q, stop_msg);

	enif_thread_join(w->tid, NULL);
	enif_thread_opts_destroy(w->opts);
	queue_destroy(w->q);
}
/*
 * Resource destructor: push a SHUTDOWN task so the worker thread drains
 * out of its loop, join it, then release the queue and thread options.
 */
void
resource_dtor(ErlNifEnv *env, void *obj)
{
    ctx_t *ctx = static_cast<ctx_t*>(obj);
    void *thread_result = NULL;

    /* An empty SHUTDOWN task is the worker's exit sentinel. */
    task_t *shutdown_task = init_empty_task(SHUTDOWN);
    async_queue_push(ctx->queue, static_cast<void*>(shutdown_task));

    enif_thread_join(ctx->tid, &thread_result);

    async_queue_destroy(ctx->queue);
    enif_thread_opts_destroy(ctx->topts);
}
void nif_destroy_main_thread(void* void_st) { nif_thread_state* st = (nif_thread_state*)void_st; nif_thread_message* msg = nif_thread_message_alloc(NULL, NULL, NULL); nif_thread_send(st, msg); enif_thread_join(st->tid, NULL); enif_cond_destroy(st->cond); enif_mutex_destroy(st->lock); enif_free(st->mailbox); enif_free(st); }
/*
 * NIF unload callback: push the NULL sentinel to stop the queue thread,
 * join it, then release the queue, thread options, and private state.
 */
static void
unload(ErlNifEnv *env, void *priv)
{
	state_t *state = (state_t *)priv;
	void *thread_ret;

	/* NULL on the queue is the thread's exit signal. */
	queue_push(state->queue, NULL);
	enif_thread_join(state->qthread, &thread_ret);

	queue_destroy(state->queue);
	enif_thread_opts_destroy(state->opts);
	enif_free(state);
}
/*
 * alsa_stop(Capture) -> ok
 * Stop a running capture thread, if one was started, and join it.
 */
static ERL_NIF_TERM
alsa_stop(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	AudioCapture *capture;

	if (!enif_get_resource(env, argv[0], alsa_resource, (void **)&capture))
		return enif_make_badarg(env);

	/* Clearing the run flag asks the capture loop to exit; then wait. */
	if (capture->thread_started) {
		capture->thread_started = 0;
		enif_thread_join(capture->tid, NULL);
	}

	return enif_make_atom(env, "ok");
}
/*
 * Wake the sender thread created by send_blob_thread, join it, drop our
 * reference on the resource, and return {ok, SendRes}.
 */
static ERL_NIF_TERM
join_send_thread(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    union { void *vp; struct make_term_info *p; } mti;
    int err;
    int send_res;

    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
	return enif_make_badarg(env);
    }

    /* Tell the blocked sender thread to go ahead and send. */
    enif_mutex_lock(mti.p->mtx);
    mti.p->send_it = 1;
    enif_cond_signal(mti.p->cond);
    enif_mutex_unlock(mti.p->mtx);

    err = enif_thread_join(mti.p->tid, NULL);
    assert(err == 0);

    /* Read send_res BEFORE releasing: enif_release_resource may free the
     * resource, and the original code read mti.p->send_res after release
     * (use-after-free when the refcount reached zero). */
    send_res = mti.p->send_res;
    enif_release_resource(mti.vp);

    return enif_make_tuple2(env, atom_ok, enif_make_int(env, send_res));
}
/*
 * Resource destructor for a VM: enqueue a close job, join the worker
 * thread, then free the job queue and thread options.
 */
void
vm_destroy(ErlNifEnv *env, void *obj)
{
	vm_ptr vm = (vm_ptr)obj;
	void *exit_status;

	job_ptr close_job = job_create();
	assert(close_job != NULL && "Failed to create job.");

	/* A job_close job is the worker thread's exit signal. */
	close_job->type = job_close;
	queue_push(vm->jobs, close_job);
	queue_send(vm->jobs, close_job);

	enif_thread_join(vm->tid, &exit_status);

	queue_destroy(vm->jobs);
	enif_thread_opts_destroy(vm->opts);
}
/*
 * _close(Handle) -> int
 * Stop the reader thread (if any), close the CAN device fd, and free the
 * stored device path.  Returns close(2)'s result, or 0 if the device was
 * already closed.
 */
static ERL_NIF_TERM
_close(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	CAN_handle *handle;
	ERL_NIF_TERM result;

	if (!enif_get_resource(env, argv[0], CAN_handle_type, (void **)&handle))
		return enif_make_badarg(env); /* fixed stray ';;' */

	if (handle->threaded) {
		void *dont_care;

		/* Clearing the flag asks the reader thread to exit; join it
		 * before closing the fd out from under it. */
		handle->threaded = 0;
		enif_thread_join(handle->tid, &dont_care);
	}

	result = enif_make_int(env, handle->device >= 0 ? close(handle->device) : 0);
	handle->device = -1;

	if (handle->devpath) {
		enif_free(handle->devpath);
		handle->devpath = NULL;
	}
	return result;
}