int on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    UErrorCode status = U_ZERO_ERROR;
    int i, j;
    couch_ejson_global_ctx_t *globalCtx;

    globalCtx = (couch_ejson_global_ctx_t *) enif_alloc(sizeof(couch_ejson_global_ctx_t));
    if (globalCtx == NULL) {
        return 1;
    }

    if (!enif_get_int(env, info, &globalCtx->numCollators)) {
        enif_free(globalCtx);
        return 2;
    }

    if (globalCtx->numCollators < 1) {
        enif_free(globalCtx);
        return 3;
    }

    globalCtx->collMutex = enif_mutex_create("coll_mutex");
    if (globalCtx->collMutex == NULL) {
        enif_free(globalCtx);
        return 4;
    }

    globalCtx->collators = (UCollator **) enif_alloc(sizeof(UCollator *) * globalCtx->numCollators);
    if (globalCtx->collators == NULL) {
        enif_mutex_destroy(globalCtx->collMutex);
        enif_free(globalCtx);
        return 5;
    }

    for (i = 0; i < globalCtx->numCollators; i++) {
        globalCtx->collators[i] = ucol_open("", &status);
        if (U_FAILURE(status)) {
            /* Unwind: close the collators opened so far, then release the
             * partially initialized context so a failed load does not leak. */
            for (j = 0; j < i; j++) {
                ucol_close(globalCtx->collators[j]);
            }
            enif_free(globalCtx->collators);
            enif_mutex_destroy(globalCtx->collMutex);
            enif_free(globalCtx);
            return 5;
        }
    }

    globalCtx->collStackTop = 0;
    *priv = globalCtx;

    ATOM_TRUE = enif_make_atom(env, "true");
    ATOM_FALSE = enif_make_atom(env, "false");
    ATOM_NULL = enif_make_atom(env, "null");
    ATOM_ERROR = enif_make_atom(env, "error");

    return 0;
}
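/* A plausible definition of couch_ejson_global_ctx_t, inferred only from the
 * fields used in on_load() above and on_unload() further below (UCollator
 * comes from ICU's <unicode/ucol.h>); the real header may declare more. */
typedef struct {
    int numCollators;        /* size of the ICU collator pool             */
    int collStackTop;        /* index of the next free collator           */
    UCollator **collators;   /* pool of ICU collators                     */
    ErlNifMutex *collMutex;  /* guards collStackTop and the pool          */
} couch_ejson_global_ctx_t;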
queue *queue_create()
{
    queue *ret;

    ret = (queue *) enif_alloc(sizeof(struct queue_t));
    if(ret == NULL)
        goto error;

    ret->lock = NULL;
    ret->cond = NULL;
    ret->head = NULL;
    ret->tail = NULL;
    ret->length = 0;
    ret->reuseq = NULL;

    ret->lock = enif_mutex_create("queue_lock");
    if(ret->lock == NULL)
        goto error;

    ret->cond = enif_cond_create("queue_cond");
    if(ret->cond == NULL)
        goto error;

    return ret;

error:
    /* Guard the field accesses: if the initial allocation failed, ret is
     * NULL here and dereferencing it would crash. */
    if(ret != NULL) {
        if(ret->lock != NULL)
            enif_mutex_destroy(ret->lock);
        if(ret->cond != NULL)
            enif_cond_destroy(ret->cond);
        enif_free(ret);
    }
    return NULL;
}
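/* Hypothetical layouts for queue and qitem, reconstructed from the fields
 * this queue_create() and the matching queue_destroy() further below touch;
 * the originating project's headers may declare additional members. */
typedef struct qitem_t {
    struct qitem_t *next;
    ErlNifEnv *env;    /* per-item environment, freed on destroy        */
    void *cmd;         /* command payload, freed on destroy             */
    char blockStart;   /* set on the first item of an allocation block  */
} qitem;

typedef struct queue_t {
    ErlNifMutex *lock;
    ErlNifCond *cond;
    qitem *head;
    qitem *tail;
    qitem *reuseq;     /* free-list of recycled items */
    int length;
} queue;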
static void salt_pcb_free(nif_heap_t *hp, void *obj)
{
    struct salt_pcb *sc = obj;
    struct salt_msg *sm;
    struct salt_msg *tmp;

    /* Signal termination request, join worker thread, release all resources. */
    enif_mutex_lock(sc->sc_lock);
    sc->sc_exit_flag = true;
    enif_cond_signal(sc->sc_cond);
    enif_mutex_unlock(sc->sc_lock);

    (void)enif_thread_join(sc->sc_thread, NULL);

    sm = sc->sc_req_first;
loop:
    if (sm == NULL)
        goto done;
    tmp = sm->msg_next;

    enif_free_env(sm->msg_heap);
    enif_free(sm);

    sm = tmp;
    goto loop;
done:
    enif_mutex_destroy(sc->sc_lock);
    enif_cond_destroy(sc->sc_cond);

    /* Done, PCB itself released by ERTS. */
    return;
}
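/* A sketch of the thread control block and request message the salt
 * functions assume, inferred from the members referenced in salt_pcb_free()
 * above and start() further below. nif_heap_t, nif_cond_t, nif_lock_t and
 * nif_term_t are taken to be aliases for the corresponding ErlNif* types,
 * and <stdbool.h>/<stdint.h> are assumed to be included. */
struct salt_msg {
    struct salt_msg *msg_next;
    ErlNifEnv *msg_heap;            /* env holding the request's terms     */
};

struct salt_pcb {
    uint32_t sc_vsn;
    ErlNifTid sc_thread;            /* worker thread, joined on free       */
    ErlNifMutex *sc_lock;           /* guards the request queue and flag   */
    ErlNifCond *sc_cond;            /* signals pending work or shutdown    */
    struct salt_msg *sc_req_first;  /* singly linked request queue         */
    struct salt_msg **sc_req_lastp; /* tail pointer for O(1) append        */
    unsigned int sc_req_npend;
    bool sc_exit_flag;              /* set to request worker termination   */
};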
queue_ptr queue_create(const char* name)
{
    queue_ptr ret;

    ret = (queue_ptr) enif_alloc(sizeof(struct queue_t));
    if(ret == NULL)
        goto error;

    ret->lock = NULL;
    ret->cond = NULL;
    ret->head = NULL;
    ret->tail = NULL;
    ret->message = NULL;
    ret->length = 0;

    ret->lock = enif_mutex_create("queue_lock");
    if(ret->lock == NULL)
        goto error;

    ret->cond = enif_cond_create("queue_cond");
    if(ret->cond == NULL)
        goto error;

    return ret;

error:
    /* As above, check ret for NULL before touching its fields; the first
     * goto can reach here with ret == NULL. */
    if(ret != NULL) {
        if(ret->lock != NULL)
            enif_mutex_destroy(ret->lock);
        if(ret->cond != NULL)
            enif_cond_destroy(ret->cond);
        enif_free(ret);
    }
    return NULL;
}
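/* A hypothetical struct queue_t for this variant, based on the fields
 * queue_create() initializes; the destroy side below also references a
 * destroy_node callback, so it is included here as an assumption. */
struct queue_t {
    ErlNifMutex *lock;
    ErlNifCond *cond;
    struct qitem_t *head;
    struct qitem_t *tail;
    void *message;                 /* scratch slot for a received message */
    int length;
    void (*destroy_node)(void *);  /* optional per-item destructor        */
};
typedef struct queue_t *queue_ptr;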
// do not call this if there is the possibility that items
// are still being added to the queue.
void queue_destroy(queue_ptr queue)
{
    assert(NULL != queue);

    // empty the queue, calling the destroy function for the items
    void* node = NULL;
    while(queue_pop_nowait(queue, &node)) {
        if(NULL != queue->destroy_node) {
            queue->destroy_node(node);
        }
    }

    if(NULL != queue->lock) {
        enif_mutex_destroy(queue->lock);
    }
    if(NULL != queue->cond) {
        enif_cond_destroy(queue->cond);
    }

    // zero the struct type actually allocated by queue_create() above;
    // node_free() is assumed to release the queue allocation itself
    memset(queue, 0, sizeof(struct queue_t));
    node_free(queue);
}
queue * queue_new()
{
    queue *ret;

    ret = (queue *) enif_alloc(sizeof(queue));
    if (ret == NULL)
        goto error;

    ret->lock = NULL;
    ret->cond = NULL;
    ret->head = NULL;
    ret->tail = NULL;
    ret->message = NULL;
    ret->length = 0;

    ret->lock = enif_mutex_create("queue_lock");
    if (ret->lock == NULL)
        goto error;

    ret->cond = enif_cond_create("queue_cond");
    if (ret->cond == NULL)
        goto error;

    return ret;

error:
    /* ret may be NULL if the initial allocation failed; check it before
     * dereferencing. */
    if (ret != NULL) {
        if (ret->lock != NULL)
            enif_mutex_destroy(ret->lock);
        if (ret->cond != NULL)
            enif_cond_destroy(ret->cond);
        enif_free(ret);
    }
    return NULL;
}
void unload(ErlNifEnv *env, void *priv_data)
{
    PRIV *priv = NULL;
    priv = (PRIV *)enif_priv_data(env);

    enif_mutex_destroy(priv->lock);
    enif_free(priv);
}
static void msgenv_dtor(ErlNifEnv* env, void* obj)
{
    struct make_term_info* mti = (struct make_term_info*) obj;

    if (mti->dst_env != NULL) {
        enif_free_env(mti->dst_env);
    }
    enif_mutex_destroy(mti->mtx);
    enif_cond_destroy(mti->cond);
}
static int reload(ErlNifEnv *env, void **priv, ERL_NIF_TERM load_info)
{
    /* Tear down the old private data, then run load() again from scratch. */
    enif_mutex_destroy(((PRIVDATA *)*priv)->lock);
    enif_free(((PRIVDATA *)*priv)->data);
    enif_free(*priv);
    return load(env, priv, load_info);
}
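/* A guess at the private-data layout reload() tears down; only the lock and
 * data members are visible from the code above, anything else is unknown. */
typedef struct {
    ErlNifMutex *lock;
    void *data;
} PRIVDATA;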
static void DestroyCall(ErlCall *erlCall)
{
    enif_mutex_lock(callsMutex);
    HASH_DEL(calls, erlCall);
    enif_clear_env(erlCall->env);
    enif_free_env(erlCall->env);
    enif_mutex_destroy(erlCall->mutex);
    enif_cond_destroy(erlCall->cond);
    free(erlCall);
    enif_mutex_unlock(callsMutex);
}
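/* Hypothetical layout of ErlCall, inferred from DestroyCall() above. The hh
 * member is what uthash's HASH_DEL requires; calls and callsMutex are assumed
 * to be file-scope globals holding the hash table and the mutex guarding it. */
typedef struct ErlCall {
    ErlNifEnv *env;       /* env owning the call's terms, freed here */
    ErlNifMutex *mutex;   /* per-call mutex                          */
    ErlNifCond *cond;     /* per-call condition variable             */
    UT_hash_handle hh;    /* makes this struct hashable by uthash    */
} ErlCall;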
void nif_destroy_main_thread(void* void_st)
{
    nif_thread_state* st = (nif_thread_state*)void_st;
    /* An all-NULL message is assumed to act as the shutdown sentinel for
     * the worker loop. */
    nif_thread_message* msg = nif_thread_message_alloc(NULL, NULL, NULL);

    nif_thread_send(st, msg);
    enif_thread_join(st->tid, NULL);

    enif_cond_destroy(st->cond);
    enif_mutex_destroy(st->lock);
    enif_free(st->mailbox);
    enif_free(st);
}
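/* Sketch of the helper state nif_destroy_main_thread() relies on, inferred
 * from the fields it touches; the real helper definitions may differ. */
typedef struct {
    ErlNifTid tid;       /* worker thread id, joined on shutdown */
    ErlNifMutex *lock;   /* protects the mailbox                 */
    ErlNifCond *cond;    /* signals a new mailbox entry          */
    void *mailbox;       /* pending-message container, freed here */
} nif_thread_state;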
static nif_term_t start(nif_heap_t *hp, int argc, const nif_term_t argv[])
{
    struct salt_pcb *sc;
    nif_cond_t *cv;
    nif_lock_t *lk;
    nif_term_t pcb;

    if (argc != 0)
        return (BADARG);

    /* Create thread control block, pass ownership to Erlang. */
    assert(salt_pcb_type != NULL);
    sc = enif_alloc_resource(salt_pcb_type, sizeof(*sc));
    if (sc == NULL)
        goto fail_0;

    cv = enif_cond_create("lots_pcb_cv");
    if (cv == NULL)
        goto fail_1;

    lk = enif_mutex_create("lots_pcb_lock");
    if (lk == NULL)
        goto fail_2;

    sc->sc_vsn = SALT_VSN(1, 0, 0);
    sc->sc_lock = lk;
    sc->sc_cond = cv;
    sc->sc_req_first = NULL;
    sc->sc_req_lastp = &sc->sc_req_first;
    sc->sc_req_npend = 0;
    sc->sc_exit_flag = false;

    if (enif_thread_create("salt_thread", &sc->sc_thread, salt_worker_loop, sc, NULL) != 0)
        goto fail_3;

    pcb = enif_make_resource(hp, sc);
    enif_release_resource(sc);

    return (pcb);

    /* Failure handling: each label falls through to undo the steps that
     * succeeded before the failure. */
fail_3:
    enif_mutex_destroy(lk);
fail_2:
    enif_cond_destroy(cv);
fail_1:
    enif_release_resource(sc);
fail_0:
    return (BADARG);
}
void on_unload(ErlNifEnv* env, void* priv_data)
{
    couch_ejson_global_ctx_t *globalCtx = (couch_ejson_global_ctx_t *) priv_data;
    int i;

    for (i = 0; i < globalCtx->numCollators; i++) {
        ucol_close(globalCtx->collators[i]);
    }
    enif_free(globalCtx->collators);
    enif_mutex_destroy(globalCtx->collMutex);
    enif_free(globalCtx);
}
void queue_destroy(queue *queue)
{
    ErlNifMutex *lock;
    ErlNifCond *cond;
    int length;
    qitem *blocks = NULL;

    enif_mutex_lock(queue->lock);
    lock = queue->lock;
    cond = queue->cond;
    length = queue->length;

    queue->lock = NULL;
    queue->cond = NULL;
    queue->head = NULL;
    queue->tail = NULL;
    queue->length = -1;

    /* Drain the reuse list, freeing the current item's env and cmd (the
     * original freed the next item's, which leaked the head's resources),
     * and collect block-start items for bulk release below. */
    while(queue->reuseq != NULL) {
        qitem *tmp = queue->reuseq->next;
        if(queue->reuseq->env != NULL)
            enif_free_env(queue->reuseq->env);
        if(queue->reuseq->cmd != NULL)
            enif_free(queue->reuseq->cmd);
        if(queue->reuseq->blockStart) {
            queue->reuseq->next = blocks;
            blocks = queue->reuseq;
        }
        queue->reuseq = tmp;
    }
    while(blocks != NULL) {
        qitem *tmp = blocks->next;
        enif_free(blocks);
        blocks = tmp;
    }

    enif_mutex_unlock(lock);

    assert(length == 0 && "Attempting to destroy a non-empty queue.");
    enif_cond_destroy(cond);
    enif_mutex_destroy(lock);
    enif_free(queue);
}
void queue_destroy(queue_t* queue)
{
    ErlNifMutex* lock;
    ErlNifCond* cond;

    enif_mutex_lock(queue->lock);
    assert(queue->head == NULL && "Destroying a non-empty queue.");
    assert(queue->tail == NULL && "Destroying queue in invalid state.");

    lock = queue->lock;
    cond = queue->cond;
    queue->lock = NULL;
    queue->cond = NULL;
    enif_mutex_unlock(lock);

    enif_cond_destroy(cond);
    enif_mutex_destroy(lock);
    enif_free(queue);
}
void queue_free(queue *queue)
{
    ErlNifMutex *lock;
    ErlNifCond *cond;
    int length;

    enif_mutex_lock(queue->lock);
    lock = queue->lock;
    cond = queue->cond;
    length = queue->length;

    queue->lock = NULL;
    queue->cond = NULL;
    queue->head = NULL;
    queue->tail = NULL;
    queue->length = -1;
    enif_mutex_unlock(lock);

    assert(length == 0 && "Attempting to destroy a non-empty queue.");
    enif_cond_destroy(cond);
    enif_mutex_destroy(lock);
    enif_free(queue);
}
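/* Hypothetical usage sketch: pairing queue_new() with queue_free() across a
 * NIF's load/unload callbacks. The callback names and the choice to keep the
 * queue in priv_data are assumptions for illustration, not taken from any of
 * the projects above. */
static int example_load(ErlNifEnv *env, void **priv, ERL_NIF_TERM info)
{
    queue *q = queue_new();
    if (q == NULL)
        return 1;   /* a non-zero return aborts loading the NIF */
    *priv = q;
    return 0;
}

static void example_unload(ErlNifEnv *env, void *priv)
{
    /* Safe only once all producer/consumer threads have stopped. */
    queue_free((queue *) priv);
}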
void queue_destroy(queue_t* queue)
{
    enif_mutex_destroy(queue->mutex);
    enif_cond_destroy(queue->cond);
    enif_free(queue);
}
static void * cache_bg_thread(void *arg)
{
    struct cache *c = (struct cache *)arg;
    int i, dud;

    while (1) {
        enif_mutex_lock(c->ctrl_lock);

        /* if we've been told to die, quit this loop and start cleaning up */
        if (c->flags & FL_DYING) {
            enif_mutex_unlock(c->ctrl_lock);
            break;
        }

        /* sleep until there is work to do */
        enif_cond_wait(c->check_cond, c->ctrl_lock);

        __sync_add_and_fetch(&(c->wakeups), 1);
        dud = 1;

        /* we have to let go of ctrl_lock so we can take cache_lock then
           ctrl_lock again to get them back in the right order */
        enif_mutex_unlock(c->ctrl_lock);
        enif_rwlock_rwlock(c->cache_lock);
        enif_mutex_lock(c->ctrl_lock);

        /* first process the promotion queue before we do any evicting */
        for (i = 0; i < N_INCR_BKT; ++i) {
            enif_mutex_lock(c->incr_lock[i]);
            while (!TAILQ_EMPTY(&(c->incr_head[i]))) {
                struct cache_incr_node *n;
                n = TAILQ_FIRST(&(c->incr_head[i]));
                TAILQ_REMOVE(&(c->incr_head[i]), n, entry);
                __sync_sub_and_fetch(&(c->incr_count), 1);

                dud = 0;

                /* let go of the ctrl_lock here, we don't need it when we
                   aren't looking at the incr_queue, and this way other
                   threads can use it while we shuffle queue nodes around */
                enif_mutex_unlock(c->incr_lock[i]);
                enif_mutex_unlock(c->ctrl_lock);

                if (n->node->q == &(c->q1)) {
                    TAILQ_REMOVE(&(c->q1.head), n->node, entry);
                    c->q1.size -= n->node->size;
                    TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
                    n->node->q = &(c->q2);
                    c->q2.size += n->node->size;
                } else if (n->node->q == &(c->q2)) {
                    TAILQ_REMOVE(&(c->q2.head), n->node, entry);
                    TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry);
                }

                enif_free(n);

                /* take the ctrl_lock back again for the next loop around */
                enif_mutex_lock(c->ctrl_lock);
                enif_mutex_lock(c->incr_lock[i]);
            }
            enif_mutex_unlock(c->incr_lock[i]);
        }

        /* let go of the ctrl_lock here for two reasons:
           1. avoid lock inversion, because if we have evictions to do we
              will need to take lookup_lock, and we must take lookup_lock
              before taking ctrl_lock
           2. if we don't need to do evictions, we're done with the
              structures that are behind ctrl_lock so we should give it up
              for others */
        enif_mutex_unlock(c->ctrl_lock);

        /* do timed evictions -- if anything has expired, nuke it */
        {
            struct cache_node *n;
            if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) {
                struct timespec now;
                clock_now(&now);
                while (n && n->expiry.tv_sec < now.tv_sec) {
                    enif_mutex_lock(c->ctrl_lock);
                    dud = 0;
                    destroy_cache_node(n);
                    enif_mutex_unlock(c->ctrl_lock);
                    n = RB_MIN(expiry_tree, &(c->expiry_head));
                }
            }
        }

        /* now check if we need to do ordinary size limit evictions */
        if (c->q1.size + c->q2.size > c->max_size) {
            enif_rwlock_rwlock(c->lookup_lock);
            enif_mutex_lock(c->ctrl_lock);

            while ((c->q1.size + c->q2.size > c->max_size) &&
                   (c->q1.size > c->min_q1_size)) {
                struct cache_node *n;
                n = TAILQ_LAST(&(c->q1.head), cache_q);
                destroy_cache_node(n);
            }

            while (c->q1.size + c->q2.size > c->max_size) {
                struct cache_node *n;
                n = TAILQ_LAST(&(c->q2.head), cache_q);
                destroy_cache_node(n);
            }

            dud = 0;

            enif_mutex_unlock(c->ctrl_lock);
            enif_rwlock_rwunlock(c->lookup_lock);
        }

        if (dud)
            __sync_add_and_fetch(&(c->dud_wakeups), 1);

        /* now let go of the cache_lock that we took right back at the
           start of this iteration */
        enif_rwlock_rwunlock(c->cache_lock);
    }

    /* first remove us from the atom_tree, so we get no new operations
       coming in */
    enif_rwlock_rwlock(gbl->atom_lock);
    RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node);
    enif_rwlock_rwunlock(gbl->atom_lock);
    enif_free(c->atom_node);

    /* now take all of our locks, to make sure any pending operations are
       done */
    enif_rwlock_rwlock(c->cache_lock);
    enif_rwlock_rwlock(c->lookup_lock);
    enif_mutex_lock(c->ctrl_lock);

    c->atom_node = NULL;

    /* free the actual cache queues */
    {
        struct cache_node *n, *nextn;
        nextn = TAILQ_FIRST(&(c->q1.head));
        while ((n = nextn)) {
            nextn = TAILQ_NEXT(n, entry);
            destroy_cache_node(n);
        }
        nextn = TAILQ_FIRST(&(c->q2.head));
        while ((n = nextn)) {
            nextn = TAILQ_NEXT(n, entry);
            destroy_cache_node(n);
        }
    }

    for (i = 0; i < N_INCR_BKT; ++i)
        enif_mutex_lock(c->incr_lock[i]);

    /* free the incr_queue */
    for (i = 0; i < N_INCR_BKT; ++i) {
        struct cache_incr_node *in, *nextin;
        nextin = TAILQ_FIRST(&(c->incr_head[i]));
        while ((in = nextin)) {
            nextin = TAILQ_NEXT(in, entry);
            TAILQ_REMOVE(&(c->incr_head[i]), in, entry);
            in->node = 0;
            enif_free(in);
        }
        enif_mutex_unlock(c->incr_lock[i]);
        enif_mutex_destroy(c->incr_lock[i]);
    }

    /* unlock and destroy! */
    enif_cond_destroy(c->check_cond);
    enif_mutex_unlock(c->ctrl_lock);
    enif_mutex_destroy(c->ctrl_lock);
    enif_rwlock_rwunlock(c->lookup_lock);
    enif_rwlock_destroy(c->lookup_lock);
    enif_rwlock_rwunlock(c->cache_lock);
    enif_rwlock_destroy(c->cache_lock);
    enif_free(c);

    return 0;
}
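/* A partial sketch of struct cache, listing only the members that
 * cache_bg_thread() above actually touches (the shape suggests a 2Q-style
 * cache: q1 probationary, q2 protected); the real definition in the
 * originating project will have more members and may differ in detail. */
struct cache_queue {
    TAILQ_HEAD(cache_q, cache_node) head;  /* TAILQ_LAST above names cache_q */
    uint64_t size;
};

struct cache {
    int flags;                            /* FL_DYING requests shutdown      */
    uint64_t wakeups, dud_wakeups;        /* stats, updated atomically       */
    uint64_t max_size, min_q1_size;
    ErlNifMutex *ctrl_lock;               /* guards queues and stats         */
    ErlNifCond *check_cond;               /* wakes the background thread     */
    ErlNifRWLock *cache_lock;             /* held across a whole iteration   */
    ErlNifRWLock *lookup_lock;            /* must be taken before ctrl_lock  */
    struct cache_queue q1, q2;
    ErlNifMutex *incr_lock[N_INCR_BKT];   /* promotion-queue buckets         */
    TAILQ_HEAD(, cache_incr_node) incr_head[N_INCR_BKT];
    uint64_t incr_count;
    RB_HEAD(expiry_tree, cache_node) expiry_head;
    struct atom_node *atom_node;          /* back-pointer into the atom tree */
};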