/*
 * Append a caller pid to the tail of the queue and wake one waiting
 * consumer.  Returns 1 on success, 0 if the node allocation fails.
 * The queue lock is held only around the list manipulation.
 */
int queue_push(queue_t* queue, ErlNifPid* pid)
{
    qitem_t* node = (qitem_t*) enif_alloc(sizeof(qitem_t));

    if (node == NULL) {
        return 0;
    }

    node->pid = pid;
    node->next = NULL;

    enif_mutex_lock(queue->lock);

    /* Link at the tail; a previously empty queue also needs head set. */
    if (queue->tail != NULL) {
        queue->tail->next = node;
    }
    queue->tail = node;
    if (queue->head == NULL) {
        queue->head = queue->tail;
    }

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}
/*
 * Resource destructor for a salt PCB: ask the worker thread to exit,
 * join it, drain any requests it never serviced, then tear down the
 * synchronization primitives.  The PCB memory itself is freed by ERTS.
 */
static void salt_pcb_free(nif_heap_t *hp, void *obj)
{
    struct salt_pcb *sc = obj;
    struct salt_msg *cur;

    /* Signal termination request under the lock, then wait for the
     * worker thread to observe it and exit. */
    enif_mutex_lock(sc->sc_lock);
    sc->sc_exit_flag = true;
    enif_cond_signal(sc->sc_cond);
    enif_mutex_unlock(sc->sc_lock);

    (void)enif_thread_join(sc->sc_thread, NULL);

    /* Drain the pending-request list; each node owns a private heap. */
    cur = sc->sc_req_first;
    while (cur != NULL) {
        struct salt_msg *next = cur->msg_next;

        enif_free_env(cur->msg_heap);
        enif_free(cur);
        cur = next;
    }

    enif_mutex_destroy(sc->sc_lock);
    enif_cond_destroy(sc->sc_cond);

    /* Done, PCB itself released by ERTS. */
}
/*
 * Append an item to the tail of the queue, bump its length, and wake
 * one waiting consumer.  Returns 1 on success, 0 if allocation of the
 * wrapper node fails.
 */
int queue_push(queue *queue, void *item)
{
    qitem *node = (qitem *) enif_alloc(sizeof(qitem));

    if (node == NULL) {
        return 0;
    }

    node->data = item;
    node->next = NULL;

    enif_mutex_lock(queue->lock);
    assert(queue->length >= 0 && "Invalid queue size at push");

    /* Splice onto the tail; an empty queue also needs head updated. */
    if (queue->tail != NULL) {
        queue->tail->next = node;
    }
    queue->tail = node;
    if (queue->head == NULL) {
        queue->head = queue->tail;
    }
    queue->length += 1;

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}
/*
 * Hand a single out-of-band message to whoever is waiting on the queue's
 * condition variable.  At most one message may be in flight at a time;
 * attempting a second is a programming error caught by the assert.
 * Always returns 1.
 */
int queue_send(queue *queue, void *item)
{
    enif_mutex_lock(queue->lock);

    assert(queue->message == NULL && "Attempting to send multiple messages.");
    queue->message = item;

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}
/*
 * Push a job onto the head of the global job list and wake the worker.
 *
 * Fix: the original only assigned j->next when the list was non-empty,
 * so with an empty list j->next kept whatever value it held before the
 * call (indeterminate unless every caller pre-zeroed it).  Assigning
 * unconditionally is equivalent in the non-empty case and correctly
 * NULL-terminates the list when gbl.jlist is NULL.
 */
void job_insert(struct job *j)
{
    enif_mutex_lock(gbl.jlock);

    j->next = gbl.jlist;   /* NULL when the list is empty -> proper terminator */
    gbl.jlist = j;

    enif_cond_signal(gbl.jcond);
    enif_mutex_unlock(gbl.jlock);
}
/*
 * Deliver one message to the NIF thread's mailbox and wake the thread.
 * Always returns the 'ok' atom.
 */
ERL_NIF_TERM nif_thread_send(nif_thread_state* st, nif_thread_message* msg)
{
    ErlNifMutex* lk = st->lock;

    enif_mutex_lock(lk);
    TAILQ_INSERT_TAIL(st->mailbox, msg, next_entry);
    enif_cond_signal(st->cond);
    enif_mutex_unlock(lk);

    return atom_ok;
}
/*
 * Build and enqueue an asynchronous request for the PCB's worker thread.
 *
 * Returns the atom 'enqueued' on success, 'congested' when the worker's
 * backlog is full, 'exiting' when the PCB is being torn down, or BADARG
 * when the request node cannot be allocated.
 *
 * Improvement: the backlog cap was a bare 128 literal; it is now a named
 * constant.  Logic is otherwise unchanged.
 */
static nif_term_t salt_enqueue_req(nif_heap_t *hp, struct salt_pcb *sc, nif_pid_t pid, nif_term_t ref, uint_t type, uint_t aux)
{
    /* Maximum number of queued-but-unserviced requests per PCB. */
    enum { SALT_MAX_PENDING_REQS = 128 };

    struct salt_msg *sm;
    const char *err;

    /* Prepare async request for worker thread. */
    sm = enif_alloc(sizeof(*sm));
    if (sm == NULL)
        return (BADARG);

    sm->msg_heap = enif_alloc_env();
    assert(sm->msg_heap != NULL);

    sm->msg_next = NULL;
    sm->msg_from = pid; /* struct copy */
    sm->msg_mref = enif_make_copy(sm->msg_heap, ref);
    sm->msg_type = type;
    sm->msg_aux = aux;

    /* Enqueue request checking for failure scenarios. */
    enif_mutex_lock(sc->sc_lock);
    if (sc->sc_req_npend >= SALT_MAX_PENDING_REQS) {
        err = "congested";
        goto fail;
    }
    if (sc->sc_exit_flag) {
        /* XXX This should not even be possible, no? */
        err = "exiting";
        goto fail;
    }

    /* Tail insertion via the last-pointer; also bump the pending count. */
    *sc->sc_req_lastp = sm;
    sc->sc_req_lastp = &sm->msg_next;
    sc->sc_req_npend += 1;

    enif_cond_signal(sc->sc_cond);
    enif_mutex_unlock(sc->sc_lock);

    return (enif_make_atom(hp, "enqueued"));

    /* Failure treatment. */
fail:
    enif_mutex_unlock(sc->sc_lock);
    enif_free_env(sm->msg_heap);
    enif_free(sm);

    return (enif_make_atom(hp, err));
}
/*
 * NIF: tell the background thread (created elsewhere) to send its message,
 * join it, and return {ok, SendResult}.
 *
 * Fix: the original read mti.p->send_res AFTER enif_release_resource(),
 * which drops the reference and may free the resource object — a
 * use-after-free.  The result is now copied into a local before the
 * reference is released.
 */
static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    union { void* vp; struct make_term_info* p; } mti;
    int send_res;
    int err;

    if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
        return enif_make_badarg(env);
    }

    /* Wake the worker thread and tell it to perform the send. */
    enif_mutex_lock(mti.p->mtx);
    mti.p->send_it = 1;
    enif_cond_signal(mti.p->cond);
    enif_mutex_unlock(mti.p->mtx);

    err = enif_thread_join(mti.p->tid, NULL);
    assert(err == 0);

    /* Read the result BEFORE releasing the resource reference. */
    send_res = mti.p->send_res;
    enif_release_resource(mti.vp);

    return enif_make_tuple2(env, atom_ok, enif_make_int(env, send_res));
}
/*
 * Append a data item to the tail of the queue and wake one consumer.
 *
 * Fix: the original dereferenced the result of enif_alloc() without
 * checking it — an allocation failure would crash via a NULL
 * dereference.  The sibling queue_push() variants in this file do check
 * the result; since this function returns void, a failed allocation is
 * asserted in debug builds and (NDEBUG) drops the item rather than
 * dereferencing NULL.
 */
void queue_put(queue_t* queue, void *data)
{
    queue_item_t* item = (queue_item_t*) enif_alloc(sizeof(queue_item_t));

    assert(item != NULL && "queue_put: enif_alloc failed");
    if (item == NULL) {
        return; /* NDEBUG fallback: lose the item, not the process */
    }

    item->next = NULL;
    item->data = data;

    enif_mutex_lock(queue->mutex);

    if (queue->tail != NULL) {
        queue->tail->next = item;
    }
    queue->tail = item;
    if (queue->head == NULL) {
        queue->head = queue->tail;
    }

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->mutex);
}
// returns 1 on success, 0 on failure static int queue_insert(queue_ptr queue, void* data, int location) { node_ptr node = (node_ptr) node_alloc(sizeof(struct node)); if(NULL != node) { memset(node, 0, sizeof(struct node)); node->data = data; enif_mutex_lock(queue->lock); if(0 == queue->size) { // queue is empty queue->first = node; queue->last = node; } else { if(FRONT == location) { node->next = queue->first; queue->first = node; } else { // append to queue queue->last->next = node; queue->last = node; } } ++queue->size; enif_mutex_unlock(queue->lock); enif_cond_signal(queue->cond); } return NULL == node ? 0 : 1; }
/*
 * Append a pre-allocated entry to the tail of the queue and wake one
 * waiting consumer.  Always returns 1.
 *
 * Fixes:
 *  - The original busy-waited in an empty loop on enif_mutex_trylock(),
 *    burning a full core until the lock was released; the blocking
 *    enif_mutex_lock() is the correct primitive.
 *  - The condition variable was signalled after unlocking; it is now
 *    signalled while the lock is held, consistent with the other
 *    queue_push() variants in this file.
 */
int queue_push(queue *queue, qitem *entry)
{
    enif_mutex_lock(queue->lock);
    assert(queue->length >= 0 && "Invalid queue size at push");

    if (queue->tail != NULL) {
        queue->tail->next = entry;
    }
    queue->tail = entry;
    if (queue->head == NULL) {
        queue->head = queue->tail;
    }
    queue->length += 1;

    enif_cond_signal(queue->cond);
    enif_mutex_unlock(queue->lock);

    return 1;
}