/*
 * Lock-checker: drop the "required" marker for `lck` on the calling thread.
 *
 * The lock must currently be held and must currently be on the thread's
 * required list; otherwise the matching error reporter is invoked
 * (these presumably abort rather than return — TODO confirm, since the
 * code below dereferences l_lck unconditionally afterwards).
 *
 * `op_flags` is not used here; it appears to exist for signature symmetry
 * with the other *_flg lock-checker entry points.
 */
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
    erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;

    /* The lock must actually be held to be unrequired. */
    if (!find_lock(&l_lck, lck))
        required_not_locked(l_lcks, lck);

    /* ...and it must be present on the required list. */
    l_lck = l_lcks->required.first;
    if (!find_lock(&l_lck, lck))
        unrequire_of_not_required_lock(l_lcks, lck);

    /* Unlink l_lck from the doubly-linked required list; the asserts
     * cross-check the prev/next pointers against the list head/tail. */
    if (l_lck->prev) {
        ASSERT(l_lcks->required.first != l_lck);
        l_lck->prev->next = l_lck->next;
    }
    else {
        ASSERT(l_lcks->required.first == l_lck);
        l_lcks->required.first = l_lck->next;
    }
    if (l_lck->next) {
        ASSERT(l_lcks->required.last != l_lck);
        l_lck->next->prev = l_lck->prev;
    }
    else {
        ASSERT(l_lcks->required.last == l_lck);
        l_lcks->required.last = l_lck->prev;
    }

    lc_free((void *) l_lck);
}
/*
 * Lock-checker: drop the "required" marker for `lck` on the calling thread.
 *
 * The lock must be held and currently marked required; otherwise the
 * corresponding error reporter is invoked.  The matching entry is then
 * detached from the thread's doubly-linked required list and released.
 */
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
    lc_thread_t *thread = make_my_locked_locks();
    lc_locked_lock_t *entry;

    /* A lock may only be unrequired while it is actually held. */
    entry = thread->locked.first;
    if (!find_lock(&entry, lck))
        required_not_locked(thread, lck);

    /* Locate the corresponding entry on the required list. */
    entry = thread->required.first;
    if (!find_lock(&entry, lck))
        unrequire_of_not_required_lock(thread, lck);

    /* Detach the entry, fixing up the list head/tail as needed. */
    {
        lc_locked_lock_t *before = entry->prev;
        lc_locked_lock_t *after = entry->next;

        if (before) {
            ASSERT(thread->required.first != entry);
            before->next = after;
        } else {
            ASSERT(thread->required.first == entry);
            thread->required.first = after;
        }

        if (after) {
            ASSERT(thread->required.last != entry);
            after->prev = before;
        } else {
            ASSERT(thread->required.last == entry);
            thread->required.last = before;
        }
    }

    lc_free(thread, entry);
}
/*
 * Pop and return the data of the front queue element, or NULL when the
 * queue is empty.  The popped node is released with lc_free() and the
 * element count is decremented.
 */
void *queue_pop(queue_t *q)
{
    q_node_t *first = q->head.next;

    if (first == NULL)
        return NULL;

    void *data = first->data;

    /* q->head is a sentinel: relink it past the popped node, and pull the
     * tail back onto the sentinel when the last element is removed. */
    q->head.next = first->next;
    if (q->tail == first)
        q->tail = &q->head;

    lc_free(first);
    q->size--;
    return data;
}
/*
 * Allocate a new queue node holding `data`.
 *
 * If the queue has a copy callback and `data` is non-NULL, the callback
 * duplicates the payload into node->data; on copy failure the node is
 * released and NULL is returned (lc_free() is presumed to return NULL —
 * the original code relied on this too).  Returns NULL on allocation or
 * copy failure.
 *
 * Bug fixed: the original executed `node->next = NULL;` unconditionally,
 * dereferencing NULL both when lc_alloc() failed and after a failed copy
 * had already freed the node.
 */
static q_node_t *new_node(queue_t *q, void *data)
{
    int err;
    q_node_t *node = lc_alloc(sizeof(q_node_t));

    if (node == NULL)
        return NULL;

    node->next = NULL;

    if (q->copy && data) {
        if ((err = q->copy(data, &node->data)) != SUCCESS) {
            /* Copy failed: release the node; result is NULL. */
            node = lc_free(node);
        }
    } else {
        node->data = data;
    }

    return node;
}
/*
 * Drop one reference to the task identified by `tid`.
 *
 * Looks the task up under the spin lock; when the reference count falls
 * to zero (or below) the task is removed from the map and its storage is
 * released.  Always returns SUCCESS, including when no such task exists.
 */
int task_free(task_id tid)
{
    task_t key = { tid };

    lc_spin_lock(lock);

    task_t *task = map_find(tasks, &key);
    if (task != NULL && atomic_int_dec(&task->ref_count) <= 0) {
        /* Last reference gone: unregister and free the task. */
        map_remove(tasks, task);
        lc_free(task);
    }

    lc_spin_unlock(lock);
    return SUCCESS;
}
/*
 * Lock-checker bookkeeping for releasing `lck`.
 *
 * Verifies that the lock is initialized, is currently held by the calling
 * thread, is not still marked "required", and that the unlock operation
 * flags match the flags it was locked with.  On success the matching
 * entry is unlinked from the thread's locked list and freed; on any
 * violation the corresponding error reporter is invoked (these presumably
 * abort rather than return — TODO confirm).
 */
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    /* Locks with a negative id are not tracked by the checker. */
    if (lck->id < 0)
        return;

    l_lcks = get_my_locked_locks();

    /* Unlocking a lock that is still marked required is an error. */
    if (l_lcks) {
        l_lck = l_lcks->required.first;
        if (find_lock(&l_lck, lck))
            unlock_of_required_lock(l_lcks, lck);
    }

    /* Scan from the most recently taken lock backwards for the match. */
    for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
        if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
            /* The lock-order flags used at unlock must match lock time. */
            if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
                unlock_op_mismatch(l_lcks, lck, op_flags);

            /* Unlink the entry from the doubly-linked locked list. */
            if (l_lck->prev)
                l_lck->prev->next = l_lck->next;
            else
                l_lcks->locked.first = l_lck->next;
            if (l_lck->next)
                l_lck->next->prev = l_lck->prev;
            else
                l_lcks->locked.last = l_lck->prev;

            lc_free((void *) l_lck);
            return;
        }
    }

    /* Fell through the scan: the lock was never recorded as locked. */
    unlock_of_not_locked(l_lcks, lck);
}
/*
 * Lock-checker bookkeeping for releasing `lck`.
 *
 * Verifies that the lock is initialized, held by the calling thread, not
 * still marked "required", and that the unlock options match the options
 * it was taken with; the matching entry is then unlinked from the
 * thread's locked list and freed.  Violations are routed to the
 * corresponding error reporters.
 */
void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
    lc_thread_t *thread;
    lc_locked_lock_t *entry;

    if (lck->inited != ERTS_LC_INITITALIZED)
        uninitialized_lock();

    /* Locks with a negative id are not tracked by the checker. */
    if (lck->id < 0)
        return;

    thread = get_my_locked_locks();

    /* Releasing a lock that is still marked required is an error. */
    if (thread) {
        entry = thread->required.first;
        if (find_lock(&entry, lck))
            unlock_of_required_lock(thread, lck);
    }

    /* Walk from the most recently taken lock backwards. */
    entry = thread ? thread->locked.last : NULL;
    while (entry) {
        if (entry->id != lck->id || entry->extra != lck->extra) {
            entry = entry->prev;
            continue;
        }

        /* The read/write options at unlock must match lock time. */
        if ((entry->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
            unlock_op_mismatch(thread, lck, options);

        /* Unlink the entry from the doubly-linked locked list. */
        if (entry->prev)
            entry->prev->next = entry->next;
        else
            thread->locked.first = entry->next;
        if (entry->next)
            entry->next->prev = entry->prev;
        else
            thread->locked.last = entry->prev;

        lc_free(thread, entry);
        return;
    }

    /* No matching entry: the lock was never recorded as locked. */
    unlock_of_not_locked(thread, lck);
}
/* Map deletion callback: releases storage for a task key. */
static void
task_key_deleter(void *key)
{
    lc_free(key);
}
/* Release callback: frees a task object's storage via lc_free(). */
static void
rel_task(void *task)
{
    lc_free(task);
}
/*
 * Destroy a queue: clear any remaining elements, then free the queue
 * structure itself.  Returns the result of queue_clear().
 */
int queue_free(queue_t *q)
{
    const int rc = queue_clear(q);

    lc_free(q);
    return rc;
}
/* Destructor callback: releases a lock-checker error record. */
static void
lc_err_destroy(void *record)
{
    lc_free(record);
}