Example no. 1
elem *cilkred_map::insert_no_rehash(__cilkrts_worker           *w,
				    void                       *key,
				    __cilkrts_hyperobject_base *hb,
                                    void                       *val)
{

#if REDPAR_DEBUG >= 2
    fprintf(stderr, "[W=%d, desc=insert_no_rehash, this_map=%p]\n",
	    w->self, this);
    verify_current_wkr(w);
#endif
    
    CILK_ASSERT((w == 0 && g == 0) || w->g == g);
    CILK_ASSERT(key != 0);
    CILK_ASSERT(val != 0);
	    
    elem *el = grow(w, &(buckets[hashfun(this, key)]));

#if REDPAR_DEBUG >= 3
    fprintf(stderr, "[W=%d, this=%p, inserting key=%p, val=%p, el = %p]\n",
	    w->self, this, key, val, el);
#endif

    el->key = key;
    el->hb  = hb;
    el->val = val;
    ++nelem;

    return el;
}
Example no. 2
void cilkred_map::rehash(__cilkrts_worker *w)
{
#if REDPAR_DEBUG >= 1
    fprintf(stderr, "[W=%d, desc=rehash, this_map=%p, g=%p, w->g=%p]\n",
	    w->self, this, g, w->g);
    verify_current_wkr(w);
#endif
    CILK_ASSERT((w == 0 && g == 0) || w->g == g);
    
    size_t onbuckets = nbuckets;
    size_t onelem = nelem;
    bucket **obuckets = buckets;
    size_t i;
    bucket *b;

    make_buckets(w, nextsz(nbuckets));
     
    for (i = 0; i < onbuckets; ++i) {
        b = obuckets[i];
        if (b) {
            elem *oel;
            for (oel = b->el; oel->key; ++oel)
                insert_no_rehash(w, oel->key, oel->hb, oel->val);
        }
    }

    CILK_ASSERT(nelem == onelem);

    free_buckets(w, obuckets, onbuckets);
}
Example no. 3
struct os_mutex *__cilkrts_os_mutex_create(void)
{
    int status;
    struct os_mutex *mutex = (struct os_mutex *)malloc(sizeof(struct os_mutex));
    pthread_mutexattr_t attr;

    ITT_SYNC_CREATE(mutex, "OS Mutex");

    if (!mutex) {
        if (static_mutex_used) {
            __cilkrts_bug("Cilk RTS library initialization failed");
        } else {
            static_mutex_used = 1;
            mutex = &static_mutex;
        }
    }

    status = pthread_mutexattr_init(&attr);
    CILK_ASSERT (status == 0);
#if defined DEBUG || CILK_LIB_DEBUG 
#ifdef PTHREAD_MUTEX_ERRORCHECK
    status = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#else
    status = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
#endif
    CILK_ASSERT (status == 0);
#endif
    status = pthread_mutex_init (&mutex->mutex, &attr);
    CILK_ASSERT (status == 0);
    pthread_mutexattr_destroy(&attr);

    return mutex;
}
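In debug builds (the DEBUG / CILK_LIB_DEBUG block above) the mutex is created with the error-checking type, so misuse fails loudly instead of deadlocking. A minimal standalone sketch, not part of the runtime, of what that attribute detects:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t m;

    pthread_mutexattr_init(&attr);
    /* Same attribute the runtime selects in debug builds. */
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);
    pthread_mutexattr_destroy(&attr);

    pthread_mutex_lock(&m);
    /* Relocking an error-checking mutex returns EDEADLK instead of
       deadlocking silently; unlocking from a non-owner returns EPERM. */
    int rc = pthread_mutex_lock(&m);
    printf("relock: %s\n", rc == EDEADLK ? "EDEADLK (caught)" : "unexpected");

    pthread_mutex_unlock(&m);
    pthread_mutex_destroy(&m);
    return 0;
}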
Example no. 4
static Closure *deque_xtract_bottom(CilkWorkerState *const ws, int pn)
{
     Closure *cl;

     deque_assert_ownership(ws, pn);

     cl = USE_PARAMETER(deques)[pn].bottom;
     if (cl) {
	  CILK_ASSERT(ws, cl->owner_ready_deque == pn);
	  USE_PARAMETER(deques)[pn].bottom = cl->prev_ready;
	  if (cl == USE_PARAMETER(deques)[pn].top) {
	       CILK_ASSERT(ws, cl->prev_ready == (Closure *) NULL);
	       USE_PARAMETER(deques)[pn].top = (Closure *) NULL;
	  } else {
	       CILK_ASSERT(ws, cl->prev_ready);
	       (cl->prev_ready)->next_ready = (Closure *) NULL;
	  }

	  WHEN_CILK_DEBUG(cl->owner_ready_deque = NOBODY);
     } else {
	  CILK_ASSERT(ws, USE_PARAMETER(deques)[pn].top == (Closure *) NULL);
     }
     
     return cl;
}
Example no. 5
void cilk_fiber::reset_state(cilk_fiber_proc start_proc)
{
    // Set up the fiber and return.
    this->m_start_proc = start_proc;
    
    CILK_ASSERT(!this->is_resumable());
    CILK_ASSERT(NULL == this->m_pending_remove_ref);
    CILK_ASSERT(NULL == this->m_pending_pool);
}
Example no. 6
/**
 * Record data for a successful steal.
 *
 * The pedigree for a STEAL record is the pedigree of the stolen frame.
 *
 * @note It's assumed that replay_record_steal() has already checked that we're
 * recording a log and that the record/replay functionality has not been
 * compiled out.
 *
 * @param w The worker stealing a frame.
 * @param victim_id The ID of the worker which had its frame stolen.
 */
void replay_record_steal_internal(__cilkrts_worker *w, int32_t victim_id)
{
    // Follow the pedigree chain using worker's stack frame
    CILK_ASSERT(w->l->next_frame_ff);
    CILK_ASSERT(w->l->next_frame_ff->call_stack);

    // Record steal: STEAL pedigree victim_id thief_id
    write_to_replay_log (w, PED_TYPE_STR_STEAL,
                         &(w->l->next_frame_ff->call_stack->parent_pedigree),
                         victim_id);
}
Example no. 7
void cilk_fiber::deallocate_self(cilk_fiber_pool* pool)
{
    this->set_resumable(false);

    CILK_ASSERT(NULL != pool);
    CILK_ASSERT(!this->is_allocated_from_thread());
    this->assert_ref_count_equals(0);
    
    // Cases: 
    //
    // 1. pool has space:  Add to this pool.
    // 2. pool is full:    Give some fibers to parent, and then free
    //                     enough to make space for the fiber we are deallocating.
    //                     Then put the fiber back into the pool.
    
    const bool need_lock = pool->lock;
    // Grab the lock for the remaining cases.
    if (need_lock) {
        spin_mutex_lock(pool->lock);
    }

    // Case 1: this pool has space.  Return the fiber.
    if (pool->size < pool->max_size)
    {
        // Add this fiber to pool
        pool->fibers[pool->size++] = this;
        if (need_lock) {
            spin_mutex_unlock(pool->lock);
        }
        return;
    }

    // Case 2: Pool is full.
    //
    // First free up some space by giving fibers to the parent.
    if (pool->parent)
    {
        // Pool is full.  Move all but "num_to_keep" fibers to parent,
        // if we can.
        unsigned num_to_keep = pool->max_size/2 + pool->max_size/4;
        cilk_fiber_pool_move_fibers_to_parent_pool(pool, num_to_keep);
    }

    if (need_lock) {
        spin_mutex_unlock(pool->lock);
    }

    // Now, free a fiber to make room for the one we need to put back,
    // and then put this fiber back.  This step may actually return
    // fibers to the heap.
    cilk_fiber_pool_free_fibers_from_pool(pool, pool->max_size -1, this);
}
Example no. 8
CILK_EXPORT
void __cilkrts_hyper_create(__cilkrts_hyperobject_base *hb)
{
    // This function registers the specified hyperobject in the current
    // reducer map and registers the initial value of the hyperobject as the
    // leftmost view of the reducer.
    __cilkrts_worker *w = __cilkrts_get_tls_worker();
    if (! w) {
        // If there is no worker, then there is nothing to do: The initial
        // value will automatically be used as the leftmost view when we
        // enter Cilk.
        return;
    }

    // Disable Cilkscreen for the duration of this call.  The destructor for
    // this class will re-enable Cilkscreen when the method returns.  This
    // will prevent Cilkscreen from reporting apparent races in reducers
    DisableCilkscreen x;

    void* key = get_hyperobject_key(hb);
    void* view = get_leftmost_view(key);
    cilkred_map *h = w->reducer_map;

    if (__builtin_expect(!h, 0)) {
	h = install_new_reducer_map(w);
#if REDPAR_DEBUG >= 2
	fprintf(stderr, "[W=%d, hb=%p, hyper_create, isntalled new map %p, view=%p]\n",
		w->self, hb, h, view);
#endif
    }

    /* Must not exist. */
    CILK_ASSERT(h->lookup(key) == NULL);

#if REDPAR_DEBUG >= 3
    verify_current_wkr(w);
    fprintf(stderr, "[W=%d, hb=%p, lookup in map %p of view %p, should be null]\n",
	    w->self, hb, h, view);
    fprintf(stderr, "W=%d, h=%p, inserting key %p, view%p\n",
	    w->self,
	    h,
	    &(hb->__c_monoid),
	    view);
#endif    

    if (h->merging)
        __cilkrts_bug("User error: hyperobject used by another hyperobject");

    CILK_ASSERT(w->reducer_map == h);
    // The address of the leftmost value is the same as the key for lookup.
    (void) h->rehash_and_insert(w, view, hb, view);
}
Example no. 9
static Closure *deque_peek_bottom(CilkWorkerState *const ws, int pn)
{
     Closure *cl;

     deque_assert_ownership(ws, pn);

     cl = USE_PARAMETER(deques)[pn].bottom;
     if (cl) {
	  CILK_ASSERT(ws, cl->owner_ready_deque == pn);
     } else {
	  CILK_ASSERT(ws, USE_PARAMETER(deques)[pn].top == (Closure *) NULL);
     }
     
     return cl;
}
Example no. 10
void cilkred_map::make_buckets(__cilkrts_worker *w, 
                               size_t            new_nbuckets)
{     
    nbuckets = new_nbuckets;

    CILK_ASSERT(is_power_of_2(nbuckets));
#if defined __GNUC__ && defined __ICC 
    /* bug workaround -- suppress calls to _intel_fast_memset */
    bucket *volatile*new_buckets = (bucket *volatile*)
#else
    bucket **new_buckets = (bucket **)
#endif
        __cilkrts_frame_malloc(w, nbuckets * sizeof(*(buckets)));

#if REDPAR_DEBUG >= 1
    fprintf(stderr, "W=%d, desc=make_buckets, new_buckets=%p, new_nbuckets=%zd\n",
	    w->self, new_buckets, new_nbuckets);
#endif

    for (size_t i = 0; i < new_nbuckets; ++i)
        new_buckets[i] = 0;
#if defined __GNUC__ && defined __ICC 
    buckets = (bucket **)new_buckets;
#else
    buckets = new_buckets;
#endif
    nelem = 0;
}
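make_buckets() asserts that the bucket count is a power of two. That is what lets hashfun() (used by insert_no_rehash() in Example no. 1, but not reproduced in this excerpt) reduce a hash value to a bucket index with a bit mask rather than a division. A hedged sketch of that idiom; the mixing step is hypothetical, not the runtime's actual hash:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the map's hashfun(). */
static size_t bucket_index(void *key, size_t nbuckets)
{
    uintptr_t h = (uintptr_t)key;
    h ^= h >> 7;    /* spread the low-entropy bits of a pointer key */
    /* Because nbuckets is a power of two, (h & (nbuckets - 1)) equals
       h % nbuckets but compiles to a single AND. */
    return (size_t)(h & (nbuckets - 1));
}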
Example no. 11
void __cilkrts_save_exception_state(__cilkrts_worker *w, full_frame *ff)
{
    save_exception_info(w, __cxa_get_globals(), 0, false, "undo-detach");
    CILK_ASSERT(NULL == ff->pending_exception);
    ff->pending_exception = w->l->pending_exception;
    w->l->pending_exception = NULL;    
}
Example no. 12
NORETURN
cilk_fiber::remove_reference_from_self_and_resume_other(cilk_fiber_pool* self_pool,
                                                        cilk_fiber*      other)
{
    // Decrement my reference count once (to suspend)
    // Increment other's count (to resume)
    // Suspended fiber should have a reference count of at least 1.  (It is not in a pool).
    this->dec_ref_count();
    other->inc_ref_count();

    // Set a pending remove reference for this fiber, once we have
    // actually switched off.
    other->m_pending_remove_ref = this;
    other->m_pending_pool   = self_pool;

    // Pass along my owner.
    other->owner = this->owner;
    this->owner  = NULL;

    // Since we are deallocating self, this fiber does not become
    // resumable.
    CILK_ASSERT(!this->is_resumable());

    cilk_fiber_sysdep* self = this->sysdep();
    self->jump_to_resume_other_sysdep(other->sysdep());

    __cilkrts_bug("Deallocating fiber.  We should never come back here.");
    std::abort();
}
Example no. 13
void cilk_fiber::suspend_self_and_resume_other(cilk_fiber* other)
{
#if FIBER_DEBUG >=1
    fprintf(stderr, "suspend_self_and_resume_other: self =%p, other=%p [owner=%p, resume_sf=%p]\n",
            this, other, other->owner, other->resume_sf);
#endif

    // Decrement my reference count (to suspend)
    // Increment other's count (to resume)
    // Suspended fiber should have a reference count of at least 1.  (It is not in a pool).
    this->dec_ref_count();
    other->inc_ref_count();
    this->assert_ref_count_at_least(1);

    // Pass along my owner.
    other->owner = this->owner;
    this->owner  = NULL;

    // Change this fiber to resumable.
    CILK_ASSERT(!this->is_resumable());
    this->set_resumable(true);

    // Normally, I'd assert other->is_resumable().  But this flag may
    // be false the first time we try to "resume" a fiber.
    cilk_fiber_sysdep* self = this->sysdep();
    self->suspend_self_and_resume_other_sysdep(other->sysdep());

    // HAVE RESUMED EXECUTION
    // When we come back here, we should have at least two references:
    // one for the fiber being allocated / out of a pool, and one for it being active.
    this->assert_ref_count_at_least(2);
}
Example no. 14
void cilk_fiber_pool_destroy(cilk_fiber_pool* pool)
{
    CILK_ASSERT(cilk_fiber_pool_sanity_check(pool, "pool_destroy"));

    // Lock my own pool, if I need to.
    if (pool->lock) {
        spin_mutex_lock(pool->lock);
    }

    // Give any remaining fibers to parent pool.
    if (pool->parent) {
        cilk_fiber_pool_move_fibers_to_parent_pool(pool, 0);
    }

    // Unlock pool.
    if (pool->lock) {
        spin_mutex_unlock(pool->lock);
    }

    // If I have any left in my pool, just free them myself.
    // This method may acquire the pool lock.
    cilk_fiber_pool_free_fibers_from_pool(pool, 0, NULL);

    // Destroy the lock if there is one.
    if (pool->lock) {
        spin_mutex_destroy(pool->lock);
    }
    __cilkrts_free(pool->fibers);
}
Example no. 15
void cilk_fiber_pool_set_fiber_limit(cilk_fiber_pool* root_pool,
                                     unsigned max_fibers_to_allocate)
{
    // Should only set limit on root pool, not children.
    CILK_ASSERT(NULL == root_pool->parent);
    root_pool->alloc_max = max_fibers_to_allocate;
}
Example no. 16
/*
 * Do any necessary cleanup for the logs - See record-replay.h for full
 * routine header.
 */
void replay_term(global_state_t *g)
{
    // Free memory for the record/replay log file name, if we've got one
    if (g->record_replay_file_name)
        __cilkrts_free(g->record_replay_file_name);

    // Per-worker cleanup
    for(int i = 0; i < g->total_workers; ++i)
    {
        __cilkrts_worker *w = g->workers[i];

        // Close the log files, if we've opened them
        if(w->l->record_replay_fptr)
            fclose(w->l->record_replay_fptr);

        if (w->l->replay_list_root)
        {
            // We should have consumed the entire list
            CILK_ASSERT(ped_type_last == w->l->replay_list_entry->m_type);

            replay_entry_t *entry = w->l->replay_list_root;
            while (ped_type_last != entry->m_type)
            {
                // Free the pedigree memory for each entry
                entry->unload();
                entry++;
            }
            __cilkrts_free(w->l->replay_list_root);
            w->l->replay_list_root = NULL;
            w->l->replay_list_entry = NULL;
        }
    }
}
Example no. 17
/* debugging support: check consistency of a reducer map */
void cilkred_map::check(bool allow_null_val)
{
    size_t count = 0;

    CILK_ASSERT(buckets);
    for (size_t i = 0; i < nbuckets; ++i) {
        bucket *b = buckets[i];
        if (b) 
            for (elem *el = b->el; el->key; ++el) {
                CILK_ASSERT(allow_null_val || el->val);
                ++count;
            }
    }
    CILK_ASSERT(nelem == count);
    /*global_reducer_map::check();*/
}             
Example no. 18
void __cilkrts_os_mutex_unlock(struct os_mutex *p)
{
    int status;
    ITT_SYNC_RELEASING(p);
    status = pthread_mutex_unlock (&p->mutex);
    CILK_ASSERT(status == 0);
}
Example no. 19
__CILKRTS_BEGIN_EXTERN_C

/**
 * Walk the pedigree and generate a string representation with underscores
 * between terms.  Currently does a recursive walk to generate a forward
 * pedigree.
 *
 * @param p The buffer that is to be filled.  Assumed to be PEDIGREE_BUFF_SIZE
 * characters long
 * @param pnode The initial pedigree term to be written.
 *
 * @return A pointer into the pedigree string buffer after a term has been
 * written.
 */
static
char * walk_pedigree_nodes(char *p, const __cilkrts_pedigree *pnode)
{
    CILK_ASSERT(pnode);
    if (pnode->parent)
    {
        p = walk_pedigree_nodes(p, pnode->parent);
        p += cilk_snprintf_s(p, PEDIGREE_BUFF_SIZE, "%s", (char *)"_");
    }
    return p + cilk_snprintf_l(p, PEDIGREE_BUFF_SIZE, "%" PRIu64, pnode->rank);
}
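As a standalone illustration of the walk, the sketch below builds a three-node chain by hand and prints its forward pedigree. The struct is a simplified stand-in for __cilkrts_pedigree (a rank plus a parent pointer), and plain sprintf replaces the runtime's cilk_snprintf_* wrappers:

#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-in for __cilkrts_pedigree. */
typedef struct ped { uint64_t rank; const struct ped *parent; } ped;

static char *walk(char *p, const ped *pnode)
{
    if (pnode->parent) {
        p = walk(p, pnode->parent);      /* emit ancestors first */
        p += sprintf(p, "_");
    }
    return p + sprintf(p, "%" PRIu64, pnode->rank);
}

int main(void)
{
    /* root -> mid -> leaf; the recursion prints root-first. */
    ped root = { 1, NULL }, mid = { 4, &root }, leaf = { 2, &mid };
    char buf[64];
    walk(buf, &leaf);
    puts(buf);    /* prints "1_4_2" */
    return 0;
}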
Example no. 20
/* Destroy a reducer map.  The map must have been allocated
   from the worker's global context and should have been
   allocated by the same worker. */
void __cilkrts_destroy_reducer_map(__cilkrts_worker *w, cilkred_map *h)
{
    CILK_ASSERT((w == 0 && h->g == 0) || w->g == h->g);
    verify_current_wkr(w);

    /* the reducer map is allowed to contain el->val == NULL here (and
       only here).  We set el->val == NULL only when we know that the
       map will be destroyed immediately afterwards. */
    DBG h->check(/*allow_null_val=*/true);

    bucket *b;
    size_t i;

    for (i = 0; i < h->nbuckets; ++i) {
        b = h->buckets[i];
        if (b) {
            elem *el;
            for (el = b->el; el->key; ++el) {
                if (el->val)
                    el->destroy();
            }
        }
    }

    free_buckets(w, h->buckets, h->nbuckets);

#if REDPAR_DEBUG >= 1
    fprintf(stderr, "W=%d, destroy_red_map, freeing map h=%p, size=%zd\n",
	    w->self, h, sizeof(*h));
#endif
    
    __cilkrts_frame_free(w, h, sizeof(*h));
}
Example no. 21
void __cilkrts_mutex_lock(__cilkrts_worker *w, struct mutex *m)
{
    int count;
    const int maxspin = 1000; /* SWAG */

    NOTE_INTERVAL(w, INTERVAL_MUTEX_LOCK);
    if (!TRY_ACQUIRE(m)) {
        START_INTERVAL(w, INTERVAL_MUTEX_LOCK_SPINNING);
        count = 0;
        do {
            do {
                __cilkrts_short_pause();
                if (++count >= maxspin) {
                    STOP_INTERVAL(w, INTERVAL_MUTEX_LOCK_SPINNING);
                    START_INTERVAL(w, INTERVAL_MUTEX_LOCK_YIELDING);
                    /* let the OS reschedule every once in a while */
                    __cilkrts_yield();
                    STOP_INTERVAL(w, INTERVAL_MUTEX_LOCK_YIELDING);
                    START_INTERVAL(w, INTERVAL_MUTEX_LOCK_SPINNING);
                    count = 0;
                }
            } while (m->lock != 0);
        } while (!TRY_ACQUIRE(m));
        STOP_INTERVAL(w, INTERVAL_MUTEX_LOCK_SPINNING);
    }

    CILK_ASSERT(m->owner == 0);
    m->owner = w;
}
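The inner while (m->lock != 0) loop is the classic test-and-test-and-set shape: waiters spin on plain reads, which do not bounce the lock's cache line between cores, and every maxspin iterations the waiter yields to the OS. A self-contained sketch of the same pattern with C11 atomics; TRY_ACQUIRE and the interval macros are runtime internals, so hypothetical stand-ins are used here:

#include <sched.h>
#include <stdatomic.h>

enum { SKETCH_MAXSPIN = 1000 };

typedef struct { atomic_int lock; } sketch_mutex;

static void sketch_lock(sketch_mutex *m)
{
    int count = 0;
    /* One atomic exchange per attempt, like TRY_ACQUIRE(m). */
    while (atomic_exchange_explicit(&m->lock, 1, memory_order_acquire)) {
        /* Wait on cheap relaxed loads before retrying the exchange. */
        while (atomic_load_explicit(&m->lock, memory_order_relaxed)) {
            if (++count >= SKETCH_MAXSPIN) {
                sched_yield();    /* let the OS reschedule, as above */
                count = 0;
            }
        }
    }
}

static void sketch_unlock(sketch_mutex *m)
{
    atomic_store_explicit(&m->lock, 0, memory_order_release);
}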
Example no. 22
CILK_EXPORT void* __CILKRTS_STRAND_PURE(
    __cilkrts_hyper_lookup(__cilkrts_hyperobject_base *hb))
{
    __cilkrts_worker* w = __cilkrts_get_tls_worker_fast();
    void* key = get_hyperobject_key(hb);
    if (! w)
        return get_leftmost_view(key);

    // Disable Cilkscreen for the duration of this call.  This will
    // prevent Cilkscreen from reporting apparent races in reducers
    DisableCilkscreen dguard;

    if (__builtin_expect(w->g->force_reduce, 0))
        __cilkrts_promote_own_deque(w);
    cilkred_map* h = w->reducer_map;

    if (__builtin_expect(!h, 0)) {
	h = install_new_reducer_map(w);
    }

    if (h->merging)
        __cilkrts_bug("User error: hyperobject used by another hyperobject");
    elem* el = h->lookup(key);
    if (! el) {
        /* lookup failed; insert a new default element */
        void *rep;

        {
            /* re-enable cilkscreen while calling the constructor */
            EnableCilkscreen eguard;
            if (h->is_leftmost)
            {
                // This special case is called only if the reducer was not
                // registered using __cilkrts_hyper_create, e.g., if this is a
                // C reducer in global scope or if there is no bound worker.
                rep = get_leftmost_view(key);
            }
            else
            {
                rep = hb->__c_monoid.allocate_fn((void*)hb,
						 hb->__view_size);
                // TBD: Handle exception on identity function
                hb->__c_monoid.identity_fn((void*)hb, rep);
            }
        }

#if REDPAR_DEBUG >= 3
	fprintf(stderr, "W=%d, h=%p, inserting key %p, val%p\n",
		w->self,
		h,
		&(hb->__c_monoid),
		rep);
	CILK_ASSERT(w->reducer_map == h);
#endif
        el = h->rehash_and_insert(w, key, hb, rep);
    }

    return el->val;
}
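The miss path above is the whole lazy-view contract: allocate_fn reserves storage for a new view and identity_fn constructs the monoid's identity in it. A hedged sketch of such a callback pair for a long-summing view; the names are hypothetical, and only the allocate-then-identity ordering and the signatures are taken from the call sites above:

#include <stdlib.h>

/* Hypothetical callbacks shaped like the __c_monoid members used by
   __cilkrts_hyper_lookup() above. */
static void *sum_allocate(void *hb, size_t bytes)
{
    (void)hb;                    /* the hyperobject is unused here */
    return malloc(bytes);        /* raw storage for one view */
}

static void sum_identity(void *hb, void *view)
{
    (void)hb;
    *(long *)view = 0;           /* identity of the (long, +) monoid */
}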
Example no. 23
/**
 * Advance to the next log entry from a SYNC record.  Consume the current
 * SYNC record on this worker and advance to the next one.
 *
 * @note It's assumed that replay_advance_from_sync() has already returned if
 * we're not replaying a log, or if record/replay functionality has been
 * compiled out.
 *
 * @param w The worker whose replay log we're advancing.
 */
void replay_advance_from_sync_internal (__cilkrts_worker *w)
{
    // The current replay entry must be a SYNC
    CILK_ASSERT(ped_type_sync == w->l->replay_list_entry->m_type);

    // Advance to the next entry
    w->l->replay_list_entry = w->l->replay_list_entry->next_entry();
}
Example no. 24
static void deque_add_bottom(CilkWorkerState *const ws, Closure *cl, int pn)
{
     deque_assert_ownership(ws, pn);
     CILK_ASSERT(ws, cl->owner_ready_deque == NOBODY);

     cl->prev_ready = USE_PARAMETER(deques)[pn].bottom;
     cl->next_ready = (Closure *)NULL;
     USE_PARAMETER(deques)[pn].bottom = cl;
     WHEN_CILK_DEBUG(cl->owner_ready_deque = pn);
     
     if (USE_PARAMETER(deques)[pn].top) {
	  CILK_ASSERT(ws, cl->prev_ready);
	  (cl->prev_ready)->next_ready = cl;
     } else {
	  USE_PARAMETER(deques)[pn].top = cl;
     }
}
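Together with deque_xtract_bottom() (Example no. 4) and deque_peek_bottom() (Example no. 9), this function maintains a doubly-linked ready deque whose invariants can be written down directly. A hedged checker sketch over simplified stand-in types; the real Closure and deque definitions live elsewhere in the runtime:

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for the runtime's types. */
typedef struct Closure {
    struct Closure *prev_ready;    /* one step toward top */
    struct Closure *next_ready;    /* one step toward bottom */
} Closure;

typedef struct { Closure *top, *bottom; } ReadyDeque;

/* Invariants maintained by deque_add_bottom()/deque_xtract_bottom(). */
static void deque_check(const ReadyDeque *d)
{
    assert((d->top == NULL) == (d->bottom == NULL));  /* both ends or neither */
    if (d->bottom) {
        assert(d->bottom->next_ready == NULL);
        const Closure *c = d->bottom;
        while (c->prev_ready) {
            assert(c->prev_ready->next_ready == c);   /* links mirror */
            c = c->prev_ready;
        }
        assert(c == d->top);    /* walking prev_ready ends at the top */
    }
}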
Example no. 25
void __cilkrts_note_interval(__cilkrts_worker *w, enum interval i)
{
    if (w) {
        statistics *s = w->l->stats;
        CILK_ASSERT(s->start[i] == INVALID_START);
        s->count[i]++;
    }
}
Example no. 26
static inline void Closure_destroy(CilkWorkerState *const ws, Closure *t)
{
     Closure_checkmagic(ws, t);

     WHEN_CILK_DEBUG(t->magic = ~CILK_CLOSURE_MAGIC);
     CILK_ASSERT(ws, ! (t->malloced));
     Closure_clean(ws->context, t);
     Cilk_internal_free(ws, t, sizeof(Closure));
}
Example no. 27
void __cilkrts_stop_interval(__cilkrts_worker *w, enum interval i)
{
    if (w) {
        statistics *s = w->l->stats;
        CILK_ASSERT(s->start[i] != INVALID_START);
        s->accum[i] += __cilkrts_getticks() - s->start[i];
        s->start[i] = INVALID_START;
    }
}
Example no. 28
void load_pedigree_leaf_into_user_worker(__cilkrts_worker *w)
{
    __cilkrts_pedigree *pedigree_leaf;
    CILK_ASSERT(w->l->type == WORKER_USER);
    pedigree_leaf = __cilkrts_get_tls_pedigree_leaf(1);
    w->pedigree = *pedigree_leaf;

    // Save a pointer to the old leaf.
    // We'll need to restore it later.
    CILK_ASSERT(w->l->original_pedigree_leaf == NULL);
    w->l->original_pedigree_leaf = pedigree_leaf;
    
    __cilkrts_set_tls_pedigree_leaf(&w->pedigree);
    
    // Check that this new pedigree chain is exactly two deep: the leaf and its root.
    CILK_ASSERT(w->pedigree.parent);
    CILK_ASSERT(w->pedigree.parent->parent == NULL);
}
Example no. 29
void global_os_mutex_unlock(void)
{
    // We'd better have allocated a global_os_mutex.  This means you should
    // have called global_os_mutex_lock() before calling
    // global_os_mutex_unlock(), but this is the only check for it.
    CILK_ASSERT(NULL != global_os_mutex);

    // Release the global OS mutex
    __cilkrts_os_mutex_unlock(global_os_mutex);
}
Example no. 30
static void global_free(global_state_t *g, void *mem, int bucket)
{
    size_t size;

    CILK_ASSERT(bucket < FRAME_MALLOC_NBUCKETS);
    size = FRAME_MALLOC_BUCKET_TO_SIZE(bucket);
    g->frame_malloc.allocated_from_global_pool -= size;

    push(&g->frame_malloc.global_free_list[bucket], mem);
}