Example #1
void __cilkrts_os_mutex_lock(struct os_mutex *p)
{
    int status;
    status = pthread_mutex_lock (&p->mutex);
    ITT_SYNC_ACQUIRED(p);
    if (__builtin_expect(status, 0) == 0)
        return;
    if (status == EDEADLK)
        __cilkrts_bug("Cilk runtime error: deadlock acquiring mutex %p\n",
                      p);
    else
        __cilkrts_bug("Cilk runtime error %d acquiring mutex %p\n",
                      status, p);
}
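The wrapper above follows a common pattern: perform the lock unconditionally, treat a zero status as the expected fast path, and map any non-zero pthread error code to a fatal diagnostic. A minimal standalone sketch of the same pattern, where die() is a hypothetical stand-in for __cilkrts_bug:

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <pthread.h>

// Hypothetical stand-in for __cilkrts_bug: report and abort.
static void die(const char *what, int err)
{
    std::fprintf(stderr, "%s: %s\n", what, std::strerror(err));
    std::abort();
}

static void checked_lock(pthread_mutex_t *m)
{
    int status = pthread_mutex_lock(m);
    if (__builtin_expect(status, 0) == 0)
        return;                          // expected case: lock acquired
    if (status == EDEADLK)               // only reported by ERRORCHECK mutexes
        die("deadlock acquiring mutex", status);
    die("error acquiring mutex", status);
}

int main()
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    checked_lock(&m);
    pthread_mutex_unlock(&m);
    return 0;
}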
Example #2
/* allocate a batch of frames of size SIZE from the global pool and
   store them in the worker's free list */
static void allocate_batch(__cilkrts_worker *w, int bucket, size_t size)
{
    global_state_t *g = w->g;

    __cilkrts_mutex_lock(w, &g->frame_malloc.lock); {
#if USE_MMAP
        char *p = mmap(0, 12288, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            __cilkrts_bug("mmap failed %d", errno);
        assert(size < 4096);
        assert(p != MAP_FAILED);
        mprotect(p, 4096, PROT_NONE);
        mprotect(p + 8192, 4096, PROT_NONE);
        w->l->bucket_potential[bucket] += size;
        push(&w->l->free_list[bucket], (struct free_list *)(p + 8192 - size));
#else
        size_t bytes_allocated = 0;
        do {
            w->l->bucket_potential[bucket] += size;
            bytes_allocated += size;
            push(&w->l->free_list[bucket], global_alloc(g, bucket));
        } while (bytes_allocated < g->frame_malloc.batch_size);
#endif
    } __cilkrts_mutex_unlock(w, &g->frame_malloc.lock);

}
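In the USE_MMAP branch above, three pages are mapped and the first and last are made inaccessible, so any overrun past the usable middle page faults immediately; the frame itself is placed flush against the high guard page (p + 8192 - size) so overruns past its end are caught. A minimal sketch of the same guard-page technique, assuming 4 KiB pages and Linux's MAP_ANONYMOUS:

#include <cstdio>
#include <cstring>
#include <sys/mman.h>

int main()
{
    const size_t page = 4096;            // assumed page size, as in the snippet
    // Layout: [guard][usable][guard]
    char *p = (char *)mmap(0, 3 * page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { std::perror("mmap"); return 1; }
    mprotect(p, page, PROT_NONE);            // low guard page
    mprotect(p + 2 * page, page, PROT_NONE); // high guard page

    char *usable = p + page;                 // the single writable page
    std::memset(usable, 0, page);            // fine
    // usable[page] = 0;                     // would fault on the high guard

    munmap(p, 3 * page);
    return 0;
}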
Example #3
struct os_mutex *__cilkrts_os_mutex_create(void)
{
    int status;
    struct os_mutex *mutex = (struct os_mutex *)malloc(sizeof(struct os_mutex));
    pthread_mutexattr_t attr;

    ITT_SYNC_CREATE(mutex, "OS Mutex");

    if (!mutex) {
        if (static_mutex_used) {
            __cilkrts_bug("Cilk RTS library initialization failed");
        } else {
            static_mutex_used = 1;
            mutex = &static_mutex;
        }
    }

    status = pthread_mutexattr_init(&attr);
    CILK_ASSERT (status == 0);
#if defined DEBUG || CILK_LIB_DEBUG 
#ifdef PTHREAD_MUTEX_ERRORCHECK
    status = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#else
    status = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
#endif
    CILK_ASSERT (status == 0);
#endif
    status = pthread_mutex_init (&mutex->mutex, &attr);
    CILK_ASSERT (status == 0);
    pthread_mutexattr_destroy(&attr);

    return mutex;
}
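The PTHREAD_MUTEX_ERRORCHECK attribute that the debug build installs above is what makes the EDEADLK branch of Example #1 reachable: an error-checking mutex reports a same-thread relock instead of deadlocking. A small self-contained demonstration:

#include <cerrno>
#include <cstdio>
#include <pthread.h>

int main()
{
    pthread_mutexattr_t attr;
    pthread_mutex_t m;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);
    pthread_mutexattr_destroy(&attr);    // safe once the mutex is initialized

    pthread_mutex_lock(&m);
    int status = pthread_mutex_lock(&m); // relock from the same thread
    std::printf("second lock returned %s\n",
                status == EDEADLK ? "EDEADLK" : "an unexpected value");

    pthread_mutex_unlock(&m);
    pthread_mutex_destroy(&m);
    return 0;
}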
Example #4
NORETURN
cilk_fiber::remove_reference_from_self_and_resume_other(cilk_fiber_pool* self_pool,
                                                        cilk_fiber*      other)
{
    // Decrement my reference count once (to suspend)
    // Increment other's count (to resume)
    // Suspended fiber should have a reference count of at least 1.  (It is not in a pool).
    this->dec_ref_count();
    other->inc_ref_count();

    // Set a pending remove reference for this fiber, once we have
    // actually switched off.
    other->m_pending_remove_ref = this;
    other->m_pending_pool   = self_pool;

    // Pass along my owner.
    other->owner = this->owner;
    this->owner  = NULL;

    // Since we are deallocating self, this fiber does not become
    // resumable.
    CILK_ASSERT(!this->is_resumable());

    cilk_fiber_sysdep* self = this->sysdep();
    self->jump_to_resume_other_sysdep(other->sysdep());

    __cilkrts_bug("Deallocating fiber.  We should never come back here.");
    std::abort();
}
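The key idea above is that a fiber cannot drop its own last reference while still executing on its own stack, so the release is parked on the target fiber and performed after the switch. A reduced model of that handoff, with hypothetical names (Fiber, resume_other, finish_switch) and the actual stack switch elided:

#include <cassert>
#include <cstdio>

struct Fiber {
    int ref_count = 1;
    Fiber *pending_remove_ref = nullptr;  // fiber to release after switching
};

// 'self' suspends for good and hands control to 'other'.
void resume_other(Fiber *self, Fiber *other)
{
    --self->ref_count;                    // to suspend
    ++other->ref_count;                   // to resume
    other->pending_remove_ref = self;     // deferred: freeing self here would
                                          // pull the stack out from under us
    // ... a real implementation switches stacks here ...
}

// First action of the resumed fiber, on its own stack.
void finish_switch(Fiber *me)
{
    if (Fiber *prev = me->pending_remove_ref) {
        me->pending_remove_ref = nullptr;
        if (prev->ref_count == 0)
            std::puts("previous fiber may now be freed or pooled");
    }
}

int main()
{
    Fiber a, b;
    resume_other(&a, &b);
    finish_switch(&b);
    assert(a.ref_count == 0 && b.ref_count == 2);
    return 0;
}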
Example #5
CILK_EXPORT void* __CILKRTS_STRAND_PURE(
    __cilkrts_hyper_lookup(__cilkrts_hyperobject_base *hb))
{
    __cilkrts_worker* w = __cilkrts_get_tls_worker_fast();
    void* key = get_hyperobject_key(hb);
    if (! w)
        return get_leftmost_view(key);

    // Disable Cilkscreen for the duration of this call.  This will
    // prevent Cilkscreen from reporting apparent races in reducers
    DisableCilkscreen dguard;

    if (__builtin_expect(w->g->force_reduce, 0))
        __cilkrts_promote_own_deque(w);
    cilkred_map* h = w->reducer_map;

    if (__builtin_expect(!h, 0)) {
	h = install_new_reducer_map(w);
    }

    if (h->merging)
        __cilkrts_bug("User error: hyperobject used by another hyperobject");
    elem* el = h->lookup(key);
    if (! el) {
        /* lookup failed; insert a new default element */
        void *rep;

        {
            /* re-enable cilkscreen while calling the constructor */
            EnableCilkscreen eguard;
            if (h->is_leftmost)
            {
                // This special case is called only if the reducer was not
                // registered using __cilkrts_hyper_create, e.g., if this is a
                // C reducer in global scope or if there is no bound worker.
                rep = get_leftmost_view(key);
            }
            else
            {
                rep = hb->__c_monoid.allocate_fn((void*)hb,
						 hb->__view_size);
                // TBD: Handle exception on identity function
                hb->__c_monoid.identity_fn((void*)hb, rep);
            }
        }

#if REDPAR_DEBUG >= 3
	fprintf(stderr, "W=%d, h=%p, inserting key %p, val%p\n",
		w->self,
		h,
		&(hb->__c_monoid),
		rep);
	CILK_ASSERT(w->reducer_map == h);
#endif
        el = h->rehash_and_insert(w, key, hb, rep);
    }

    return el->val;
}
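The slow path above is lookup-or-create: on a miss, the monoid's identity function builds a fresh view, which is then inserted so later lookups hit. A reduced model of that pattern using std::unordered_map, with a long accumulator (identity 0) standing in for the view:

#include <cstdio>
#include <unordered_map>

// Reduced model of a per-worker reducer map: key -> view.
struct ViewMap {
    std::unordered_map<const void *, long> views;

    long &lookup(const void *key)
    {
        auto it = views.find(key);
        if (it == views.end())                  // miss: create the identity view
            it = views.emplace(key, 0L).first;  // identity of (long, +) is 0
        return it->second;
    }
};

int main()
{
    int reducer;                           // its address serves as the key
    ViewMap m;
    m.lookup(&reducer) += 42;              // first call creates the view
    m.lookup(&reducer) += 1;               // later calls reuse it
    std::printf("view = %ld\n", m.lookup(&reducer));
    return 0;
}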
Example #6
void __cilkrts_c_resume_except (_Unwind_Exception *exc)
{
#if DEBUG_EXCEPTIONS
    fprintf(stderr, "resume exception %p\n", exc);
#endif
    _Unwind_Reason_Code why = _Unwind_RaiseException(exc);
    __cilkrts_bug ("Cilk runtime error: failed to reinstate suspended exception %p (%d)\n", exc, why);
}
Example #7
void cilk_fiber_sysdep::free_stack()
{
    if (m_stack) {
        size_t rounded_stack_size = m_stack_base - m_stack + s_page_size;
        if (munmap(m_stack, rounded_stack_size) < 0)
            __cilkrts_bug("Cilk: stack munmap failed error %d\n", errno);
    }
}
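The arithmetic above implies a layout in which m_stack is the low end of the mapping and m_stack_base sits one guard page below the high end, so adding s_page_size back recovers the full mapped length for munmap. A sketch of a matching allocate/free pair, under that assumed layout:

#include <cstdio>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t total = 16 * page;                 // stack plus two guard pages

    char *m_stack = (char *)mmap(0, total, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (m_stack == MAP_FAILED) { std::perror("mmap"); return 1; }
    mprotect(m_stack, page, PROT_NONE);                // low guard
    mprotect(m_stack + total - page, page, PROT_NONE); // high guard
    char *m_stack_base = m_stack + total - page;       // usable top

    // free_stack(): the high guard lies above m_stack_base, so add one
    // page to recover the full length originally passed to mmap.
    size_t rounded_stack_size = (size_t)(m_stack_base - m_stack) + page;
    if (munmap(m_stack, rounded_stack_size) < 0)
        std::perror("munmap");
    return 0;
}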
Example #8
CILK_EXPORT void __CILKRTS_STRAND_STALE(
    __cilkrts_hyper_destroy(__cilkrts_hyperobject_base *hb))
{
    // Disable Cilkscreen for the duration of this call.  The destructor for
    // this class will re-enable Cilkscreen when the method returns.  This
    // will prevent Cilkscreen from reporting apparent races in reducers
    DisableCilkscreen x;

    __cilkrts_worker* w = __cilkrts_get_tls_worker();
    if (! w) {
        // If no worker, then Cilk is not running and there is no reducer
        // map.  Do nothing.  The reducer's destructor will take care of
        // destroying the leftmost view.
        return;
    }

    static const char *UNSYNCED_REDUCER_MSG =
        "Destroying a reducer while it is visible to unsynced child tasks, or\n"
        "calling CILK_C_UNREGISTER_REDUCER() on an unregistered reducer.\n"
        "Did you forget a _Cilk_sync or CILK_C_REGISTER_REDUCER()?";

    cilkred_map* h = w->reducer_map;
    if (NULL == h)
	cilkos_error(UNSYNCED_REDUCER_MSG); // Does not return

    if (h->merging) {
	verify_current_wkr(w);
	__cilkrts_bug("User error: hyperobject used by another hyperobject");
    }

    void* key = get_hyperobject_key(hb);
    elem *el = h->lookup(key);

    // Verify that the reducer is being destroyed from the leftmost strand for
    // which the reducer is defined.
    if (! (el && el->is_leftmost()))
	cilkos_error(UNSYNCED_REDUCER_MSG);

#if REDPAR_DEBUG >= 3
    fprintf(stderr, "[W=%d, key=%p, lookup in map %p, found el=%p, about to destroy]\n",
            w->self, key, h, el);
#endif
	
    // Remove the element from the hash bucket.  Do not bother shrinking
    // the bucket. Note that the destroy() function does not actually
    // call the destructor for the leftmost view.
    el->destroy();
    do {
        el[0] = el[1];
        ++el;
    } while (el->key);
    --h->nelem;

#if REDPAR_DEBUG >= 2
    fprintf(stderr, "[W=%d, desc=hyper_destroy_finish, key=%p, w->reducer_map=%p]\n",
	    w->self, key, w->reducer_map);
#endif 
}
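The removal loop above relies on each hash bucket being an array terminated by a null-key sentinel: shifting every following element (sentinel included) down one slot deletes the entry without shrinking the bucket. The same loop on a standalone bucket:

#include <cstdio>

struct elem { const char *key; int val; };

// Remove *el from a null-key-terminated bucket by shifting the tail down.
static void remove_elem(elem *el)
{
    do {
        el[0] = el[1];
        ++el;
    } while (el->key);
}

int main()
{
    elem bucket[] = { {"a", 1}, {"b", 2}, {"c", 3}, {nullptr, 0} };
    remove_elem(&bucket[1]);                    // delete "b"
    for (elem *e = bucket; e->key; ++e)
        std::printf("%s=%d\n", e->key, e->val); // prints a=1, then c=3
    return 0;
}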
Example #9
void __cilkrts_setup_for_execution_sysdep(__cilkrts_worker *w, full_frame *ff)
{
    // ASSERT: We own w->lock and ff->lock || P == 1

    __cxa_eh_globals *state = __cxa_get_globals ();
    struct pending_exception_info *info = w->l->pending_exception;

    if (info == NULL)
        return;

    w->l->pending_exception = 0;

#if DEBUG_EXCEPTIONS
    _Unwind_Exception *exc = info->active;
    if (exc) {
        fflush(stdout);
        fprintf(stderr, "__cilkrts_resume_except W%u %p->%p [%u %p]\n",
                w->self, exc,
                to_cxx(exc)->nextException,
                info->runtime_state.uncaughtExceptions,
                info->runtime_state.caughtExceptions);
        /*CILK_ASSERT(info->runtime_state.uncaughtExceptions > 0);*/
    }
#endif

    if (state->uncaughtExceptions || state->caughtExceptions)
        __cilkrts_bug("W%u: resuming with non-empty prior exception state %u %p\n", state->uncaughtExceptions, state->caughtExceptions);

    *state = info->runtime_state;
    info->runtime_state.caughtExceptions = 0;
    info->runtime_state.uncaughtExceptions = 0;

    if (info->rethrow) {
        info->rethrow = false;
        /* Resuming function will rethrow.  Runtime calls
           std::terminate if there is no caught exception. */
        ff->call_stack->flags |= CILK_FRAME_EXCEPTING;
    }
    if (info->active) {
        ff->call_stack->flags |= CILK_FRAME_EXCEPTING;
        ff->call_stack->except_data = info->active;
        info->active = 0;
    }

    if (info->empty()) {
        info->destruct();
        __cilkrts_frame_free(w, info, sizeof *info);
        w->l->pending_exception = NULL;
    }

#if CILK_LIB_DEBUG
    if (ff->call_stack->except_data)
        CILK_ASSERT(std::uncaught_exception());
#endif
}
Example #10
CILK_EXPORT
void __cilkrts_hyper_create(__cilkrts_hyperobject_base *hb)
{
    // This function registers the specified hyperobject in the current
    // reducer map and registers the initial value of the hyperobject as the
    // leftmost view of the reducer.
    __cilkrts_worker *w = __cilkrts_get_tls_worker();
    if (! w) {
        // If there is no worker, then there is nothing to do: The initial
        // value will automatically be used as the leftmost view when we
        // enter Cilk.
        return;
    }

    // Disable Cilkscreen for the duration of this call.  The destructor for
    // this class will re-enable Cilkscreen when the method returns.  This
    // will prevent Cilkscreen from reporting apparent races in reducers
    DisableCilkscreen x;

    void* key = get_hyperobject_key(hb);
    void* view = get_leftmost_view(key);
    cilkred_map *h = w->reducer_map;

    if (__builtin_expect(!h, 0)) {
	h = install_new_reducer_map(w);
#if REDPAR_DEBUG >= 2
	fprintf(stderr, "[W=%d, hb=%p, hyper_create, isntalled new map %p, view=%p]\n",
		w->self, hb, h, view);
#endif
    }

    /* Must not exist. */
    CILK_ASSERT(h->lookup(key) == NULL);

#if REDPAR_DEBUG >= 3
    verify_current_wkr(w);
    fprintf(stderr, "[W=%d, hb=%p, lookup in map %p of view %p, should be null]\n",
	    w->self, hb, h, view);
    fprintf(stderr, "W=%d, h=%p, inserting key %p, view%p\n",
	    w->self,
	    h,
	    &(hb->__c_monoid),
	    view);
#endif    

    if (h->merging)
        __cilkrts_bug("User error: hyperobject used by another hyperobject");

    CILK_ASSERT(w->reducer_map == h);
    // The address of the leftmost value is the same as the key for lookup.
    (void) h->rehash_and_insert(w, view, hb, view);
}
Example #11
void __cilkrts_run_scheduler_with_exceptions(__cilkrts_worker *w)
{
    global_state_t* g = cilkg_get_global_state();
    CILK_ASSERT(g->scheduler);

    cpp_scheduler_t* scheduler = (cpp_scheduler_t*) g->scheduler;

    try {
        scheduler(w);
    } catch (...) {
        __cilkrts_bug("Exception escaped Cilk context");
    }
}
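The catch-all above is a containment barrier: any exception escaping the scheduler is converted into one deterministic fatal diagnostic rather than whatever std::terminate behavior the unwinder would otherwise produce. The shape of that barrier in isolation, with fatal() as a hypothetical stand-in for __cilkrts_bug:

#include <cstdio>
#include <cstdlib>
#include <stdexcept>

[[noreturn]] static void fatal(const char *msg)
{
    std::fprintf(stderr, "fatal: %s\n", msg);
    std::abort();
}

static void scheduler_body()
{
    throw std::runtime_error("simulated escaped exception");
}

int main()
{
    try {
        scheduler_body();
    } catch (...) {
        fatal("Exception escaped Cilk context");  // never unwinds past here
    }
}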
Example #12
CILK_EXPORT void __CILKRTS_STRAND_STALE(
    __cilkrts_hyper_destroy(__cilkrts_hyperobject_base *hb))
{
    // Disable Cilkscreen for the duration of this call.  The destructor for
    // this class will re-enable Cilkscreen when the method returns.  This
    // will prevent Cilkscreen from reporting apparent races in reducers
    DisableCilkscreen x;

    __cilkrts_worker* w = __cilkrts_get_tls_worker();
    if (! w) {
        // If no worker, then Cilk is not running and there is no reducer
        // map.  Do nothing.  The reducer's destructor will take care of
        // destroying the leftmost view.
        return;
    }

    cilkred_map* h = w->reducer_map;
    CILK_ASSERT(h);

    if (h->merging) {
	verify_current_wkr(w);
	__cilkrts_bug("User error: hyperobject used by another hyperobject");
    }

    void* key = get_hyperobject_key(hb);
    elem *el = h->lookup(key);
    if (el) {
        /* found. */

#if REDPAR_DEBUG >= 3
	fprintf(stderr, "[W=%d, key=%p, lookup in map %p, found el=%p, about to destroy]\n",
		w->self, key, h, el);
#endif
	
        /* Destroy view and remove element from bucket. */
        el->destroy();

        /* Shift all subsequent elements.  Do not bother
           shrinking the bucket */
        do {
            el[0] = el[1];
            ++el;
        } while (el->key);
        --h->nelem;
    }

#if REDPAR_DEBUG >= 2
    fprintf(stderr, "[W=%d, desc=hyper_destroy_finish, key=%p, w->reducer_map=%p]\n",
	    w->self, key, w->reducer_map);
#endif 
}
Example #13
void __cilkrts_frame_malloc_global_cleanup(global_state_t *g)
{
    struct pool_cons *c;

    if (g->frame_malloc.check_for_leaks) {
        size_t memory_in_global_list = count_memory_in_global_list(g);
        // TBD: This check is weak.  Short of memory corruption,
        // I don't see how we have more memory in the free list
        // than allocated from the os.
        // Ideally, we should count the memory in the global free list
        // and check that we have it all.  But I believe the runtime
        // itself also uses some memory, which is not being tracked.
        if (memory_in_global_list > g->frame_malloc.allocated_from_os) {
            __cilkrts_bug("\nError. The Cilk runtime data structures may have been corrupted.\n");
        }
    }
    
    while ((c = g->frame_malloc.pool_list)) {
        g->frame_malloc.pool_list = c->cdr;
        __cilkrts_free(c->p);
        __cilkrts_free(c);
    }

    __cilkrts_mutex_destroy(0, &g->frame_malloc.lock);

    // Check that all the memory moved from the global pool into
    // workers has been returned to the global pool.
    if (g->frame_malloc.check_for_leaks
        && (g->frame_malloc.allocated_from_global_pool != 0))
    {
        __cilkrts_bug("\n"
                      "---------------------------" "\n"
                      "  MEMORY LEAK DETECTED!!!  " "\n"
                      "---------------------------" "\n"
                      "\n"
            );
    }
}
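The teardown loop above pops a cons-style list of pools, freeing each pool's backing memory and then the list cell itself. The same idiom in isolation:

#include <cstdlib>

struct pool_cons {
    char *p;                 // the pool's backing allocation
    struct pool_cons *cdr;   // next cell
};

static void free_pool_list(struct pool_cons **list)
{
    struct pool_cons *c;
    while ((c = *list)) {    // pop the head until the list is empty
        *list = c->cdr;
        free(c->p);          // payload first...
        free(c);             // ...then the cell that pointed at it
    }
}

int main(void)
{
    struct pool_cons *list = NULL;
    for (int i = 0; i < 3; i++) {
        struct pool_cons *c = (struct pool_cons *)malloc(sizeof *c);
        c->p = (char *)malloc(4096);
        c->cdr = list;
        list = c;
    }
    free_pool_list(&list);
    return 0;
}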
Example #14
NORETURN cilk_fiber_sysdep::jump_to_resume_other_sysdep(cilk_fiber_sysdep* other)
{
#if SUPPORT_GET_CURRENT_FIBER
    cilkos_set_tls_cilk_fiber(other);
#endif
    CILK_ASSERT(!this->is_resumable());

    // Jump to the other fiber.  But we are never coming back because
    // this fiber is being reset.
    resume_other_sysdep(other);

    // We should never come back here...
    __cilkrts_bug("Should not get here");
}
Example #15
void __attribute__((nonnull)) __cilkrts_gcc_rethrow(__cilkrts_stack_frame *sf)
{
#ifdef __CYGWIN__
    // Cygwin doesn't support exceptions, so _Unwind_Resume isn't available
    // Which means we can't support exceptions either
    __cilkrts_bug("The Cygwin implementation of the Intel Cilk Plus runtime doesn't support exceptions\n");
#else
    if (sf->except_data) {
#if CILK_LIB_DEBUG
        CILK_ASSERT(std::uncaught_exception());
#endif        
        _Unwind_Resume ((_Unwind_Exception *)sf->except_data);
    } else {
        throw;
    }
#endif  // __CYGWIN__
}
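The else branch above uses a bare `throw;`, which re-raises the exception currently being handled. That half of the pattern is easy to demonstrate standalone (the _Unwind_Resume half requires an unwind already in progress, so it is omitted here):

#include <cstdio>
#include <stdexcept>

static void rethrow_current()
{
    throw;   // valid only while an exception is being handled;
             // otherwise std::terminate is called
}

int main()
{
    try {
        try {
            throw std::runtime_error("original");
        } catch (...) {
            rethrow_current();                  // re-raises the same object
        }
    } catch (const std::runtime_error &e) {
        std::printf("caught again: %s\n", e.what());
    }
    return 0;
}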
Example #16
CILK_API_VOID __cilkrts_end_cilk(void)
{
    // Take out the global OS mutex while we shut down, to protect
    // against another thread attempting to bind at the same time
    global_os_mutex_lock();

    if (cilkg_is_published()) {
        global_state_t *g = cilkg_get_global_state();
        if (g->Q || __cilkrts_get_tls_worker())
            __cilkrts_bug("Attempt to shut down Cilk while Cilk is still "
                          "running");
        __cilkrts_stop_workers(g);
        __cilkrts_deinit_internal(g);
    }

    global_os_mutex_unlock();
}
Example #17
File: bug.cpp  Project: 0day-ci/gcc
COMMON_SYSDEP void abort_because_rts_is_corrupted(void)
{
    __cilkrts_bug("The Cilk Plus runtime system detected a corruption "
                  "in its data structures.  This is most likely caused "
                  "by an application bug.  Aborting execution.\n");
}
Example #18
__cilkrts_worker* cilkred_map::merge(__cilkrts_worker *w,
				     cilkred_map *other_map,
				     enum merge_kind kind)
{
    // Disable Cilkscreen while we merge the maps.  The destructor for
    // the guard class will re-enable Cilkscreen when it goes out of scope.
    // This will prevent Cilkscreen from reporting apparent races in between
    // the reduce function and the reducer operations.  The Cilk runtime
    // guarantees that a pair of reducer maps will only be merged when no 
    // other strand will access them.
    DisableCilkscreen guard;

#if REDPAR_DEBUG >= 2
    fprintf(stderr, "[W=%d, desc=merge, this_map=%p, other_map=%p]\n",
	    w->self,
	    this, other_map);
#endif
    // Remember the current stack frame.
    __cilkrts_stack_frame *current_sf = w->current_stack_frame;
    merging = true;
    other_map->merging = true;

    // Merging to the leftmost view is a special case because every leftmost
    // element must be initialized before the merge.
    CILK_ASSERT(!other_map->is_leftmost /* || kind == MERGE_UNORDERED */);
    bool merge_to_leftmost = (this->is_leftmost
                              /* && !other_map->is_leftmost */);

    DBG check(/*allow_null_val=*/false);
    DBG other_map->check(/*allow_null_val=*/false);

    for (size_t i = 0; i < other_map->nbuckets; ++i) {
        bucket *b = other_map->buckets[i];
        if (b) {
            for (elem *other_el = b->el; other_el->key; ++other_el) {
                /* Steal the value from the other map, which will be
                   destroyed at the end of this operation. */
                void *other_val = other_el->val;
                CILK_ASSERT(other_val);

                void *key = other_el->key;
		__cilkrts_hyperobject_base *hb = other_el->hb;
                elem *this_el = lookup(key);

                if (this_el == 0 && merge_to_leftmost) {
                    /* Initialize leftmost view before merging. */
                    void* leftmost = get_leftmost_view(key);
                    // leftmost == other_val can be true if the initial view
                    // was created in other than the leftmost strand of the
                    // spawn tree, but then made visible to subsequent strands
                    // (E.g., the reducer was allocated on the heap and the
                    // pointer was returned to the caller.)  In such cases,
                    // parallel semantics says that syncing with earlier
                    // strands will always result in 'this_el' being null,
                    // thus propagating the initial view up the spawn tree
                    // until it reaches the leftmost strand.  When synching
                    // with the leftmost strand, leftmost == other_val will be
                    // true and we must avoid reducing the initial view with
                    // itself.
                    if (leftmost != other_val)
                        this_el = rehash_and_insert(w, key, hb, leftmost);
                }

                if (this_el == 0) {
                    /* move object from other map into this one */
                    rehash_and_insert(w, key, hb, other_val);
                    other_el->val = 0;
                    continue; /* No element-level merge necessary */
                }

                /* The same key is present in both maps with values
                   A and B.  Three choices: fail, A OP B, B OP A. */
                switch (kind)
                {
                case MERGE_UNORDERED:
                    __cilkrts_bug("TLS Reducer race");
                    break;
                case MERGE_INTO_RIGHT:
                    /* Swap elements in order to preserve object
                       identity */
                    other_el->val = this_el->val;
                    this_el->val = other_val;
                    /* FALL THROUGH */
                case MERGE_INTO_LEFT: {
                    /* Stealing should be disabled during reduce
                       (even if force-reduce is enabled). */

#if DISABLE_PARALLEL_REDUCERS
		    __cilkrts_stack_frame * volatile *saved_protected_tail;
		    saved_protected_tail = __cilkrts_disallow_stealing(w, NULL);
#endif

		    {			
			CILK_ASSERT(current_sf->worker == w);
			CILK_ASSERT(w->current_stack_frame == current_sf);

			/* TBD: if reduce throws an exception we need to stop it
			   here. */
			hb->__c_monoid.reduce_fn((void*)hb,
						 this_el->val,
						 other_el->val);
			w = current_sf->worker;

#if REDPAR_DEBUG >= 2
			verify_current_wkr(w);
			CILK_ASSERT(w->current_stack_frame == current_sf);
#endif
		    }

#if DISABLE_PARALLEL_REDUCERS
		    /* Restore stealing */
		    __cilkrts_restore_stealing(w, saved_protected_tail);
#endif

                  } break;
                }
            }
        }
    }
    this->is_leftmost = this->is_leftmost || other_map->is_leftmost;
    merging = false;
    other_map->merging = false;
    verify_current_wkr(w);
    __cilkrts_destroy_reducer_map(w, other_map);
    return w;
}
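Stripped of the leftmost-view and ordering concerns, merge() is a map-merge with a user-supplied combine step: keys present only on the right move across, keys present on both sides are reduced, and the right map ends up consumed. A reduced model (the MERGE_INTO_LEFT case only) over std::unordered_map:

#include <cstdio>
#include <unordered_map>

using Map = std::unordered_map<int, long>;

// Merge 'other' into 'self', combining collisions with 'reduce' and
// keeping the left value as the accumulator, then consume 'other'.
static void merge_into_left(Map &self, Map &other,
                            long (*reduce)(long left, long right))
{
    for (auto &kv : other) {
        auto it = self.find(kv.first);
        if (it == self.end())
            self.emplace(kv.first, kv.second);       // move value across
        else
            it->second = reduce(it->second, kv.second);
    }
    other.clear();                                   // other map is consumed
}

int main()
{
    Map left{{1, 10}, {2, 20}};
    Map right{{2, 2}, {3, 3}};
    merge_into_left(left, right, [](long a, long b) { return a + b; });
    for (auto &kv : left)
        std::printf("%d -> %ld\n", kv.first, kv.second);
    return 0;
}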
Example #19
NORETURN cilk_fiber_sysdep::run()
{
    // Only fibers created from a pool have a proc method to run.
    CILK_ASSERT(m_start_proc);
    CILK_ASSERT(!this->is_allocated_from_thread());
    CILK_ASSERT(!this->is_resumable());

    // TBD: This setjmp/longjmp pair simply changes the stack pointer.
    // We could probably replace this code with some assembly.
    if (! CILK_SETJMP(m_resume_jmpbuf))
    {
        // Calculate the size of the current stack frame (i.e., this
        // run() function).
        size_t frame_size = (size_t)JMPBUF_FP(m_resume_jmpbuf) - (size_t)JMPBUF_SP(m_resume_jmpbuf);

        // Macs require 16-byte alignment.  Do it always because it just
        // doesn't matter
        if (frame_size & (16-1))
            frame_size += 16 - (frame_size  & (16-1));

        // Assert that we are getting a reasonable frame size out of
        // it.  If this run() function is using more than 4096 bytes
        // of space for its local variables / any state that spills to
        // registers, something is probably *very* wrong here...
        //
        // 4096 bytes just happens to be a number that seems "large
        // enough" --- for an example GCC 32-bit compilation, the
        // frame size was 48 bytes.
        CILK_ASSERT(frame_size < 4096);

        // Change stack pointer to fiber stack.  Offset the
        // calculation by the frame size, so that we've allocated
        // enough extra space from the top of the stack we are
        // switching to for any temporaries required for this run()
        // function.
        JMPBUF_SP(m_resume_jmpbuf) = CILK_ADJUST_SP(m_stack_base - frame_size);

        // GCC doesn't allow us to call __builtin_longjmp in the same function
    // that calls __builtin_setjmp, so it's been moved into its own
        // function that cannot be inlined.
        do_cilk_longjmp(m_resume_jmpbuf);
    }

    // Note: our resetting of the stack pointer is valid only if the
    // compiler has not saved any temporaries onto the stack for this
    // function before the longjmp that we still care about at this
    // point.
    
    // Verify that 1) 'this' is still valid and 2) '*this' has not been
    // corrupted.
    CILK_ASSERT(magic_number == m_magic);

    // If the fiber that switched to me wants to be deallocated, do it now.
    do_post_switch_actions();

    // Now call the user proc on the new stack
    m_start_proc(this);

    // alloca() to force generation of frame pointer.  The argument to alloca
    // is contrived to prevent the compiler from optimizing it away.  This
    // code should never actually be executed.
    int* dummy = (int*) alloca((sizeof(int) + (std::size_t) m_start_proc) & 0x1);
    *dummy = 0xface;

    // User proc should never return.
    __cilkrts_bug("Should not get here");
}
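Two structural points above generalize: setjmp records a resume point whose jump buffer can be edited before use, and the longjmp must live in a separate non-inlinable function when the GCC builtins are involved. The bare control-transfer skeleton with standard setjmp/longjmp (no stack-pointer rewriting, which is inherently platform-specific):

#include <csetjmp>
#include <cstdio>

static jmp_buf resume_point;

// Out of line for the same reason as do_cilk_longjmp: GCC forbids
// __builtin_longjmp in the function that performed __builtin_setjmp.
__attribute__((noinline)) static void jump_back()
{
    longjmp(resume_point, 1);
}

int main()
{
    if (!setjmp(resume_point)) {
        std::puts("first pass: about to jump");
        jump_back();              // control reappears at setjmp above
    }
    std::puts("second pass: resumed after longjmp");
    return 0;
}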