Example #1
0
/* Free one task stack segment: drop its Valgrind registration (undoing the
 * VALGRIND_STACK_REGISTER done at creation), log the pointer, then release
 * the memory through the task's allocator. */
static void
del_stk(rust_task *task, stk_seg *stk)
{
    /* Deregister before the memory is freed so Valgrind never tracks a
       freed block as a stack. */
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}
/* Demo driver: set up two ucontexts that each run hello() with the other
 * context as argument, bounce into them, then drop both Valgrind stack
 * registrations before exiting. */
int main(int argc, char **argv)
{
    /* NOTE(review): init_context() appears to return the id produced by
       VALGRIND_STACK_REGISTER for each context's stack — confirm in its
       definition; ctx1/ctx2/oldc are file-level globals. */
    int c1 = init_context(&ctx1);
    int c2 = init_context(&ctx2);

    /* Each context runs hello() with a pointer to the *other* context,
       so they can swap back and forth. */
    makecontext(&ctx1, (void (*)()) hello, 1, &ctx2);
    makecontext(&ctx2, (void (*)()) hello, 1, &ctx1);

    /* Save the current state in oldc and start running ctx1. */
    swapcontext(&oldc, &ctx1);

    VALGRIND_STACK_DEREGISTER(c1);
    
    VALGRIND_STACK_DEREGISTER(c2);
    

    return 0;
}
Example #3
0
  // Return this fiber's stack memory to the heap, if any is held.
  // Safe to call on an already-freed stack: address_ is zeroed after
  // release, making the call a no-op the second time.
  void FiberStack::free() {
    if(address_) {
#ifdef HAVE_VALGRIND_H
      // Drop the Valgrind registration while the memory is still valid.
      VALGRIND_STACK_DEREGISTER(valgrind_id_);
#endif
      ::free(address_);
      address_ = 0;
    }
  }
Example #4
0
/* Release everything a fiber owns: its Valgrind stack registration, its
 * saved machine context (if allocated), its stack buffer, and finally the
 * fiber object itself.  Order matters: the fiber struct is freed last. */
void fiber_free(ACL_FIBER *fiber)
{
#ifdef USE_VALGRIND
	/* Deregister before the stack buffer below is freed. */
	VALGRIND_STACK_DEREGISTER(fiber->vid);
#endif
	if (fiber->context)
		acl_myfree(fiber->context);
	acl_myfree(fiber->buff);
	acl_myfree(fiber);
}
        // Hand a stack back to this allocator.  Only the Valgrind
        // registration is dropped here; no memory is released.
        // NOTE(review): the absence of free/munmap is presumably because
        // this "memory" allocator works on caller-provided storage —
        // confirm against the allocate() counterpart.
        void stack_allocator_memory::deallocate(stack_context & ctx)
        {
            assert(ctx.sp);
            assert(stack_traits::minimum_size() <= ctx.size);
            assert(stack_traits::is_unbounded() || (stack_traits::maximum_size() >= ctx.size));
            
#if defined(COPP_MACRO_USE_VALGRIND)
            VALGRIND_STACK_DEREGISTER( ctx.valgrind_stack_id);
#endif
        }
/* Thread body for a Valgrind stack-registration stress test: grow the
 * stack, poke below SP, then (optionally) abuse the stack-registration
 * client requests and verify that describe_many() survives each bogus
 * configuration. */
static void* child_fn_0 ( void* arg )
{
   grow_the_stack();
   bad_things_below_sp();

   /* bad_things_till_guard_page() is expected to longjmp back here via
      the 'goback' jump buffer once it hits the guard page. */
   if (setjmp(goback)) {
      describe_many();
   } else
      bad_things_till_guard_page();

   if (shake_with_wrong_registration) {
      // Do whatever stupid things we could imagine
      // with stack registration and see no explosion happens
      // Note: this is executed only if an arg is given to the program.
      // 
      
      const int pgsz = guess_pagesize();
      int stackid;

      fprintf(stderr, "\n\nShaking after unregistering stack\n");
      // Assuming our first stack was automatically registered as nr 1
      VALGRIND_STACK_DEREGISTER(1);
      // Test with no stack registered
      describe_many();

      fprintf(stderr, "\n\nShaking with small stack\n");
      /* Register a stack covering only the single page that holds
         'stackid' itself. */
      stackid = VALGRIND_STACK_REGISTER((void*) VG_ROUNDDN(&stackid, pgsz),
                                        (void*) VG_ROUNDUP(&stackid, pgsz));
      describe_many();
      VALGRIND_STACK_DEREGISTER(stackid);

      fprintf(stderr, "\n\nShaking with huge stack\n");
      /* Register an absurd stack spanning from address 0 upward. */
      stackid = VALGRIND_STACK_REGISTER((void*) 0x0,
                                        (void*) VG_ROUNDUP(&stackid, 2<<20));
      describe_many();
      VALGRIND_STACK_DEREGISTER(stackid);


   }

   return NULL;
}
        // Return a malloc()-ed stack to the heap.  ctx.sp marks the top
        // of the stack, so the pointer originally returned by malloc sits
        // ctx.size bytes below it.
        void stack_allocator_malloc::deallocate(stack_context & ctx)
        {
            assert(ctx.sp);
            assert(stack_traits::minimum_size() <= ctx.size);
            assert(stack_traits::is_unbounded() || (stack_traits::maximum_size() >= ctx.size));

#if defined(COPP_MACRO_USE_VALGRIND)
            // Drop the Valgrind registration before the memory disappears.
            VALGRIND_STACK_DEREGISTER( ctx.valgrind_stack_id);
#endif
            char* base = static_cast< char * >(ctx.sp) - ctx.size;
            free(base);
        }
    // Give a heap-allocated stack back to std::free.  sctx.sp is the
    // upper end of the allocation; the block handed out by the allocator
    // begins sctx.size bytes lower.
    void deallocate( stack_context & sctx) {
        BOOST_ASSERT( sctx.sp);
        BOOST_ASSERT( traits_type::minimum_size() <= sctx.size);
        BOOST_ASSERT( traits_type::is_unbounded() || ( traits_type::maximum_size() >= sctx.size) );

#if defined(BOOST_USE_VALGRIND)
        // Deregister while the memory is still a valid allocation.
        VALGRIND_STACK_DEREGISTER( sctx.valgrind_stack_id);
#endif

        char * base = static_cast< char * >( sctx.sp) - sctx.size;
        std::free( base);
    }
    // Unmap a mmap()-ed stack.  sctx.sp is the top of the stack; the
    // mapping starts sctx.size bytes below it.
    void deallocate( stack_context & sctx) {
        BOOST_ASSERT( sctx.sp);
        BOOST_ASSERT( traits_type::minimum_size() <= sctx.size);
        BOOST_ASSERT( traits_type::is_unbounded() || ( traits_type::maximum_size() >= sctx.size) );

#if defined(BOOST_USE_VALGRIND)
        // Deregister while the mapping still exists.
        VALGRIND_STACK_DEREGISTER( sctx.valgrind_stack_id);
#endif

        char * base = static_cast< char * >( sctx.sp) - sctx.size;
        // munmap() conforms to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L)
        ::munmap( base, sctx.size);
    }
Example #10
0
/* Destroy a SysV-ucontext simix context.  The Valgrind stack registration
 * is dropped first, while the context memory is still valid, then the
 * base-class destructor handles the rest. */
static void smx_ctx_sysv_free(smx_context_t context)
{

  if (context) {

#ifdef HAVE_VALGRIND_VALGRIND_H
    VALGRIND_STACK_DEREGISTER(((smx_ctx_sysv_t)
                               context)->valgrind_stack_id);
#endif                          /* HAVE_VALGRIND_VALGRIND_H */

  }
  /* Called even when context is NULL — presumably a no-op in that case;
     confirm in smx_ctx_base_free(). */
  smx_ctx_base_free(context);
}
Example #11
0
File: cr.c Project: jimjag/libdill
/* Gets called when coroutine handle is closed.  Cancels the coroutine if
   it is still running, waits for it to stop, optionally records stack
   usage, then releases its stack. */
static void dill_cr_close(struct dill_hvfs *vfs) {
    struct dill_ctx_cr *ctx = &dill_getctx->cr;
    /* Recover the coroutine object from its embedded vfs member. */
    struct dill_cr *cr = dill_cont(vfs, struct dill_cr, vfs);
    /* If the coroutine has already finished, we are done. */
    if(!cr->done) {
        /* No blocking calls from this point on. */
        cr->no_blocking1 = 1;
        /* Resume the coroutine if it was blocked. */
        if(!cr->ready.next)
            dill_cancel(cr, ECANCELED);
        /* Wait for the coroutine to stop executing. With no clauses added,
           the only mechanism to resume is through dill_cancel(). This is not
           really a blocking call, although it looks like one. Given that the
           coroutine that is being shut down is not permitted to block, we
           should get control back pretty quickly. */
        cr->closer = ctx->r;
        int rc = dill_wait();
        /* This assertion triggers when coroutine tries to close a bundle that
           it is part of. There's no sane way to handle that so let's just
           crash the process. */
        dill_assert(!(rc == -1 && errno == ECANCELED));
        dill_assert(rc == -1 && errno == 0);
    }
#if defined DILL_CENSUS
    /* Find the first overwritten byte on the stack.
       Determine stack usage based on that.
       NOTE(review): assumes the stack was pre-filled with the
       0xa0 + (i % 13) pattern at creation — confirm in the allocator. */
    uint8_t *bottom = ((uint8_t*)cr) - cr->stacksz;
    int i;
    for(i = 0; i != cr->stacksz; ++i) {
        if(bottom[i] != 0xa0 + (i % 13)) {
            /* dill_cr is located on the stack so we have to take that into
               account. Also, it may be necessary to align the top of the stack
               to a 16-byte boundary, so add 16 bytes to account for that. */
            size_t used = cr->stacksz - i - sizeof(struct dill_cr) + 16;
            if(used > cr->census->max_stack)
                cr->census->max_stack = used;
            break;
        }
    }
#endif
#if defined DILL_VALGRIND
    /* Deregister before dill_freestack() releases the memory below. */
    VALGRIND_STACK_DEREGISTER(cr->sid);
#endif
    /* Now that the coroutine is finished, deallocate it. */
    if(!cr->mem) dill_freestack(cr + 1);
}
Example #12
0
/* Release a coroutine stack.  When native fibers are used the OS owns the
 * stack, so nothing is done.  Otherwise the Valgrind registration is
 * dropped first, then the memory is unmapped or freed. */
void
coro_stack_free (struct coro_stack *stack)
{
#if CORO_FIBER
  /* nop */
#else
  #if CORO_USE_VALGRIND
    /* Deregister before the memory is released below. */
    VALGRIND_STACK_DEREGISTER (stack->valgrind_id);
  #endif

  #if CORO_MMAP
    /* The mapping begins CORO_GUARDPAGES pages below sptr and is that
       much larger than the usable size. */
    if (stack->sptr)
      munmap ((void*)((char *)stack->sptr - CORO_GUARDPAGES * PAGESIZE),
              stack->ssze                 + CORO_GUARDPAGES * PAGESIZE);
  #else
    free (stack->sptr);
  #endif
#endif
}
Example #13
0
/* Terminate the calling thread: record its return value, run per-thread
 * cleanup, wake any joiner waiting on the term condvar, release the
 * thread's stack, and switch away to another runnable thread. */
void st_thread_exit(void *retval)
{
  _st_thread_t *thread = _ST_CURRENT_THREAD();

  thread->retval = retval;
  _st_thread_cleanup(thread);
  _st_active_count--;
  if (thread->term) {
    /* Put thread on the zombie queue */
    thread->state = _ST_ST_ZOMBIE;
    _ST_ADD_ZOMBIEQ(thread);

    /* Notify on our termination condition variable */
    st_cond_signal(thread->term);

    /* Switch context and come back later */
    _ST_SWITCH_CONTEXT(thread);

    /* Continue the cleanup */
    st_cond_destroy(thread->term);
    thread->term = NULL;
  }

#ifdef DEBUG
  _ST_DEL_THREADQ(thread);
#endif

#ifndef NVALGRIND
  /* Skip the primordial thread — presumably it runs on the process stack,
     which was never registered with Valgrind; confirm at thread creation. */
  if (!(thread->flags & _ST_FL_PRIMORDIAL)) {
    VALGRIND_STACK_DEREGISTER(thread->stack->valgrind_stack_id);
  }
#endif

  if (!(thread->flags & _ST_FL_PRIMORDIAL)) {
    _st_stack_free(thread->stack);
  }

  /* Find another thread to run */
  _ST_SWITCH_CONTEXT(thread);
  /* NOTE(review): the lines below run only if the context switch above
     ever returns to an exiting thread — verify whether this is reachable. */
  free(thread);
  (*_st_eventsys->free)();
}
Example #14
0
/* Tear down a worker coroutine: drop its Valgrind registration, make its
 * guard pages writable again so the allocation can be released, and hand
 * the memory back to the locale shared heap. */
void destroy_coro(Worker * c) {
  total_coros--;
#ifdef ENABLE_VALGRIND
  /* -1 marks a stack that was never registered (e.g. not under Valgrind). */
  if( c->valgrind_stack_id != -1 ) {
    VALGRIND_STACK_DEREGISTER( c->valgrind_stack_id );
  }
#endif
  if( c->base != NULL ) {
    // disarm guard page
    /* One guard page below the stack (at base) and one above it (at
       base + ssize + 4096); page size hard-coded as 4096 here —
       NOTE(review): confirm it matches the allocation site. */
    checked_mprotect( c->base, 4096, PROT_READ | PROT_WRITE );
    checked_mprotect( (char*)c->base + c->ssize + 4096, 4096, PROT_READ | PROT_WRITE );
#ifdef CORO_PROTECT_UNUSED_STACK
    // enable writes to stack so we can deallocate
    checked_mprotect( (void*)((intptr_t)c->base + 4096), c->ssize, PROT_READ | PROT_WRITE );
    checked_mprotect( (void*)(c), 4096, PROT_READ | PROT_WRITE );
#endif
    remove_coro(c); // remove from debugging list of coros
    Grappa::impl::locale_shared_memory.deallocate(c->base);
  }
}
Example #15
0
// Called at program termination to free the library's remaining memory:
// the return-context stack, then every thread left on the main, sleeping
// and dead lists.
void threads_destroy()
{
	thread_t item, tmp_item;
	/* Free the stack of the return context. */
	free(return_t.uc_stack.ss_sp);

	for (item = TAILQ_FIRST(&(threadList.list)); item != NULL; item = tmp_item)
	{
		tmp_item = TAILQ_NEXT(item, entries);
		/* Remove the element from the list */
		TAILQ_REMOVE(&(threadList.list), item, entries);

		/* Free the allocated space */
		VALGRIND_STACK_DEREGISTER(item->valgrind_stackid);
		//free(item->context.uc_stack.ss_sp);
		/* NOTE(review): the ss_sp free above is commented out, so stacks
		   of threads on this list appear to leak; also, the two loops
		   below free ss_sp without deregistering it from Valgrind.
		   Confirm whether this asymmetry is intentional. */
		free(item);
	}

   	for (item = TAILQ_FIRST(&(threadList.list_sleeping)); item != NULL; item = tmp_item)
	{
		tmp_item = TAILQ_NEXT(item, entries);
		/* Remove the element from the list */
		TAILQ_REMOVE(&(threadList.list_sleeping), item, entries);

		/* Free the allocated space */
		free(item->context.uc_stack.ss_sp);
		free(item);
	}

	for (item = TAILQ_FIRST(&(threadList.list_dead)); item != NULL; item = tmp_item)
	{
		tmp_item = TAILQ_NEXT(item, entries);
		/* Remove the element from the list */
		TAILQ_REMOVE(&(threadList.list_dead), item, entries);

		/* Free the allocated space */
		free(item->context.uc_stack.ss_sp);
		free(item);
	}
}
Example #16
0
/* Trampoline every ucontext task starts in: runs the user function, then
 * tears down this task's stack bookkeeping and transfers control onward
 * (to the main context, the next queued context, or whatever epoll wakes
 * up).  Never returns. */
static void startup(void *stack_top, unsigned long long stack_id, void (*func)(void *args), void *args)
{
    /* Release the previously finished task's stack, if one was queued. */
    clear(&trash);
    func(args);
    /* Queue our own stack for deferred release — we are still running on
       it, so it cannot be freed here. */
    trash = stack_top;

#ifdef __VALGRIND_H
    VALGRIND_STACK_DEREGISTER(stack_id);
#endif

    /* Last task finished: resume the saved main context, if any. */
    if((--task_count == 0) && ucp_main)
        setcontext(ucp_main);

    /* Otherwise run the next queued context. */
    if(ucp_next)
    {
        ucontext_t *ucp = ucp_next;
        ucp_next = ucp->uc_link;
        setcontext(ucp);
    }

    /* No runnable context: block until an fd event supplies one. */
    struct epoll_event event;
    while(epoll_wait(epfd, &event, 1, -1) != 1);
    setcontext(event.data.ptr);
}
/* Drop the Valgrind registration for this coroutine's stack (undoes the
   VALGRIND_STACK_REGISTER issued when the coroutine was created). */
static inline void valgrind_stack_deregister(CoroutineUContext *co)
{
    VALGRIND_STACK_DEREGISTER(co->valgrind_stack_id);
}
Example #18
0
/* Wait for 'thread' to terminate and collect its return value.
 *
 * The caller is parked on the target thread's sleeping list and the
 * scheduler runs the next ready thread until the target dies.  If the
 * target is already a zombie, its value is collected immediately.
 * retval may be NULL when the caller does not need the return value
 * (robustness fix: it was previously dereferenced unconditionally).
 * Returns 0 on success, -1 on error (unknown thread or failed swap).
 *
 * Fix: VALGRIND_STACK_DEREGISTER is now issued BEFORE free()-ing the
 * stack, as the original comment intended — deregistering after free
 * left Valgrind briefly tracking freed memory as a stack. */
int thread_join(thread_t thread, void **retval) {

    int found = 0;
    unsigned int i;
    /* The target is joinable if it sits on the ready list or on some
       thread's sleeping list. */
    for(i = 0; i < g_list_length(ready_list); i++) {
	thread_t t = g_list_nth_data(ready_list, i);
	if(thread == t)
	    found = 1;
	else {
	    if(g_list_find(t->sleeping_list, thread) != NULL)
		found = 1;
	}
    }

    if (found){

	thread_t next, current = g_list_nth_data(ready_list, 0);

	/* Park the caller on the target's sleeping list and hand the
	   processor to the next ready thread. */
	ready_list = g_list_remove(ready_list, current);

	thread->sleeping_list = g_list_append(thread->sleeping_list, current);

	next = g_list_nth_data(ready_list, 0);

	if(swapcontext(&current->uc, &next->uc) == -1)
	    return -1;

	if (retval)
	    *retval = current->retval;

	thread_sigTreat(current);

	if (g_list_index(zombie_list, thread) != -1){
	    zombie_list = g_list_remove(zombie_list,thread);
	    /* Deregister the stack from Valgrind just before freeing it. */
	    VALGRIND_STACK_DEREGISTER(thread->stackid);
	    free(thread->uc.uc_stack.ss_sp);

	    free(thread);

	}

	/* If only one thread remains and nobody is sleeping on it, the
	   whole scheduler can be torn down. */
	thread_t cur_t =  g_list_nth_data(ready_list, 0);
	if(g_list_length(ready_list)==1 && g_list_length(cur_t->sleeping_list)==0){
	    /* fprintf(stderr, "Total Annihilation\n"); */

	    g_list_free(cur_t->sleeping_list);

	    free(cur_t);

	    g_list_free(ready_list);
	    ready_list=NULL;
	}
    }
    else if (g_list_index(zombie_list,thread)!=-1){

	/* Target already finished: collect its value directly. */
	thread_t waiter = g_list_nth_data(zombie_list,(g_list_index(zombie_list,
								    thread)));
	if (retval)
	    *retval = waiter->retval;
	zombie_list = g_list_remove(zombie_list,thread);
	/* Deregister the stack from Valgrind just before freeing it. */
	VALGRIND_STACK_DEREGISTER(thread->stackid);
	free(thread->uc.uc_stack.ss_sp);
	/* free(thread->retval); */
	free(thread);
    }
    else {
	if (retval)
	    *retval = NULL;
	fprintf(stderr, "le thread %p n'existe pas\n", thread);
	return -1;
    }
    return 0;
}
Example #19
0
/* Cooperatively yield the processor.
 *
 * Called from inside a fiber: saves the fiber's state with setjmp and
 * longjmps back to the main context.  Called from main: saves the main
 * state and dispatches the next fiber round-robin; when a fiber has
 * returned, its slot is recycled.
 *
 * Fix: the Valgrind stack deregistration now happens BEFORE free() —
 * the stack must be deregistered while the memory is still valid; the
 * previous order left a freed block registered as a stack. */
void fiberYield()
{
	/* If we are in a fiber, switch to the main context */
	if ( inFiber )
	{
		/* Store the current state */
		if ( setjmp( fiberList[ currentFiber ].context ) )
		{
			/* Returning via longjmp (resume) */
			LF_DEBUG_OUT1( "Fiber %d resuming...", currentFiber );
		}
		else
		{
			LF_DEBUG_OUT1( "Fiber %d yielding the processor...", currentFiber );
			/* Saved the state: Let's switch back to the main state */
			longjmp( mainContext, 1 );
		}
	}
	/* If we are in main, dispatch the next fiber */
	else
	{
		if ( numFibers == 0 ) return;
	
		/* Save the current state */
		if ( setjmp( mainContext ) )
		{
			/* The fiber yielded the context to us */
			inFiber = 0;
			if ( ! fiberList[currentFiber].active )
			{
				/* If we get here, the fiber returned and is done! */
				LF_DEBUG_OUT1( "Fiber %d returned, cleaning up.", currentFiber );
				
#ifdef VALGRIND
				/* Deregister while the stack memory is still allocated. */
				VALGRIND_STACK_DEREGISTER(fiberList[currentFiber].stackId);
#endif
				free( fiberList[currentFiber].stack );
				
				/* Swap the last fiber with the current, now empty, entry */
				-- numFibers;
				if ( currentFiber != numFibers )
				{
					fiberList[ currentFiber ] = fiberList[ numFibers ];
				}
				
				/* Clean up the entry */
				fiberList[numFibers].stack = 0;
				fiberList[numFibers].function = 0;
				fiberList[numFibers].active = 0;
			}
			else
			{
				LF_DEBUG_OUT1( "Fiber %d yielded execution.", currentFiber );
			}
		}
		else
		{
			/* Saved the state so call the next fiber */
			currentFiber = (currentFiber + 1) % numFibers;
			
			LF_DEBUG_OUT1( "Switching to fiber %d", currentFiber );
			inFiber = 1;
			longjmp( fiberList[ currentFiber ].context, 1 );
		}
	}
	
	return;
}
Example #20
0
	/* Drop a Valgrind stack registration by id; compiles to a no-op
	   unless VALGRIND_CHECK_ENABLE is defined. */
	static void valgrind_unregister(const int64_t id){
#ifdef VALGRIND_CHECK_ENABLE
		VALGRIND_STACK_DEREGISTER(id);
#endif
	}
Example #21
0
/* Tell Valgrind this stack segment is no longer in use, so the address
   range can be reused without stale stack tracking. */
void
deregister_valgrind_stack(stk_seg *stk) {
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
}
Example #22
0
/* C-callable shim around the VALGRIND_STACK_DEREGISTER client request.
   'id' is the value returned by the matching register call. */
void
rust_valgrind_stack_deregister(unsigned int id) {
  VALGRIND_STACK_DEREGISTER(id);
}
Example #23
0
/// Deregister a stack with valgrind so that it can be reused.
/// @param thread the stack record whose stack_id was returned by the
///               matching VALGRIND_STACK_REGISTER call
static void _deregister(ustack_t *thread) {
  VALGRIND_STACK_DEREGISTER(thread->stack_id);
}