Example #1
void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;
	struct cpu_info *ci;
	u8 *thread_stacks;

	thread_stacks = arch_get_thread_stackbase();

	/* Initialize the BSP thread first. The cpu_info structure is assumed
	 * to be just under the top of the stack. */
	t = &all_threads[0];
	ci = cpu_info();
	ci->thread = t;
	t->stack_orig = (uintptr_t)ci;
	t->id = 0;

	stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();
}
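The loop above reserves space for a struct cpu_info at the top of each secondary thread's stack region and records the resulting address in stack_orig. A small illustrative helper, not part of the original source, restates the same pointer arithmetic explicitly (it reuses u8, CONFIG_STACK_SIZE and struct cpu_info from the example):

/* Illustrative only: the initial stack_orig for thread i (i >= 1), matching
 * the arithmetic that the loop above performs incrementally. */
static u8 *thread_stack_orig(u8 *thread_stacks, int i)
{
	return &thread_stacks[i * CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
}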
Example #2
void test_add_thread()
{
    Board *board;
    Thread *thread;

    board = new_board("test", "TEST", 2, 0);
    assert(board != NULL);
    thread = new_thread(NULL, 0, 0);
    /* Add the first thread */
    add_thread_to_board(board, thread);
    assert(board->first_thread == 0);
    assert(board->last_thread == 0);
    /* Add a second thread */
    add_thread_to_board(board, thread);
    assert(board->first_thread == 0);
    assert(board->last_thread == 1);
    /* Make sure the post count hasn't magically changed */
    assert(board->post_count == 0);
    /* Add a third thread, should remove the first one */
    add_thread_to_board(board, thread);
    assert(board->first_thread == 1);
    assert(board->last_thread == 0);
    /* Check post count again */
    assert(board->post_count == 0);

    free_board(board);
    free_thread(thread);
}
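The assertions above suggest that add_thread_to_board treats the board's thread slots as a ring buffer: with a capacity of 2, the third insertion wraps last_thread back to slot 0 and evicts the oldest entry by advancing first_thread. A minimal sketch of index bookkeeping consistent with those assertions follows; it is purely hypothetical (the real add_thread_to_board may differ, and the -1 empty sentinel is assumed by analogy with the thread fields checked in Example #7):

/* Hypothetical sketch of the first_thread/last_thread bookkeeping implied by
 * the test above; not the project's actual implementation. */
static void add_thread_slots(Board *board, Thread *thread)
{
    if (board->first_thread == -1) {
        /* first insertion into an empty board (assumed -1 sentinel) */
        board->first_thread = 0;
        board->last_thread = 0;
    } else {
        /* advance last_thread circularly; evict the oldest slot on collision */
        board->last_thread = (board->last_thread + 1) % board->max_threads;
        if (board->last_thread == board->first_thread)
            board->first_thread =
                (board->first_thread + 1) % board->max_threads;
    }
    board->threads[board->last_thread] = thread;
}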
Example #3
int
_papi_hwi_initialize_thread( ThreadInfo_t ** dest, int tid )
{
	int retval;
	ThreadInfo_t *thread;
	int i;

	if ( ( thread = allocate_thread( tid  ) ) == NULL ) {
		*dest = NULL;
		return PAPI_ENOMEM;
	}

	/* Call the substrate to fill in anything special. */

	for ( i = 0; i < papi_num_components; i++ ) {
		retval = _papi_hwd[i]->init( thread->context[i] );
		if ( retval ) {
			free_thread( &thread );
			*dest = NULL;
			return retval;
		}
	}

	insert_thread( thread, tid );

	*dest = thread;
	return PAPI_OK;
}
Example #4
int thread_join(thread_t *t, void **ret)
{
  if (t == NULL)
    return_errno(FALSE, EINVAL);
  if ( !( t->joinable ) )
    return_errno(FALSE, EINVAL);

  assert(t->state != GHOST);

  // A thread can be joined only once
  if (t->join_thread)   
    return_errno(FALSE, EACCES);   
  t->join_thread = current_thread;

  // Wait for the thread to complete
  tdebug( "**** thread state: %d\n" ,t->state);
  if (t->state != ZOMBIE) {
  	CAP_SET_SYSCALL();
    thread_suspend_self(0);
    CAP_CLEAR_SYSCALL();
  }

  // clean up the dead thread
  if (ret != NULL) 
    *ret = t->ret;
  free_thread( t );

  return TRUE;
}
Example #5
void BrickFactory::FreeAllSurfaces() {
	for (auto it = brick_map_.begin(); it != brick_map_.end(); ++it) {
		// Safe to const_cast: the surface itself is not modified, but
		// SDL_FreeSurface does not take a const pointer.
		// Nothing relies on these surfaces anymore, so they can be freed concurrently.
		std::thread free_thread(
				SDL_FreeSurface, const_cast<SDL_Surface*>(it->second.GetBrickSurface()));
		free_thread.detach();
	}
}
Example #6
int test_thread_full(void* data) {
    (void) data;
    int status = 1;
    /* create dummy post */
    Post *post = new_post("title", "name", "text", 0, 0, 0);
    Thread *thread = new_thread(NULL, 1, 0);
    add_post_to_thread(thread, post);
    add_post_to_thread(thread, post);
    assert(thread->nreplies == 1);
    free_thread(thread);
    free_post(post);
    return status;
}
Example #7
/* Creates an empty thread. */
int test_empty_thread(void *data)
{
    (void) data;
    int status = 1; /* test is correct */
    Thread *thread = new_thread(NULL, 1, 0);
    /* Test that values have correctly been set */
    assert(thread != NULL);
    assert(thread->replies != NULL);
    assert(thread->op == NULL);
    assert(thread->max_replies == 1);
    assert(thread->first_post == -1);
    assert(thread->last_post == -1);
    assert(thread->nreplies == 0);
    free_thread(thread);
    return status;
}
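Taken together, the assertions in Examples #6, #7 and #8 pin down the Thread fields these tests rely on. A field layout consistent with them, shown for orientation only (the project's real header may declare more members or different types):

/* Illustrative Thread layout inferred from the assertions above;
 * not copied from the project's headers. */
typedef struct Thread {
    Post *op;          /* original post, NULL for an empty thread  */
    Post **replies;    /* buffer of reply posts                    */
    int max_replies;   /* capacity of the replies buffer           */
    int first_post;    /* index of the oldest reply, -1 when empty */
    int last_post;     /* index of the newest reply, -1 when empty */
    int nreplies;      /* number of replies currently stored       */
} Thread;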
Example #8
int test_add_post(void *data)
{
    (void) data;
    int status = 1;
    /* create posts */
    Post *op = new_post("nameOP", "titleOP", "textOP", 0, -1, 0);
    Post *reply = new_post("name", "title", "text", 1, 0, 0);
    Thread *thread = new_thread(op, 150, 0);
    add_post_to_thread(thread, reply);
    assert(thread->nreplies == 1);
    assert(thread->first_post == 0);
    assert(thread->last_post == 0);
    free_thread(thread);
    free_post(op);
    free_post(reply);
    return status;
}
Example #9
void free_board(Board *board)
{
    int i, j;
    if (board->flags & BOARD_AUTO_FREE) {
        for (i = 0; i < board->max_threads; ++i) {
            free_post(board->threads[i]->op);
            for (j = 0; j < board->threads[i]->max_replies; ++j) {
                if (board->threads[i]->replies[j] != 0) {
                    free_post(board->threads[i]->replies[j]);
                }
            }
            free_thread(board->threads[i]);
        }
    }
    free(board->threads);
    free(board);
}
Example #10
static void *pthread_handler(void *data)
{
	struct pthread_arg *parg = (struct pthread_arg *)data;
	ThreadPool *tp = parg->tp;
	Pthread_t *th = parg->th;

	sem_post(&parg->sem);

	while (1) {
		pthread_mutex_lock(&th->lock);

		if (th->handler)
			th->handler(th->arg);

		free_thread(tp, th);
	}
	return NULL;
}
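The sem_post at the top of the handler pairs with a sem_wait on the creating side, so the creator knows tp and th have been read before parg goes out of scope. A hypothetical creation-side counterpart is sketched below; only struct pthread_arg, ThreadPool, Pthread_t and pthread_handler come from the example, and it assumes <pthread.h> and <semaphore.h> as in the original file:

/* Hypothetical creation-side counterpart to pthread_handler() above;
 * illustrative only, not taken from the original pool implementation. */
static int spawn_pool_thread(ThreadPool *tp, Pthread_t *th)
{
	struct pthread_arg parg;
	pthread_t tid;

	parg.tp = tp;
	parg.th = th;
	sem_init(&parg.sem, 0, 0);

	if (pthread_create(&tid, NULL, pthread_handler, &parg) != 0) {
		sem_destroy(&parg.sem);
		return -1;
	}

	/* wait until the handler has copied tp/th; parg lives on our stack */
	sem_wait(&parg.sem);
	sem_destroy(&parg.sem);
	return 0;
}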
Example #11
int
_papi_hwi_shutdown_thread( ThreadInfo_t * thread )
{
	int retval = PAPI_OK;
	unsigned long tid;
	int i, failure = 0;

	if ( _papi_hwi_thread_id_fn )
		tid = ( *_papi_hwi_thread_id_fn ) (  );
	else
		tid = ( unsigned long ) getpid(  );

        THRDBG("Want to shutdown thread %ld, alloc %ld, our_tid: %ld\n",
	       thread->tid,
	       thread->allocator_tid,
	       tid);

	if ((thread->tid==tid) || ( thread->allocator_tid == tid )) {

                _papi_hwi_thread_free_eventsets(tid);

		remove_thread( thread );
		THRDBG( "Shutting down thread %ld at %p\n", thread->tid, thread );
		for ( i = 0; i < papi_num_components; i++ ) {
			retval = _papi_hwd[i]->shutdown( thread->context[i] );
			if ( retval != PAPI_OK )
				failure = retval;
		}
		free_thread( &thread );
		return ( failure );
	}

	THRDBG( "Skipping shutdown thread %ld at %p, thread %ld not allocator!\n",
			thread->tid, thread, tid );
	return PAPI_EBUG;
}
Example #12
static void terminate_thread(struct thread *t)
{
	free_thread(t);
	schedule(NULL);
}
Example #13
struct node *make_node(int id, const char *hostname,
		const char *local_ip, const char *remote_ip,
		const char *local_port, const char *remote_port)
{
	struct node *node;
	struct queue *q;
	struct thread *thr;

	node = alloc_node();
	if(node == NULL) {
		return NULL;
	}

	node->id = id;
	strncpy(node->hostname, hostname, MAX_HOSTNAME_LEN);
	strncpy(node->local_ip, local_ip, MAX_IPADDR_LEN);
	strncpy(node->remote_ip, remote_ip, MAX_IPADDR_LEN);
	strncpy(node->local_port, local_port, MAX_PORT_LEN);
	strncpy(node->remote_port, remote_port, MAX_PORT_LEN);
	node->data_handler = node_data_handler;
	node->meta_handler = node_meta_handler;

	q = init_queue();
	if(q == NULL) {
		free_node(node);
		return NULL;
	}
	node->data_q = q;

	q = init_queue();
	if(q == NULL) {
		free_queue(node->data_q);
		free_node(node);
		return NULL;
	}
	node->meta_q = q;

	q = init_queue();
	if(q == NULL) {
		free_queue(node->data_q);
		free_queue(node->meta_q);
		free_node(node);
		return NULL;
	}
	node->work_q = q;

	thr = create_thread(node_data_worker_function, node);
	if(thr == NULL) {
		goto err_data;
	}
	node->data_worker = thr;

	thr = create_thread(node_meta_worker_function, node);
	if(thr == NULL) {
		goto err_meta;
	}
	node->meta_worker = thr;

	return node;

err_worker:
	/* Note: no goto in this function currently targets err_worker. */
	free_thread(node->meta_worker);

err_meta:
	free_thread(node->data_worker);

err_data:
	free_queue(node->data_q);
	free_queue(node->meta_q);
	free_node(node);

	return NULL;
}
Example #14
/**
 * Main scheduling loop
 **/
static void* do_scheduler(void *arg)
{
  static cpu_tick_t next_poll=0, next_overload_check=0, next_info_dump=0, next_graph_stats=0, now=0;
  static int pollcount=1000;
  static int init_done = 0;

  (void) arg;  // suppress GCC "unused parameter" warning

  in_scheduler = 1;

  // make sure we start out by saving edge stats for a while
  if( !init_done ) {
    init_done = 1;
    if (conf_no_statcollect) 
      bg_save_stats = 0;
    else
      bg_save_stats = 1;
    
    GET_REAL_CPU_TICKS( now );
    next_graph_stats = now + 1 * ticks_per_second;
    
    start_timer(&scheduler_timer);
  }

  while( 1 ) {

    //current_thread = scheduler_thread;
    sanity_check_threadcounts();
    sanity_check_io_stats();

    // wake up threads that have timeouts
    sleepq_check(0);   
    sanity_check_threadcounts();

    // break out if there are only daemon threads
    if(unlikely (num_suspended_threads == 0  &&  num_runnable_threads == num_daemon_threads)) {
      // dump the blocking graph
      if( exit_func_done && conf_dump_blocking_graph ) {
        tdebug("dumping blocking graph from do_scheduler()\n");
        dump_blocking_graph(); 
      }
        
      // go back to mainthread, which should now be in exit_func()
      current_thread = main_thread;
      in_scheduler = 0;
      co_call(main_thread->coro, NULL);
      in_scheduler = 1;

      if( unlikely(current_thread_exited) ) {     // free memory from deleted threads
        current_thread_exited=0;
        if (current_thread != main_thread) // main_thread is needed for whole program exit
          free_thread( current_thread );
      }
        
      return NULL;
    }


    // cheesy way of handling things with timing requirements
    {
      GET_REAL_CPU_TICKS( now );
        
      // toggle stats collection
      if( conf_no_statcollect == 0 && next_graph_stats < now ) {
        bg_save_stats = 1 - bg_save_stats;

        if( bg_save_stats ) { 
          // record stats for 100 ms
          next_graph_stats = now + 100 * ticks_per_millisecond;
            
          // update the stats epoch, to allow proper handling of the first data items
          bg_stats_epoch++;
        }            
        else {
          // avoid stats for 2000 ms
          next_graph_stats = now + 2000 * ticks_per_millisecond;
        }
        //output(" *********************** graph stats %s\n", bg_save_stats ? "ON" : "OFF" );
      }
        
      // resource utilization
      if( unlikely (next_overload_check < now) ) {
        check_overload( now );
        next_overload_check = now + OVERLOAD_CHECK_INTERVAL;
      }

      // poll
      if( likely(io_polling_func != NULL) ) {
        if( num_runnable_threads==0  ||  --pollcount <= 0  ||  next_poll < now ) {
          //if( num_runnable_threads==0 ) {
          // poll
          long long timeout = 0;

          if( num_runnable_threads==0 ) {
            if (first_wake_usecs == 0) {
              timeout = -1;
            } else {
              // there are threads in the sleep queue
              // so poll for i/o till at most that time
              unsigned long long now;
              now = current_usecs();
	      tdebug ("first_wake: %lld, now: %lld\n", first_wake_usecs, now);
              if (first_wake_usecs > now)
                timeout = first_wake_usecs - now;
            }
          }

          stop_timer(&scheduler_timer);
          //if( timeout != -1 )  output("timeout is not zero\n");
          io_polling_func( timeout ); // allow blocking
          start_timer(&scheduler_timer);
          sanity_check_threadcounts();


#ifndef USE_NIO
          // sleep for a bit, if there was nothing to do
          // FIXME: let the IO functions block instead??
          if( num_runnable_threads == 0 ) {
            syscall(SYS_sched_yield);
          }
#endif

          // vary the poll rate depending on the workload
#if 0
          if( num_runnable_threads < 5 ) {
            next_poll = now + (10*ticks_per_millisecond);
            pollcount = 1000;
          } else if( num_runnable_threads < 10 ) {
            next_poll = now + (50*ticks_per_millisecond);
            pollcount = 2000;
          } else {
            next_poll = now + (100*ticks_per_millisecond);
            pollcount = 3000;
          }
#else
          next_poll = now + (ticks_per_millisecond << 13);
          pollcount = 10000;
#endif
        }
      }

      // debug stats
      if( 0 && next_info_dump < now ) {
        dump_debug_info();
        next_info_dump = now + 5 * ticks_per_second;
      }

    }

    // get the head of the run list
    current_thread = sched_next_thread();

    // The scheduler returned an invalid thread even though there are
    // runnable threads.  This indicates that every runnable thread is
    // likely to require use of an overloaded resource.
    if( !valid_thread(current_thread) ) {
      pollcount = 0;
      continue;
    }

    // barf, if the returned thread is still on the sleep queue
    assert( current_thread->sleep == -1 );

    tdebug("running TID %d (%s)\n", current_thread->tid, current_thread->name ? current_thread->name : "no name");

    sanity_check_threadcounts();


    // call thread
    stop_timer(&scheduler_timer);
    start_timer(&app_timer);
    in_scheduler = 0;
    co_call(current_thread->coro, NULL);
    in_scheduler = 1;
    stop_timer(&app_timer);
    start_timer(&scheduler_timer);

    if( unlikely(current_thread_exited) ) {     // free memory from deleted threads
      current_thread_exited=0;
      if (current_thread != main_thread) // main_thread is needed for whole program exit
        free_thread( current_thread );
    }

#ifdef NO_SCHEDULER_THREAD
    return NULL;
#endif
  }

  return NULL;
}
Example #15
/*
 * Some notes on new thread creation and first-time initialization
 * to enable multi-threading.
 *
 * There are basically two things that need to be done.
 *
 *   1) The internal library variables must be initialized.
 *   2) Upcalls need to be enabled to allow multiple threads
 *      to be run.
 *
 * The first may be done as a result of other pthread functions
 * being called.  When _thr_initial is null, _libpthread_init is
 * called to initialize the internal variables; this also creates
 * or sets the initial thread.  It'd be nice to automatically
 * have _libpthread_init called on program execution so we don't
 * have to have checks throughout the library.
 *
 * The second part is only triggered by the creation of the first
 * thread (other than the initial/main thread).  If the thread
 * being created is a scope system thread, then a new KSE/KSEG
 * pair needs to be allocated.  Also, if upcalls haven't been
 * enabled on the initial thread's KSE, they must be now that
 * there is more than one thread; this could be delayed until
 * the initial KSEG has more than one thread.
 */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct kse *kse = NULL;
	struct kse_group *kseg = NULL;
	kse_critical_t crit;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	/*
	 * Turn on threaded mode; if that fails, there is no point
	 * in doing any further work.
	 */
	if (_kse_isthreaded() == 0 && _kse_setthreaded(1)) {
		return (EAGAIN);
	}
	curthread = _get_curthread();

	/*
	 * Allocate memory for the thread structure.
	 * Some functions use malloc, so don't put it
	 * in a critical region.
	 */
	if ((new_thread = _thr_alloc(curthread)) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL)
			/* Use the default thread attributes: */
			new_thread->attr = _pthread_attr_default;
		else {
			new_thread->attr = *(*attr);
			if ((*attr)->sched_inherit == PTHREAD_INHERIT_SCHED) {
				/* inherit scheduling contention scope */
				if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
					new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
				else
					new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
				/*
				 * Scheduling policy and parameters will be
				 * inherited in the code that follows.
				 */
			}
		}
		if (_thread_scope_system > 0)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else if ((_thread_scope_system < 0)
		    && (thread != &_thr_sig_daemon))
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		if (create_stack(&new_thread->attr) != 0) {
			/* Insufficient memory to create a stack: */
			ret = EAGAIN;
			_thr_free(curthread, new_thread);
		}
		else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
		    (((kse = _kse_alloc(curthread, 1)) == NULL)
		    || ((kseg = _kseg_alloc(curthread)) == NULL))) {
			/* Insufficient memory to create a new KSE/KSEG: */
			ret = EAGAIN;
			if (kse != NULL) {
				kse->k_kcb->kcb_kmbx.km_flags |= KMF_DONE;
				_kse_free(curthread, kse);
			}
			free_stack(&new_thread->attr);
			_thr_free(curthread, new_thread);
		}
		else {
			if (kseg != NULL) {
				/* Add the KSE to the KSEG's list of KSEs. */
				TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_kgqe);
				kseg->kg_ksecount = 1;
				kse->k_kseg = kseg;
				kse->k_schedq = &kseg->kg_schedq;
			}
			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = THR_MAGIC;

			new_thread->slice_usec = -1;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;
			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/* No thread is wanting to join to this one: */
			new_thread->joiner = NULL;

			/*
			 * Initialize the machine context.
			 * Enter a critical region to get consistent context.
			 */
			crit = _kse_critical_enter();
			THR_GETCONTEXT(&new_thread->tcb->tcb_tmbx.tm_context);
			/* Initialize the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			_kse_critical_leave(crit);

			new_thread->tcb->tcb_tmbx.tm_udata = new_thread;
			new_thread->tcb->tcb_tmbx.tm_context.uc_sigmask =
			    new_thread->sigmask;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size =
			    new_thread->attr.stacksize_attr;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp =
			    new_thread->attr.stackaddr_attr;
			makecontext(&new_thread->tcb->tcb_tmbx.tm_context,
			    (void (*)(void))thread_start, 3, new_thread,
			    start_routine, arg);
			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
				/*
				 * Copy the scheduling attributes.
				 * Lock the scheduling lock to get consistent
				 * scheduling parameters.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				new_thread->base_priority =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
				THR_SCHED_UNLOCK(curthread, curthread);
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->specific_data_count = 0;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->tlflags = 0;
			new_thread->sigbackout = NULL;
			new_thread->continuation = NULL;
			new_thread->wakeup_time.tv_sec = -1;
			new_thread->lock_switch = 0;
			sigemptyset(&new_thread->sigpend);
			new_thread->check_pending = 0;
			new_thread->locklevel = 0;
			new_thread->rdlock_count = 0;
			new_thread->sigstk.ss_sp = 0;
			new_thread->sigstk.ss_size = 0;
			new_thread->sigstk.ss_flags = SS_DISABLE;
			new_thread->oldsigmask = NULL;

			if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
				new_thread->state = PS_SUSPENDED;
				new_thread->flags = THR_FLAGS_SUSPENDED;
			}
			else
				new_thread->state = PS_RUNNING;

			/*
			 * System scope threads have their own kse and
			 * kseg.  Process scope threads are all hung
			 * off the main process kseg.
			 */
			if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
				new_thread->kseg = _kse_initial->k_kseg;
				new_thread->kse = _kse_initial;
			}
			else {
				kse->k_curthread = NULL;
				kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
				new_thread->kse = kse;
				new_thread->kseg = kse->k_kseg;
				kse->k_kcb->kcb_kmbx.km_udata = kse;
				kse->k_kcb->kcb_kmbx.km_curthread = NULL;
			}

			/*
			 * Schedule the new thread starting a new KSEG/KSE
			 * pair if necessary.
			 */
			ret = _thr_schedule_add(curthread, new_thread);
			if (ret != 0)
				free_thread(curthread, new_thread);
			else {
				/* Return a pointer to the thread structure: */
				(*thread) = new_thread;
			}
		}
	}

	/* Return the status: */
	return (ret);
}
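Example #15 is the library-internal implementation behind the standard pthread_create entry point. For reference, a minimal caller of that public API (plain POSIX, independent of the KSE internals above):

/* Minimal POSIX usage of the public API whose internals Example #15
 * implements; purely illustrative. */
#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
	printf("hello from thread, arg=%s\n", (const char *)arg);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker, "demo") != 0)
		return 1;
	pthread_join(tid, NULL);	/* wait for the worker to finish */
	return 0;
}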