Esempio n. 1
0
/*
 * Wake up every thread currently registered in this barrier's
 * wait-queue descriptor blocks (WQDBs), then log timing statistics.
 *
 * Walks each WQDB page record by record; for every non-NULL listener
 * it clears the slot and sends the recorded wakeup event, counting
 * wakeups in `ticket` until `count` threads have been released.
 */
static void barrier_do_broadcast(struct barrier_s *barrier)
{
	register uint_t tm_first;	/* time stamp of first arrival (taken from barrier) */
	register uint_t tm_last;	/* time stamp of last arrival (taken from barrier) */
	register uint_t tm_start;	/* time stamp at broadcast start */
	register uint_t wqdbsz;		/* number of records per WQDB page */
	register uint_t tm_end;		/* time stamp at broadcast end */
	register uint_t ticket;		/* number of threads woken so far */
	register uint_t index;		/* index into barrier->wqdb_tbl[] */
	register uint_t count;		/* number of threads that must be woken */
	register uint_t event;		/* wakeup event value stored in the record */
	register void  *listner;	/* sleeping thread's listener handle (NULL = empty slot) */
	register wqdb_t *wqdb;		/* current wait-queue descriptor block */
	register uint_t i;		/* record index within the current WQDB */
 
	tm_start = cpu_time_stamp();
	tm_first = barrier->tm_first;
	tm_last  = barrier->tm_last;
	wqdbsz   = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
	ticket   = 0;

#if ARCH_HAS_BARRIERS
	count    = barrier->count;
#else
	count    = barrier->count - 1;	/* last don't sleep */
#endif

	/* Scan all WQDB pages until `count` listeners have been woken. */
	for(index = 0; ((index < BARRIER_WQDB_NR) && (ticket < count)); index++)
	{
		wqdb = barrier->wqdb_tbl[index];

		for(i = 0; ((i < wqdbsz) && (ticket < count)); i++)
		{

/* NOTE(review): "BORADCAST" looks like a misspelling of "BROADCAST";
 * kept as-is since it must match the config symbol defined elsewhere. */
#if CONFIG_BARRIER_BORADCAST_UREAD
			/* Bypass the cache so we observe the sleeper's latest
			 * stores to its record. */
			event   = cpu_uncached_read(&wqdb->tbl[i].event);
			listner = (void*) cpu_uncached_read(&wqdb->tbl[i].listner);
#else
			event   = wqdb->tbl[i].event;
			listner = wqdb->tbl[i].listner;
#endif

			if(listner != NULL)
			{
				/* Clear the slot before waking so the record can
				 * be reused by a later barrier round. */
				wqdb->tbl[i].listner = NULL;
#if CONFIG_USE_SCHED_LOCKS
				sched_wakeup((struct thread_s*) listner);
#else
				sched_event_send(listner, event);
#endif
				ticket ++;
			}
		}
	}

	tm_end = cpu_time_stamp();

	/* F: first arrival, L: last arrival, B: broadcast start,
	 * E: broadcast end, T: total (end - first arrival). */
	printk(INFO, "INFO: %s: cpu %d [F: %d, L: %d, B: %d, E: %d, T: %d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       tm_first, 
	       tm_last, 
	       tm_start,
	       tm_end,
	       tm_end - tm_first);
}
Esempio n. 2
0
/* 
 * FIXME: define spinlock_rdlock() so that all locking on task->th_lock 
 * becomes a read lock, except on join/detach/destroy. 
 */
/*
 * Wake one or more user threads belonging to the current task.
 *
 * @tid      thread id to wake when @tid_tbl is NULL; otherwise must
 *           match the first entry of @tid_tbl (sanity check).
 * @tid_tbl  optional user-space array of thread ids to wake.
 * @count    number of entries in @tid_tbl (1..WAKEUP_TBL_SIZE).
 *
 * Returns 0 on success; on failure sets this->info.errno to EINVAL,
 * logs the failing step, and returns -1.
 */
int sys_thread_wakeup(pthread_t tid, pthread_t *tid_tbl, uint_t count)
{
	/* Upper bound on how many ids one call may wake; also the size
	 * of the kernel-side staging buffer. */
	enum { WAKEUP_TBL_SIZE = 100 };

	struct task_s *task;
	struct thread_s *this;
	struct thread_s *target;
	pthread_t tbl[WAKEUP_TBL_SIZE];
	void *listner;
	uint_t event;
	sint_t i;
	error_t err;

	this = current_thread;
	task = this->task;
	i = -1;			/* -1 in the failure log means "before the wake loop" */

	if(tid_tbl != NULL)
	{
		/* The whole user array must lie below the kernel boundary
		 * and fit the staging buffer. */
		if((((uint_t)tid_tbl + (count*sizeof(pthread_t))) >= CONFIG_KERNEL_OFFSET) || 
		   (count == 0) || (count > WAKEUP_TBL_SIZE))
		{
			err = -1;
			goto fail_tid_tbl;
		}

		/* Copy count pthread_t elements (was sizeof(pthread_t*):
		 * wrong element size, could overflow tbl[]). */
		if((err = cpu_uspace_copy(&tbl[0], tid_tbl, sizeof(pthread_t) * count))) 
			goto fail_uspace;

		/* Caller must pass its first target twice as a consistency check. */
		if(tbl[0] != tid)
		{
			err = -2;
			goto fail_first_tid;
		}
	}
	else
	{
		/* Single-target form. */
		count = 1;
		tbl[0] = tid;
	}

	for(i = 0; i < (sint_t)count; i++)	/* count <= WAKEUP_TBL_SIZE here */
	{
		tid = tbl[i];

		if(tid > task->max_order)
		{
			err = -3;
			goto fail_tid;
		}

		target = task->th_tbl[tid];
   
		/* Reject stale or never-allocated slots. */
		if((target == NULL) || (target->signature != THREAD_ID))
		{
			err = -4;
			goto fail_target;
		}

		listner = sched_get_listner(target, SCHED_OP_UWAKEUP);
		event = sched_event_make(target,SCHED_OP_UWAKEUP);
    
		if(this->info.isTraced == true)
		{
			printk(INFO,"%s: tid %d --> tid %d [%d][%d]\n", 
			       __FUNCTION__, 
			       this->info.order, 
			       tid, 
			       cpu_time_stamp(),
			       i);
		}

		sched_event_send(listner,event);
		cpu_wbflush();
	}

	return 0;

fail_target:
fail_tid:
fail_first_tid:
fail_uspace:
fail_tid_tbl:

	printk(INFO, "%s: cpu %d, pid %d, tid %x, i %d, count %d, ttid %x, request has failed with err %d [%d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       task->pid,
	       this,
	       i,
	       count,
	       tid,
	       err,
	       cpu_time_stamp());
  
	this->info.errno = EINVAL;
	return -1;
}