Example #1
error_t barrier_wait(struct barrier_s *barrier)
{
#if !(CONFIG_USE_SCHED_LOCKS)
	register uint_t event;
#endif
	register void *listner;
	register sint_t ticket;	/* signed: arch_barrier_wait() reports errors as negative values */
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register struct thread_s *this;
	uint_t irq_state;
	uint_t tm_now;

	tm_now = cpu_time_stamp(); 
	this   = current_thread;
	index  = this->info.order;

	if((barrier->signature != BARRIER_ID) || ((barrier->owner != NULL) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz  = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
	wqdb    = barrier->wqdb_tbl[index / wqdbsz];

#if !(CONFIG_USE_SCHED_LOCKS)
	event   = sched_event_make (this, SCHED_OP_WAKEUP);
	listner = sched_get_listner(this, SCHED_OP_WAKEUP);
	wqdb->tbl[index % wqdbsz].event = event;
#else
	/* With scheduler locks there is no event word; the thread itself is the listener */
	listner = (void*)this;
#endif

	wqdb->tbl[index % wqdbsz].listner = listner;

#if CONFIG_BARRIER_ACTIVE_WAIT
	register uint_t current_phase;
	current_phase = barrier->phase;
#endif	/* CONFIG_BARRIER_ACTIVE_WAIT */

	cpu_disable_all_irq(&irq_state);

	ticket = arch_barrier_wait(barrier->cluster, barrier->hwid);

	cpu_restore_irq(irq_state);

	if(ticket < 0) return EINVAL;

	if(ticket == barrier->count)	/* first thread to reach the barrier */
		barrier->tm_first = tm_now;
	else if(ticket == 1)		/* last thread to reach the barrier */
		barrier->tm_last  = tm_now;

#if CONFIG_BARRIER_ACTIVE_WAIT
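	/* Spin until the current phase's state word becomes non-zero */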
	while(cpu_uncached_read(&barrier->state[current_phase]) == 0)
		sched_yield(this);
#else
	sched_sleep(this);
#endif	/* CONFIG_BARRIER_ACTIVE_WAIT */

	return (ticket == 1) ? PTHREAD_BARRIER_SERIAL_THREAD : 0;
}
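
The wqdb (wait-queue database) used above spreads one wakeup record per thread across pages: a thread of order index lands in page index / wqdbsz, slot index % wqdbsz, where wqdbsz is the number of records per page. A minimal stand-alone sketch of that decomposition, assuming a 4 KiB page and a two-field record (the kernel's PMM_PAGE_SIZE and the exact wqdb_record_t layout are platform-defined):

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-ins: the real PMM_PAGE_SIZE and wqdb_record_t are
 * platform-defined; 4 KiB pages and a two-field record are a guess. */
#define PAGE_SIZE 4096

typedef struct {
	uintptr_t event;   /* encoded wakeup event */
	void     *listner; /* scheduler listener (kernel's spelling) */
} wqdb_record_t;

int main(void)
{
	unsigned wqdbsz = PAGE_SIZE / sizeof(wqdb_record_t);
	unsigned index  = 300; /* a thread's info.order, chosen arbitrarily */

	/* Same decomposition as barrier_wait(): page first, then slot */
	printf("records/page=%u page=%u slot=%u\n",
	       wqdbsz, index / wqdbsz, index % wqdbsz);
	return 0;
}

Under these assumptions wqdbsz is 256 on a 64-bit build, so thread 300 lands in page 1, slot 44.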
Example #2
/* TODO: reintroduce barrier's ops to deal with case-specific treatment */
error_t barrier_wait(struct barrier_s *barrier)
{
	register uint_t ticket;
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register bool_t isShared;
	struct thread_s *this;
	uint_t tm_now;

	tm_now   = cpu_time_stamp();
	this     = current_thread;
	index    = this->info.order;
	ticket   = 0;
	isShared = (barrier->owner == NULL);

	if((barrier->signature != BARRIER_ID) || ((isShared == false) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	if(isShared)
	{
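		/* Anonymous (process-shared) barrier: the lock serializes ticket allocation */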
		spinlock_lock(&barrier->lock);
		index  = barrier->index ++;
		ticket = barrier->count - index;
	}

	wqdb   = barrier->wqdb_tbl[index / wqdbsz];

#if CONFIG_USE_SCHED_LOCKS
	wqdb->tbl[index % wqdbsz].listner = (void*)this;
#else
	uint_t irq_state;
	cpu_disable_all_irq(&irq_state); /* To prevent against any scheduler intervention */
	wqdb->tbl[index % wqdbsz].event   = sched_event_make (this, SCHED_OP_WAKEUP);
	wqdb->tbl[index % wqdbsz].listner = sched_get_listner(this, SCHED_OP_WAKEUP);
#endif

	if(isShared == false)
		ticket = atomic_add(&barrier->waiting, -1); /* returns the old value: count for the first arriver, 1 for the last */

	if(ticket == 1)
	{
#if !(CONFIG_USE_SCHED_LOCKS)
		cpu_restore_irq(irq_state);
#endif
		barrier->tm_last = tm_now;
		wqdb->tbl[index % wqdbsz].listner = NULL;

		if(isShared)
		{
			barrier->index = 0;
			spinlock_unlock(&barrier->lock);
		}
		else
			atomic_init(&barrier->waiting, barrier->count);

		barrier_do_broadcast(barrier);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	if(ticket == barrier->count)
		barrier->tm_first = tm_now;

	if(isShared)
		spinlock_unlock_nosched(&barrier->lock);

	sched_sleep(this);

#if !(CONFIG_USE_SCHED_LOCKS)
	cpu_restore_irq(irq_state);
#endif
	return 0;
}
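
barrier_do_broadcast() is not shown in these examples. Below is a hedged sketch of what it plausibly does, inferred from how the records are filled above (one (event, listner) pair per waiting thread, with the serial thread clearing its own slot first) and from the sched_event_send()/cpu_wbflush() pairing visible in Example #3; the real routine may differ, notably under CONFIG_USE_SCHED_LOCKS where no event word is recorded.

/* Hedged sketch only: the types and the sched_event_send()/cpu_wbflush()
 * calls are from this codebase, but this body is inferred, not the real one. */
static void barrier_do_broadcast(struct barrier_s *barrier)
{
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register void *listner;
	register uint_t i;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	for(i = 0; i < barrier->count; i++)
	{
		wqdb    = barrier->wqdb_tbl[i / wqdbsz];
		listner = wqdb->tbl[i % wqdbsz].listner;

		if(listner == NULL)	/* the serial thread cleared its own slot */
			continue;

		wqdb->tbl[i % wqdbsz].listner = NULL;
		sched_event_send(listner, wqdb->tbl[i % wqdbsz].event);
	}

	cpu_wbflush();
}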
Example #3
/*
 * FIXME: define spinlock_rdlock() so that all locking on task->th_lock
 * becomes a rdlock, except on join/detach/destroy
 */
int sys_thread_wakeup(pthread_t tid, pthread_t *tid_tbl, uint_t count)
{
	struct task_s *task;
	struct thread_s *this;
	struct thread_s *target;
	pthread_t tbl[100];
	void *listner;
	uint_t event;
	sint_t i;
	error_t err;

	this = current_thread;
	task = this->task;
	i = -1;

	if(tid_tbl != NULL)
	{
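		/* The whole table must lie in user space and fit the 100-entry local buffer */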
		if((((uint_t)tid_tbl + (count*sizeof(pthread_t))) >= CONFIG_KERNEL_OFFSET) || 
		   (count == 0) || (count > 100))
		{
			err = -1;
			goto fail_tid_tbl;
		}

		if((err = cpu_uspace_copy(&tbl[0], tid_tbl, sizeof(pthread_t) * count)))
			goto fail_uspace;

		if(tbl[0] != tid)
		{
			err = -2;
			goto fail_first_tid;
		}
	}
	else
	{
		count = 1;
		tbl[0] = tid;
	}

	for(i = 0; i < count; i++)
	{
		tid = tbl[i];

		if(tid > task->max_order)
		{
			err = -3;
			goto fail_tid;
		}

		target = task->th_tbl[tid];
   
		if((target == NULL) || (target->signature != THREAD_ID))
		{
			err = -4;
			goto fail_target;
		}

		listner = sched_get_listner(target, SCHED_OP_UWAKEUP);
		event   = sched_event_make (target, SCHED_OP_UWAKEUP);
    
		if(this->info.isTraced == true)
		{
			printk(INFO,"%s: tid %d --> tid %d [%d][%d]\n", 
			       __FUNCTION__, 
			       this->info.order, 
			       tid, 
			       cpu_time_stamp(),
			       i);
		}

		sched_event_send(listner,event);
		cpu_wbflush();
	}

	return 0;

fail_target:
fail_tid:
fail_first_tid:
fail_uspace:
fail_tid_tbl:

	printk(INFO, "%s: cpu %d, pid %d, tid %x, i %d, count %d, ttid %x, request has failed with err %d [%d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       task->pid,
	       this->info.order,
	       i,
	       count,
	       tid,
	       err,
	       cpu_time_stamp());
  
	this->info.errno = EINVAL;
	return -1;
}
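
A caller of sys_thread_wakeup() has a narrow contract to respect: tid_tbl may be NULL (wake only tid), or it must hold between 1 and 100 entries, lie entirely in user space, and repeat tid in its first slot. A hypothetical user-space sketch follows; thread_wakeup() is an assumed syscall wrapper, not an API shown here.

/* Hypothetical caller: thread_wakeup() is an assumed user-space wrapper
 * around sys_thread_wakeup(); it is not part of the code shown above. */
#include <pthread.h>

extern int thread_wakeup(pthread_t tid, pthread_t *tid_tbl, unsigned count);

int wake_three(pthread_t a, pthread_t b, pthread_t c)
{
	/* tbl[0] must repeat the tid argument or the syscall fails with EINVAL */
	pthread_t tbl[3] = { a, b, c };

	return thread_wakeup(a, tbl, 3);
}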