Example #1
error_t barrier_wait(struct barrier_s *barrier)
{
	register uint_t event;
	register void *listner;
	register uint_t ticket;
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register struct thread_s *this;
	uint_t irq_state;
	uint_t tm_now;

	tm_now = cpu_time_stamp(); 
	this   = current_thread;
	index  = this->info.order;

	if((barrier->signature != BARRIER_ID) || ((barrier->owner != NULL) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz  = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
	wqdb    = barrier->wqdb_tbl[index / wqdbsz];

#if !(CONFIG_USE_SCHED_LOCKS)
	/* Record a scheduler wakeup event; only meaningful when scheduler locks are not used */
	event   = sched_event_make (this, SCHED_OP_WAKEUP);
	listner = sched_get_listner(this, SCHED_OP_WAKEUP);
	wqdb->tbl[index % wqdbsz].event   = event;
#else
	listner = (void*)this;
#endif

	wqdb->tbl[index % wqdbsz].listner = listner;

#if CONFIG_BARRIER_ACTIVE_WAIT
	register uint_t current_phase;
	current_phase = barrier->phase;
#endif	/* CONFIG_BARRIER_ACTIVE_WAIT */

	cpu_disable_all_irq(&irq_state);

	ticket = arch_barrier_wait(barrier->cluster, barrier->hwid);

	cpu_restore_irq(irq_state);

	if((int)ticket < 0) return EINVAL;	/* arch_barrier_wait() reports failure with a negative value */

	if(ticket == barrier->count)
		barrier->tm_first = tm_now;
	else if(ticket == 1)
		barrier->tm_last  = tm_now;

#if CONFIG_BARRIER_ACTIVE_WAIT
	while(cpu_uncached_read(&barrier->state[current_phase]) == 0)
		sched_yield(this);
#else
	sched_sleep(this);
#endif	/* CONFIG_BARRIER_ACTIVE_WAIT */

	return (ticket == 1) ? PTHREAD_BARRIER_SERIAL_THREAD : 0;
}
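
The return convention above follows the usual POSIX barrier semantics: every caller blocks until the last participant arrives, and exactly one caller gets PTHREAD_BARRIER_SERIAL_THREAD back. A minimal caller-side sketch (hypothetical names; only barrier_wait() comes from the example, the barrier setup and worker loop are assumed):

/* Hypothetical usage sketch: N worker threads meet at one barrier per phase. */
static struct barrier_s phase_barrier;	/* assumed initialized for N participants */

static void worker_phase(void)
{
	error_t err;

	/* ... per-thread work for this phase ... */

	err = barrier_wait(&phase_barrier);

	if(err == PTHREAD_BARRIER_SERIAL_THREAD)
	{
		/* exactly one waiter per phase runs the once-only work,
		 * e.g. swapping shared buffers before the next phase */
	}
	else if(err != 0)
	{
		/* EINVAL: bad signature, or the barrier belongs to another task */
	}
}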
Example #2
int sys_fork(uint_t flags, uint_t cpu_gid)
{
	fork_info_t info;
	struct dqdt_attr_s attr;
	struct thread_s *this_thread;
	struct task_s *this_task;
	struct thread_s *child_thread;
	struct task_s *child_task;
	uint_t irq_state;
	uint_t cpu_lid;
	uint_t cid;
	error_t err;
	uint_t tm_start;
	uint_t tm_end;
	uint_t tm_bRemote;
	uint_t tm_aRemote;

	tm_start = cpu_time_stamp();

	fork_dmsg(1, "%s: cpu %d, started [%d]\n",
		  __FUNCTION__, 
		  cpu_get_id(),
		  tm_start);

	this_thread = current_thread;
	this_task   = this_thread->task;
	info.current_clstr = current_cluster;

	err = atomic_add(&this_task->childs_nr, 1);
  
	if(err >= CONFIG_TASK_CHILDS_MAX_NR)
	{
		err = EAGAIN;
		goto fail_childs_nr;
	}

	fork_dmsg(1, "%s: task of pid %d can fork a child [%d]\n",
		  __FUNCTION__, 
		  this_task->pid,
		  cpu_time_stamp());

	info.isDone      = false;
	info.this_thread = this_thread;
	info.this_task   = this_task;
	info.flags       = flags;

	cpu_disable_all_irq(&irq_state);

	/* Save the FPU context without being preempted */
	if(current_cpu->fpu_owner == this_thread)
	{
		fork_dmsg(1, "%s: going to save FPU\n", __FUNCTION__);
		cpu_fpu_context_save(&this_thread->uzone);
	}

	cpu_restore_irq(irq_state);

	if(flags & PT_FORK_USE_TARGET_CPU)
	{
		cpu_gid       = cpu_gid % arch_onln_cpu_nr();
		cpu_lid       = arch_cpu_lid(cpu_gid);
		cid           = arch_cpu_cid(cpu_gid);
		attr.cid      = cid;
		attr.cpu_id   = cpu_lid;
		info.isPinned = true;
	}
	else
	{
		info.isPinned = false;
		dqdt_attr_init(&attr, NULL);
		err = dqdt_task_placement(dqdt_root, &attr);
	}

	info.cpu      = cpu_lid2ptr(attr.cpu_id);
	info.cid_exec = attr.cid_exec;

	/* Keep the first two processes on the current cluster. This is used by cluster zero
	 * to keep the "sh" process on this cluster. Init is forced on current_cluster in the
	 * task_load_init() function.
	 */
	if(this_task->pid < PID_MIN_GLOBAL+2)
		info.cid_exec = current_cid;

	fork_dmsg(1, "%s: new task will be placed on cluster %d, cpu %d. Task will be moved to cluster %u on exec()\n",
		  __FUNCTION__, attr.cid, attr.cpu_id, info.cid_exec);

	tm_bRemote = cpu_time_stamp();
	err = do_fork(&info);
	tm_aRemote = cpu_time_stamp();

	if(err)
		goto fail_do_fork;

	child_thread = info.child_thread;
	child_task   = info.child_task;

	spinlock_lock(&this_task->lock);

	list_add(&this_task->children, &child_task->list);
	spinlock_unlock(&this_task->lock);

	fork_dmsg(1, "%s: childs (task & thread) have been registered in their parents lists [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());
  
	fork_dmsg(1, "%s: going to add child to target scheduler\n", __FUNCTION__);
	sched_add_created(child_thread);
	tm_end = cpu_time_stamp();
    
	fork_dmsg(1, "%s: cpu %d, pid %d, done [s:%u, bR:%u, aR:%u, e:%u, d:%u, t:%u, r:%u]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       this_task->pid,
	       tm_start,
	       tm_bRemote,
	       tm_aRemote,
	       tm_end,
	       attr.tm_request,
	       tm_end - tm_start,
	       info.tm_event);

	return child_task->pid;

fail_do_fork:
fail_childs_nr:
	atomic_add(&this_task->childs_nr, -1);
	this_thread->info.errno = err;
	return -1;
}
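
On success the call returns the new task's PID to the parent; on failure it returns -1 after storing the error code in the calling thread (this_thread->info.errno). A hedged caller-side sketch of that convention follows (spawn_child_on_cpu() is hypothetical; sys_fork() and PT_FORK_USE_TARGET_CPU are taken from the example):

/* Hypothetical sketch: pin the child on an explicit CPU and consume the
 * parent-side return convention of sys_fork(). */
static int spawn_child_on_cpu(uint_t cpu_gid)
{
	int pid;

	/* request explicit placement instead of DQDT placement */
	pid = sys_fork(PT_FORK_USE_TARGET_CPU, cpu_gid);

	if(pid == -1)
	{
		/* the error code (e.g. EAGAIN when CONFIG_TASK_CHILDS_MAX_NR
		 * is reached) was stored in current_thread->info.errno */
		return -1;
	}

	/* parent path: pid is the child task's PID */
	return pid;
}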
Example #3
/* TODO: reintroduce barrier's ops to deal with case-specific treatment */
error_t barrier_wait(struct barrier_s *barrier)
{
	register uint_t ticket;
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register bool_t isShared;
	struct thread_s *this;
	uint_t tm_now;

	tm_now   = cpu_time_stamp();
	this     = current_thread;
	index    = this->info.order;
	ticket   = 0;
	isShared = (barrier->owner == NULL) ? true : false;

	if((barrier->signature != BARRIER_ID) || ((isShared == false) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	if(isShared)
	{
		spinlock_lock(&barrier->lock);
		index  = barrier->index ++;
		ticket = barrier->count - index;
	}

	wqdb   = barrier->wqdb_tbl[index / wqdbsz];

#if CONFIG_USE_SCHED_LOCKS
	wqdb->tbl[index % wqdbsz].listner = (void*)this;
#else
	uint_t irq_state;
	cpu_disable_all_irq(&irq_state); /* To prevent against any scheduler intervention */
	wqdb->tbl[index % wqdbsz].event   = sched_event_make (this, SCHED_OP_WAKEUP);
	wqdb->tbl[index % wqdbsz].listner = sched_get_listner(this, SCHED_OP_WAKEUP);
#endif

	if(isShared == false)
		ticket = atomic_add(&barrier->waiting, -1);

	if(ticket == 1)
	{
#if !(CONFIG_USE_SCHED_LOCKS)
		cpu_restore_irq(irq_state);
#endif
		barrier->tm_last = tm_now;
		wqdb->tbl[index % wqdbsz].listner = NULL;

		if(isShared)
		{
			barrier->index = 0;
			spinlock_unlock(&barrier->lock);
		}
		else
			atomic_init(&barrier->waiting, barrier->count);

		barrier_do_broadcast(barrier);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	if(ticket == barrier->count)
		barrier->tm_first = tm_now;

	if(isShared)
		spinlock_unlock_nosched(&barrier->lock);

	sched_sleep(this);

#if !(CONFIG_USE_SCHED_LOCKS)
	cpu_restore_irq(irq_state);
#endif
	return 0;
}
Example #4
void* thread_idle(void *arg)
{
	extern uint_t __ktext_start;
	register uint_t id;
	register uint_t cpu_nr;
	register struct thread_s *this;
	register struct cpu_s *cpu;
	struct thread_s *thread;
	register struct page_s *reserved_pg;
	register uint_t reserved;
	kthread_args_t *args;
	bool_t isBSCPU;
	uint_t tm_now;
	uint_t count;
	error_t err;

	this    = current_thread;
	cpu     = current_cpu;
	id      = cpu->gid;
	cpu_nr  = arch_onln_cpu_nr();
	args    = (kthread_args_t*) arg;
	isBSCPU = (cpu == cpu->cluster->bscpu);

	cpu_trace_write(cpu, thread_idle_func);

	if(isBSCPU)
		pmm_tlb_flush_vaddr((vma_t)&__ktext_start, PMM_UNKNOWN);

	cpu_set_state(cpu, CPU_ACTIVE);
	rt_timer_read(&tm_now);
	this->info.tm_born = tm_now;      
	this->info.tm_tmp  = tm_now;
	/* Reset per-CPU time statistics */
	cpu_time_reset(cpu);

	mcs_barrier_wait(&boot_sync);

	printk(INFO, "INFO: Starting Thread Idle On Core %d\tOK\n", cpu->gid);

	if(isBSCPU && (id == args->val[2]))
	{
		for(reserved = args->val[0]; reserved < args->val[1]; reserved += PMM_PAGE_SIZE)
		{
			reserved_pg = ppm_ppn2page(&cpu->cluster->ppm, reserved >> PMM_PAGE_SHIFT);
			page_state_set(reserved_pg, PGINIT);       
			ppm_free_pages(reserved_pg);
		}
	}

	thread = kthread_create(this->task, 
				&thread_event_manager, 
				NULL, 
				cpu->cluster->id, 
				cpu->lid);

	if(thread == NULL)
		PANIC("Failed to create default events handler Thread for CPU %d\n", id);

	thread->task   = this->task;
	cpu->event_mgr = thread;
	wait_queue_init(&thread->info.wait_queue, "Events");

	err = sched_register(thread);
	assert(err == 0);

	sched_add_created(thread);

	if(isBSCPU)
	{
		dqdt_update();
#if 0
		thread = kthread_create(this->task, 
					&cluster_manager_thread,
					cpu->cluster, 
					cpu->cluster->id, 
					cpu->lid);

		if(thread == NULL)
		{
			PANIC("Failed to create cluster manager thread, cid %d, cpu %d\n", 
			      cpu->cluster->id, 
			      cpu->gid);
		}

		thread->task          = this->task;
		cpu->cluster->manager = thread;
		wait_queue_init(&thread->info.wait_queue, "Cluster-Mgr");

		err = sched_register(thread);
		assert(err == 0);

		sched_add_created(thread);

#endif

		if(clusters_tbl[cpu->cluster->id].flags & CLUSTER_IO)
		{
			thread = kthread_create(this->task, 
						&kvfsd, 
						NULL, 
						cpu->cluster->id, 
						cpu->lid);
       
			if(thread == NULL)
			{
				PANIC("Failed to create KVFSD on cluster %d, cpu %d\n", 
				      cpu->cluster->id, 
				      cpu->gid);
			}

			thread->task  = this->task;
			wait_queue_init(&thread->info.wait_queue, "KVFSD");
			err           = sched_register(thread);
			assert(err == 0);
			sched_add_created(thread);
			printk(INFO,"INFO: kvfsd has been created\n");
		}
	}

	cpu_set_state(cpu,CPU_IDLE);

	while (true)
	{
		cpu_disable_all_irq(NULL);
     
		if((event_is_pending(&cpu->re_listner)) || (event_is_pending(&cpu->le_listner)))
		{
			wakeup_one(&cpu->event_mgr->info.wait_queue, WAIT_ANY);
		}
 
		sched_idle(this);

		count = sched_runnable_count(&cpu->scheduler);

		cpu_enable_all_irq(NULL);

		if(count != 0)
			sched_yield(this);
     
		//arch_set_power_state(cpu, ARCH_PWR_IDLE);
	}

	return NULL;
}