Example #1
static int
poweron_vcpu(struct cpu *cp)
{
	int error;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (HYPERVISOR_vcpu_op(VCPUOP_is_up, cp->cpu_id, NULL) != 0) {
		printf("poweron_vcpu: vcpu%d is not available!\n",
		    cp->cpu_id);
		return (ENXIO);
	}

	if ((error = xen_vcpu_up(cp->cpu_id)) == 0) {
		CPUSET_ADD(cpu_ready_set, cp->cpu_id);
		cp->cpu_flags |= CPU_EXISTS | CPU_READY | CPU_RUNNING;
		cp->cpu_flags &= ~CPU_POWEROFF;
		/*
		 * There are some nasty races possible here.
		 * Tell the vcpu it's up one more time.
		 * XXPV	Is this enough?  Is this safe?
		 */
		(void) xen_vcpu_up(cp->cpu_id);

		cpu_phase[cp->cpu_id] = CPU_PHASE_NONE;

		cpu_set_state(cp);
	}
	return (error);
}
Example #2
static int
poweroff_vcpu(struct cpu *cp)
{
	int error;

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(CPU->cpu_id != cp->cpu_id);
	ASSERT(cp->cpu_flags & CPU_QUIESCED);

	mp_enter_barrier();

	if ((error = xen_vcpu_down(cp->cpu_id)) == 0) {
		ASSERT(cpu_phase[cp->cpu_id] == CPU_PHASE_SAFE);

		CPUSET_DEL(cpu_ready_set, cp->cpu_id);

		cp->cpu_flags |= CPU_POWEROFF | CPU_OFFLINE;
		cp->cpu_flags &=
		    ~(CPU_RUNNING | CPU_READY | CPU_EXISTS | CPU_ENABLE);

		cpu_phase[cp->cpu_id] = CPU_PHASE_POWERED_OFF;

		cpu_set_state(cp);
	}

	mp_leave_barrier();

	return (error);
}
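Examples #1 and #2 are mirror images: both must run with cpu_lock held, both try the hypervisor operation first, and both update cp->cpu_flags and call cpu_set_state() only when that operation succeeds. The power-on path sets CPU_EXISTS | CPU_READY | CPU_RUNNING and clears CPU_POWEROFF; the power-off path sets CPU_POWEROFF | CPU_OFFLINE and clears the running/ready bits. Below is a minimal user-space sketch of that flag-transition pattern; the flag values are illustrative placeholders, not the real kernel definitions.

#include <stdio.h>

/*
 * Illustrative placeholder flag bits; the real values come from the
 * kernel's cpu flag definitions and are not reproduced here.
 */
#define	CPU_RUNNING	0x001
#define	CPU_READY	0x002
#define	CPU_EXISTS	0x004
#define	CPU_ENABLE	0x008
#define	CPU_OFFLINE	0x010
#define	CPU_POWEROFF	0x020

static unsigned int cpu_flags;

static void
poweron_flags(void)
{
	/* Same transition as example #1: mark the vcpu usable. */
	cpu_flags |= CPU_EXISTS | CPU_READY | CPU_RUNNING;
	cpu_flags &= ~CPU_POWEROFF;
}

static void
poweroff_flags(void)
{
	/* Same transition as example #2: mark the vcpu powered off. */
	cpu_flags |= CPU_POWEROFF | CPU_OFFLINE;
	cpu_flags &= ~(CPU_RUNNING | CPU_READY | CPU_EXISTS | CPU_ENABLE);
}

int
main(void)
{
	poweron_flags();
	printf("after power-on:  0x%03x\n", cpu_flags);
	poweroff_flags();
	printf("after power-off: 0x%03x\n", cpu_flags);
	return (0);
}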
Example #3
static void
warm_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * warm start activates cpus into the OFFLINE state
	 */
	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS
	    | CPU_OFFLINE | CPU_QUIESCED;
	cpu_set_state(cp);
}
Example #4
/*
 * parametric flag setting functions.  these routines set the cpu
 * state just prior to releasing the slave cpu.
 */
void
cold_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);
	/*
	 * Add CPU_READY after the cpu_add_active() call
	 * to avoid pausing cp.
	 */
	cp->cpu_flags |= CPU_READY;		/* ready */
	cpu_set_state(cp);
}
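Examples #3 and #4 differ mainly in the state they leave the cpu in: warm_flag_set() brings it up OFFLINE and QUIESCED, while cold_flag_set() activates it through cpu_add_active() and only then adds CPU_READY, so the newly added cpu is not paused. Both assert that cpu_lock is held, so the caller must take the lock first. The fragment below is a hypothetical caller sketch, not code from the source tree; mutex_enter()/mutex_exit() and the global cpu_lock are the standard illumos kernel locking interfaces.

/*
 * Hypothetical caller sketch: the flag-setting helpers above must be
 * invoked with cpu_lock held.
 */
static void
set_slave_flags(int cpuid, boolean_t cold_start)
{
	mutex_enter(&cpu_lock);
	if (cold_start)
		cold_flag_set(cpuid);	/* activate: RUNNING/ENABLE/EXISTS, then READY */
	else
		warm_flag_set(cpuid);	/* bring the cpu up in the OFFLINE state */
	mutex_exit(&cpu_lock);
}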
Example #5
File: thread_idle.c Project: Masshat/almos
void* thread_idle(void *arg)
{
	extern uint_t __ktext_start;
	register uint_t id;
	register uint_t cpu_nr;
	register struct thread_s *this;
	register struct cpu_s *cpu;
	struct thread_s *thread;
	register struct page_s *reserved_pg;
	register uint_t reserved;
	kthread_args_t *args;
	bool_t isBSCPU;
	uint_t tm_now;
	uint_t count;
	error_t err;

	this    = current_thread;
	cpu     = current_cpu;
	id      = cpu->gid;
	cpu_nr  = arch_onln_cpu_nr();
	args    = (kthread_args_t*) arg;
	isBSCPU = (cpu == cpu->cluster->bscpu);

	cpu_trace_write(cpu, thread_idle_func);

	if(isBSCPU)
		pmm_tlb_flush_vaddr((vma_t)&__ktext_start, PMM_UNKNOWN);

	cpu_set_state(cpu, CPU_ACTIVE);
	rt_timer_read(&tm_now);
	this->info.tm_born = tm_now;      
	this->info.tm_tmp  = tm_now;
	//// Reset stats /// 
	cpu_time_reset(cpu);
	////////////////////

	mcs_barrier_wait(&boot_sync);

	printk(INFO, "INFO: Starting Thread Idle On Core %d\tOK\n", cpu->gid);

	if(isBSCPU && (id == args->val[2]))
	{
		for(reserved = args->val[0]; reserved < args->val[1]; reserved += PMM_PAGE_SIZE)
		{
			reserved_pg = ppm_ppn2page(&cpu->cluster->ppm, reserved >> PMM_PAGE_SHIFT);
			page_state_set(reserved_pg, PGINIT);       
			ppm_free_pages(reserved_pg);
		}
	}

	thread = kthread_create(this->task, 
				&thread_event_manager, 
				NULL, 
				cpu->cluster->id, 
				cpu->lid);

	if(thread == NULL)
		PANIC("Failed to create default events handler Thread for CPU %d\n", id);

	thread->task   = this->task;
	cpu->event_mgr = thread;
	wait_queue_init(&thread->info.wait_queue, "Events");

	err = sched_register(thread);
	assert(err == 0);

	sched_add_created(thread);

	if(isBSCPU)
	{
		dqdt_update();
#if 0
		thread = kthread_create(this->task, 
					&cluster_manager_thread,
					cpu->cluster, 
					cpu->cluster->id, 
					cpu->lid);

		if(thread == NULL)
		{
			PANIC("Failed to create cluster manager thread, cid %d, cpu %d\n", 
			      cpu->cluster->id, 
			      cpu->gid);
		}

		thread->task          = this->task;
		cpu->cluster->manager = thread;
		wait_queue_init(&thread->info.wait_queue, "Cluster-Mgr");

		err = sched_register(thread);
		assert(err == 0);

		sched_add_created(thread);

#endif

		if(clusters_tbl[cpu->cluster->id].flags & CLUSTER_IO)
		{
			thread = kthread_create(this->task, 
						&kvfsd, 
						NULL, 
						cpu->cluster->id, 
						cpu->lid);
       
			if(thread == NULL)
			{
				PANIC("Failed to create KVFSD on cluster %d, cpu %d\n", 
				      cpu->cluster->id, 
				      cpu->gid);
			}

			thread->task  = this->task;
			wait_queue_init(&thread->info.wait_queue, "KVFSD");
			err           = sched_register(thread);
			assert(err == 0);
			sched_add_created(thread);
			printk(INFO,"INFO: kvfsd has been created\n");
		}
	}

	cpu_set_state(cpu,CPU_IDLE);

	while (true)
	{
		cpu_disable_all_irq(NULL);
     
		if((event_is_pending(&cpu->re_listner)) || (event_is_pending(&cpu->le_listner)))
		{
			wakeup_one(&cpu->event_mgr->info.wait_queue, WAIT_ANY);
		}
 
		sched_idle(this);

		count = sched_runnable_count(&cpu->scheduler);

		cpu_enable_all_irq(NULL);

		if(count != 0)
			sched_yield(this);
     
		//arch_set_power_state(cpu, ARCH_PWR_IDLE);
	}

	return NULL;
}
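The endless loop at the end of thread_idle() follows a simple structure: disable interrupts, wake the per-cpu event-manager thread if a remote or local event is pending, let the scheduler account for idle time, re-enable interrupts, and yield whenever runnable threads have appeared. The user-space model below only illustrates that control flow; the helper names are placeholders, not ALMOS interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder helpers standing in for the per-cpu event and scheduler state. */
static bool event_pending(void)      { static int n = 2; return (n-- > 0); }
static void wake_event_manager(void) { puts("wake event-manager thread"); }
static int  runnable_count(void)     { static int n = 1; return (n > 0 ? n-- : 0); }
static void yield_to_runnable(void)  { puts("yield to runnable thread"); }

int
main(void)
{
	int iter;

	for (iter = 0; iter < 3; iter++) {	/* the real loop never exits */
		/* interrupts are disabled here in the real code */
		if (event_pending())
			wake_event_manager();
		/* idle-time accounting (sched_idle) happens here */
		/* interrupts are re-enabled here */
		if (runnable_count() != 0)
			yield_to_runnable();
	}
	return (0);
}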