Example #1
extern kthread_runqueue_t *ksched_find_target(uthread_struct_t *u_obj)
{
	ksched_shared_info_t *ksched_info;
	unsigned int target_cpu, u_gid;

	ksched_info = &ksched_shared_info;
	u_gid = u_obj->uthread_gid;

	target_cpu = ksched_info->last_ugroup_kthread[u_gid];
	
	do
	{
		/* Round-robin to the next mapped CPU; the loop assumes at least
		 * one CPU is present in kthread_cpu_map. */
		target_cpu = ((target_cpu + 1) % GT_MAX_CORES);

	} while(!kthread_cpu_map[target_cpu]);

	gt_spin_lock(&(ksched_info->ksched_lock));
	ksched_info->last_ugroup_kthread[u_gid] = target_cpu;
	gt_spin_unlock(&(ksched_info->ksched_lock));

	u_obj->cpu_id = kthread_cpu_map[target_cpu]->cpuid;
	u_obj->last_cpu_id = kthread_cpu_map[target_cpu]->cpuid;

#if 0
	printf("Target uthread (id:%d, group:%d) : cpu(%d)\n", u_obj->uthread_tid, u_obj->uthread_gid, kthread_cpu_map[target_cpu]->cpuid);
#endif

	return(&(kthread_cpu_map[target_cpu]->krunqueue));
}
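A minimal usage sketch, not from the original listing: once ksched_find_target() has chosen a runqueue, the new uthread would be enqueued on that kthread's active runqueue. The field and helper names (active_runq, kthread_runqlock, add_to_runqueue) are taken from the other examples here; the exact call site is an assumption.

/* Hypothetical call site: route a freshly created uthread to the runqueue
 * picked by ksched_find_target(). */
kthread_runqueue_t *kthread_runq = ksched_find_target(u_obj);
add_to_runqueue(kthread_runq->active_runq,
                &(kthread_runq->kthread_runqlock),
                u_obj);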
Example #2
uthread_t *cfs_pick_next_uthread(kthread_t *k_ctx)
{
	checkpoint("k%d: CFS: Picking next uthread", k_ctx->cpuid);

	cfs_kthread_t *cfs_kthread = cfs_get_kthread(k_ctx);
	assert(cfs_kthread != NULL);

	/* Bail out if cfs_check_group_active() reports no runnable group */
	if (cfs_check_group_active(cfs_kthread->tree) == -1)
		return NULL;

	gt_spin_lock(&cfs_kthread->lock);
	rb_red_blk_node *min = RBDeleteMin(cfs_kthread->tree);
	assert(min != cfs_kthread->tree->nil);
	if (!min) {
		/* Empty tree: no runnable uthread on this kthread */
		cfs_kthread->current_cfs_uthread = NULL;
		cfs_kthread->min_vruntime = 0;
		gt_spin_unlock(&cfs_kthread->lock);
		return NULL;
	}

	cfs_uthread_t *min_cfs_uthread = min->info;
	checkpoint("k%d: u%d: Choosing uthread with vruntime %lu",
	           cfs_kthread->k_ctx->cpuid, min_cfs_uthread->uthread->tid,
	           min_cfs_uthread->key);
	cfs_kthread->current_cfs_uthread = min_cfs_uthread;
	cfs_kthread->min_vruntime = min_cfs_uthread->vruntime;
	gt_spin_unlock(&cfs_kthread->lock);
	return min_cfs_uthread->uthread;
}
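For symmetry, here is a hedged sketch of the reverse operation: putting the current uthread back into the tree once its timeslice ends. cfs_requeue_uthread is a hypothetical helper; RBNodeCreate, RBTreeInsert and the field names come from the surrounding examples, and the key-equals-vruntime ordering is an assumption.

/* Hypothetical counterpart to cfs_pick_next_uthread(): re-insert the current
 * uthread, keyed by its updated vruntime (assumed ordering). */
static void cfs_requeue_uthread(cfs_kthread_t *cfs_kthread,
                                cfs_uthread_t *cfs_uthread)
{
	gt_spin_lock(&cfs_kthread->lock);
	cfs_uthread->key = cfs_uthread->vruntime;
	cfs_uthread->node = RBNodeCreate(&cfs_uthread->key, cfs_uthread);
	RBTreeInsert(cfs_kthread->tree, cfs_uthread->node);
	cfs_kthread->current_cfs_uthread = NULL;
	gt_spin_unlock(&cfs_kthread->lock);
}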
Example #3
extern uthread_struct_t *sched_find_best_uthread(kthread_runqueue_t *kthread_runq)
{
	/* CFS variant: pick the uthread with the smallest vruntime.
	 * [1] Take the leftmost node of the red-black runqueue.
	 * [NOT FOUND] Return NULL (no more jobs).
	 * [FOUND] Update the runqueue's min_vruntime, remove the uthread from
	 * the tree and return it. */

	rbtree rbrunq = kthread_runq->cfs_rq;
	rbtree_node node;
	uthread_struct_t *u_obj;

	gt_spin_lock(&(kthread_runq->kthread_runqlock));
	kthread_runq->kthread_runqlock.holder = 0x04;

	/* Look up the leftmost (smallest-vruntime) node while holding the lock */
	node = find_leftmost(rbrunq);

	/* If there are no more nodes to schedule, return NULL */
	if (rbrunq->nr_running == 0 || !node) {
		gt_spin_unlock(&(kthread_runq->kthread_runqlock));
		return NULL;
	}

	u_obj = (uthread_struct_t *) node->value; /* The leftmost node holds the next uthread */
	update_min_vruntime(kthread_runq, u_obj->vruntime); /* Track the runqueue's min_vruntime */
	__rem_from_cfs_runqueue(rbrunq, u_obj); /* Remove the uthread from the CFS tree */
	gt_spin_unlock(&(kthread_runq->kthread_runqlock));

#if 0
	printf("cpu(%d) : sched best uthread(id:%d, group:%d)\n", u_obj->cpu_id, u_obj->uthread_tid, u_obj->uthread_gid);
#endif

	return u_obj;
}
Example #4
extern void rem_from_runqueue(runqueue_t *runq, gt_spinlock_t *runq_lock, uthread_struct_t *u_elem)
{
	gt_spin_lock(runq_lock);
	runq_lock->holder = 0x03;
	__rem_from_runqueue(runq, u_elem);
	gt_spin_unlock(runq_lock);
	return;
}
Example #5
extern void add_to_runqueue(runqueue_t *runq, gt_spinlock_t *runq_lock, uthread_struct_t *u_elem)
{
	gt_spin_lock(runq_lock);
	runq_lock->holder = 0x02;
	__add_to_runqueue(runq, u_elem);
	gt_spin_unlock(runq_lock);
	return;
}
Example #6
static void pcs_insert_zombie(pcs_uthread_t *pcs_uthread,
                              kthread_runqueue_t *k_runq)
{
	uthread_head_t *kthread_zhead = &(k_runq->zombie_uthreads);
	gt_spin_lock(&(k_runq->kthread_runqlock));
	k_runq->kthread_runqlock.holder = 0x01;
	TAILQ_INSERT_TAIL(kthread_zhead, pcs_uthread, uthread_runq);
	gt_spin_unlock(&(k_runq->kthread_runqlock));
}
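A short, hedged sketch of how the zombie list inserted above might later be drained; pcs_uthread_destroy() is an assumed cleanup helper, while the TAILQ macros and field names follow the example above.

/* Hypothetical cleanup path: drain the zombie list under the runqueue lock. */
static void pcs_reap_zombies(kthread_runqueue_t *k_runq)
{
	uthread_head_t *kthread_zhead = &(k_runq->zombie_uthreads);
	pcs_uthread_t *zombie;

	gt_spin_lock(&(k_runq->kthread_runqlock));
	while ((zombie = TAILQ_FIRST(kthread_zhead)) != NULL) {
		TAILQ_REMOVE(kthread_zhead, zombie, uthread_runq);
		pcs_uthread_destroy(zombie); /* assumed helper */
	}
	gt_spin_unlock(&(k_runq->kthread_runqlock));
}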
Example #7
/* called at every kthread_create(). Assumes pcs_init() has already been
 * called */
void pcs_kthread_init(kthread_t *k_ctx)
{
	gt_spin_lock(&scheduler.lock);
	pcs_kthread_t *pcs_kthread = pcs_get_kthread(k_ctx);
	pcs_kthread->k_ctx = k_ctx;
	kthread_init_runqueue(&pcs_kthread->k_runqueue);
	pcs_data_t *pcs_data = SCHED_DATA;
	pcs_data->pcs_kthread_count++;
	gt_spin_unlock(&scheduler.lock);
}
Example #8
extern void rem_from_runqueue(runqueue_t *runq, gt_spinlock_t *runq_lock,
                              pcs_uthread_t *u_elem)
{
	if (runq_lock) {
		gt_spin_lock(runq_lock);
		runq_lock->holder = 0x03;
	}
	__rem_from_runqueue(runq, u_elem);
	if (runq_lock)
		gt_spin_unlock(runq_lock);
	return;
}
Example #9
extern uthread_struct_t *sched_find_best_uthread(kthread_runqueue_t *kthread_runq)
{
	/* [1] Tries to find the highest priority RUNNABLE uthread in active-runq.
	 * [2] Found - Jump to [FOUND]
	 * [3] Switches runqueues (active/expires)
	 * [4] Repeat [1] through [2]
	 * [NOT FOUND] Return NULL(no more jobs)
	 * [FOUND] Remove uthread from pq and return it. */

	runqueue_t *runq;
	prio_struct_t *prioq;
	uthread_head_t *u_head;
	uthread_struct_t *u_obj;
	unsigned int uprio, ugroup;

	gt_spin_lock(&(kthread_runq->kthread_runqlock));

	runq = kthread_runq->active_runq;

	kthread_runq->kthread_runqlock.holder = 0x04;
	if(!(runq->uthread_mask))
	{ /* No jobs in active. switch runqueue */
		assert(!runq->uthread_tot);
		kthread_runq->active_runq = kthread_runq->expires_runq;
		kthread_runq->expires_runq = runq;

		/* Re-check the runqueue that just became active */
		runq = kthread_runq->active_runq;
		if(!runq->uthread_mask)
		{
			assert(!runq->uthread_tot);
			gt_spin_unlock(&(kthread_runq->kthread_runqlock));
			return NULL;
		}
	}

	/* Find the highest priority bucket */
	uprio = LOWEST_BIT_SET(runq->uthread_mask);
	prioq = &(runq->prio_array[uprio]);

	assert(prioq->group_mask);
	ugroup = LOWEST_BIT_SET(prioq->group_mask);

	u_head = &(prioq->group[ugroup]);
	u_obj = TAILQ_FIRST(u_head);
	__rem_from_runqueue(runq, u_obj);

	gt_spin_unlock(&(kthread_runq->kthread_runqlock));
#if U_DEBUG
	printf("cpu(%d) : sched best uthread(id:%d, group:%d)\n", u_obj->cpu_id, u_obj->uthread_tid, u_obj->uthread_gid);
#endif
	return(u_obj);
}
Example #10
void update_curr(struct timeval tv1, uthread_struct_t *cur_uthread) {
	struct timeval tv2;
	gettimeofday(&tv2, NULL);
	/* Elapsed wall-clock time since tv1, in microseconds */
	unsigned int vtime = (tv2.tv_sec * 1000000 + tv2.tv_usec) -
	                     (tv1.tv_sec * 1000000 + tv1.tv_usec);
	cur_uthread->vruntime += vtime;

	if(cur_uthread->vgroup != NULL){
		gt_spin_lock(stat_lock);
		vruntime[cur_uthread->uthread_gid]+= (double)vtime;
		vruntime_thread[cur_uthread->uthread_gid][cur_uthread->uthread_tid]+= (double)vtime;
		gt_spin_unlock(stat_lock);
	}
}
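A minimal call-site sketch, assuming the scheduler samples the clock when a uthread is dispatched and charges the elapsed time when it stops running; only gettimeofday() and the update_curr() signature above are relied on.

/* Hypothetical usage: time one scheduling quantum of cur_uthread */
struct timeval quantum_start;
gettimeofday(&quantum_start, NULL);
/* ... cur_uthread runs until it yields or is preempted ... */
update_curr(quantum_start, cur_uthread);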
Example #11
static kthread_t *cfs_uthread_init(uthread_t *uthread)
{
	checkpoint("u%d: CFS: init uthread", uthread->tid);

	cfs_data_t *cfs_data = SCHED_DATA;
	cfs_uthread_t *cfs_uthread = emalloc(sizeof(*cfs_uthread));
	cfs_uthread->uthread = uthread;
	cfs_uthread->priority = CFS_DEFAULT_PRIORITY;
	cfs_uthread->gid = uthread->attr->gid;

	gt_spin_lock(&cfs_data->lock);
	cfs_kthread_t *cfs_kthread = cfs_find_kthread_target(cfs_uthread,
	                                                     cfs_data);
	gt_spin_unlock(&cfs_data->lock);

	/* update the kthread's group bookkeeping, load and latency */
	gt_spin_lock(&cfs_kthread->lock);
	if (cfs_uthread->gid > cfs_kthread->tree->max_gid)
		cfs_kthread->tree->max_gid = cfs_uthread->gid;
	cfs_kthread->cfs_uthread_count++;
	cfs_kthread->latency =
	        max(CFS_DEFAULT_LATENCY_us,
	            cfs_kthread->cfs_uthread_count * CFS_MIN_GRANULARITY_us);
	cfs_kthread->load += cfs_uthread->priority;
	cfs_uthread->vruntime = cfs_kthread->min_vruntime;
	cfs_uthread->key = 0;
	gt_spin_unlock(&cfs_kthread->lock);

	checkpoint("u%d: CFS: Creating node", uthread->tid);
	cfs_uthread->node = RBNodeCreate(&cfs_uthread->key, cfs_uthread);
	checkpoint("u%d: CFS: Insert into rb tree", cfs_uthread->uthread->tid);
	RBTreeInsert(cfs_kthread->tree, cfs_uthread->node);

	return cfs_kthread->k_ctx;
}
Example #12
/* called at every kthread_create(). Assumes cfs_init() has already been
 * called */
void cfs_kthread_init(kthread_t *k_ctx)
{
	checkpoint("k%d: CFS: init kthread", k_ctx->cpuid);
	gt_spin_lock(&scheduler.lock);
	cfs_kthread_t *cfs_kthread = cfs_get_kthread(k_ctx);
	gt_spinlock_init(&cfs_kthread->lock);
	cfs_kthread->k_ctx = k_ctx;
	cfs_kthread->current_cfs_uthread = NULL;
	cfs_kthread->cfs_uthread_count = 0;
	cfs_kthread->latency = CFS_DEFAULT_LATENCY_us;
	cfs_kthread->min_vruntime = 0;
	cfs_kthread->tree = RBTreeCreate(&cfs_rb_compare_key,
	                                 &cfs_rb_destroy_key,
	                                 &cfs_rb_destroy_info,
	                                 &cfs_rb_print_key,
	                                 &cfs_rb_print_info);
	cfs_data_t *cfs_data = SCHED_DATA;
	cfs_data->cfs_kthread_count++;
	gt_spin_unlock(&scheduler.lock);
}
Example #13
/* [1] Tries to find the highest priority RUNNABLE uthread in active-runq.
 * [2] Found - Jump to [FOUND]
 * [3] Switches runqueues (active/expires)
 * [4] Repeat [1] through [2]
 * [NOT FOUND] Return NULL(no more jobs)
 * [FOUND] Remove uthread from pq and return it. */
uthread_t *pcs_pick_next_uthread(kthread_t *k_ctx)
{
	checkpoint("k%d: PCS: Picking next uthread", k_ctx->cpuid);
	pcs_kthread_t *pcs_kthread = pcs_get_kthread(k_ctx);
	kthread_runqueue_t *kthread_runq = &pcs_kthread->k_runqueue;

	gt_spin_lock(&(kthread_runq->kthread_runqlock));
	kthread_runq->kthread_runqlock.holder = 0x04;

	runqueue_t *runq = kthread_runq->active_runq;
	if (!(runq->uthread_mask)) { /* No jobs in active. switch runqueue */
		checkpoint("k%d: PCS: Switching runqueues", k_ctx->cpuid);
		assert(!runq->uthread_tot);
		kthread_runq->active_runq = kthread_runq->expires_runq;
		kthread_runq->expires_runq = runq;

		runq = kthread_runq->active_runq;
		if (!runq->uthread_mask) {
			assert(!runq->uthread_tot);
			gt_spin_unlock(&(kthread_runq->kthread_runqlock));
			return NULL;
		}
	}

	/* Find the highest priority bucket */
	unsigned int uprio, ugroup;
	uprio = LOWEST_BIT_SET(runq->uthread_mask);
	prio_struct_t *prioq = &(runq->prio_array[uprio]);

	assert(prioq->group_mask);
	ugroup = LOWEST_BIT_SET(prioq->group_mask);

	uthread_head_t *u_head = &(prioq->group[ugroup]);
	pcs_uthread_t *next_uthread = TAILQ_FIRST(u_head);
	rem_from_runqueue(runq, NULL, next_uthread);

	gt_spin_unlock(&(kthread_runq->kthread_runqlock));
	return next_uthread->uthread;
}
Example #14
/* Called on uthread_create(). Must assign the new uthread to a kthread;
 * anything else is left up to the implementation. Can't assume the uthread
 * itself has been initialized in any way---it just has a tid
 */
kthread_t *pcs_uthread_init(uthread_t *uthread)
{
	checkpoint("u%d: PCS: init uthread", uthread->tid);

	pcs_data_t *pcs_data = SCHED_DATA;
	gt_spin_lock(&pcs_data->lock);

	pcs_uthread_t *pcs_uthread = pcs_pcs_uthread_create(uthread);
	pcs_uthread->uthread = uthread;
	pcs_uthread->priority = pq_get_priority(uthread);
	pcs_uthread->group_id = pq_get_group_id(uthread);

	pcs_kthread_t *pcs_kthread = pcs_find_kthread_target(pcs_uthread,
	                                                     pcs_data);
	assert(pcs_kthread != NULL);
	assert(pcs_kthread->k_ctx != NULL);
	add_to_runqueue(pcs_kthread->k_runqueue.active_runq,
	                &pcs_kthread->k_runqueue.kthread_runqlock,
	                pcs_uthread);
	gt_spin_unlock(&pcs_data->lock);
	return pcs_kthread->k_ctx;
}