Example #1
static void gtthread_app_start(void *arg)
{
	kthread_context_t *k_ctx;

	k_ctx = kthread_cpu_map[kthread_apic_id()];
	assert((k_ctx->cpu_apic_id == kthread_apic_id()));

#if 0
	printf("kthread (%d) ready to schedule", k_ctx->cpuid);
#endif
	while(!(k_ctx->kthread_flags & KTHREAD_DONE))
	{
		__asm__ __volatile__ ("pause\n");
		if(sigsetjmp(k_ctx->kthread_env, 0))
		{
			/* siglongjmp to this point is done when there
			 * are no more uthreads to schedule.*/
			/* XXX: gtthread app cleanup has to be done. */
			continue;
		}
		uthread_schedule(&sched_find_best_uthread);
	}
	kthread_exit();

	return;
}
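
The loop above depends on the sigsetjmp/siglongjmp pair: each kthread saves its context in k_ctx->kthread_env, and when the scheduler finds no runnable uthread it siglongjmps back to that point so the loop can re-check KTHREAD_DONE. A minimal, self-contained sketch of that dispatch pattern, assuming nothing about gtthreads internals (sched_env, run_next and work_left are placeholder names):

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf sched_env;   /* plays the role of k_ctx->kthread_env */
static int work_left = 3;      /* pretend run-queue depth */

static void run_next(void)
{
	if (work_left == 0)
		siglongjmp(sched_env, 1);   /* nothing to run: jump back into the loop */
	printf("running item %d\n", work_left--);
}

int main(void)
{
	int done = 0;

	while (!done) {
		if (sigsetjmp(sched_env, 0)) {
			/* Reached via siglongjmp: the queue is drained. */
			done = 1;
			continue;
		}
		run_next();
	}
	return 0;
}
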
Example #2
static void kthread_init(kthread_context_t *k_ctx)
{
	cpu_set_t cpu_affinity_mask;

	/* cpuid and kthread_app_func are set by the application 
	 * over kthread (eg. gtthread). */

	k_ctx->pid = syscall(SYS_getpid);
	k_ctx->tid = syscall(SYS_gettid);

	/* For priority co-scheduling */
	k_ctx->kthread_sched_timer = ksched_priority;
	k_ctx->kthread_sched_relay = ksched_cosched;

	/* XXX: kthread runqueue balancing (TBD) */
	k_ctx->kthread_runqueue_balance = NULL;

	/* Initialize kthread runqueue */

	kthread_init_runqueue(&(k_ctx->krunqueue));

	/* Pin this kthread to its assigned cpu */
	CPU_ZERO(&cpu_affinity_mask);
	CPU_SET(k_ctx->cpuid, &cpu_affinity_mask);
	sched_setaffinity(k_ctx->tid, sizeof(cpu_affinity_mask), &cpu_affinity_mask);

	sched_yield();

	/* Scheduled on target cpu */
	k_ctx->cpu_apic_id = kthread_apic_id();

	kthread_cpu_map[k_ctx->cpu_apic_id] = k_ctx;

	return;
}
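
sched_setaffinity expects a cpu_set_t built with the CPU_ZERO/CPU_SET macros; the original int bitmask passed with sizeof(unsigned long) can read past a 4-byte variable on 64-bit builds, which is why the call above is written with cpu_set_t. A stand-alone sketch of pinning the calling thread to a single CPU, outside of gtthreads (pinning to CPU 0 is just an illustrative choice):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	cpu_set_t mask;
	pid_t tid = syscall(SYS_gettid);   /* kernel thread id, as in kthread_init */

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);                 /* pin to CPU 0 */
	if (sched_setaffinity(tid, sizeof(mask), &mask) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	sched_yield();                     /* give the kernel a chance to migrate us now */
	printf("running on CPU %d\n", sched_getcpu());
	return 0;
}
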
Example #3
extern void gtthread_app_exit()
{
	/* gtthread_app_exit is called only by the main thread. */
	/* For the main thread, re-enter the scheduling loop. */
	kthread_context_t *k_ctx;

	k_ctx = kthread_cpu_map[kthread_apic_id()];
	k_ctx->kthread_flags &= ~KTHREAD_DONE;

	while(!(k_ctx->kthread_flags & KTHREAD_DONE))
	{
		__asm__ __volatile__ ("pause\n");
		if(sigsetjmp(k_ctx->kthread_env, 0))
		{
			/* siglongjmp to this point is done when there
			 * are no more uthreads to schedule.*/
			/* XXX: gtthread app cleanup has to be done. */
			continue;
		}
		uthread_schedule(&sched_find_best_uthread);
	}

	kthread_block_signal(SIGVTALRM);
	kthread_block_signal(SIGUSR1);

	while(ksched_shared_info.kthread_cur_uthreads)
	{
		/* Main thread has to wait for other kthreads */
		__asm__ __volatile__ ("pause\n");
	}
	return;	
}
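
kthread_block_signal is not shown in these examples; presumably it masks the given signal for the calling thread so the scheduler handlers cannot fire during cleanup. A sketch of how such a helper is typically written with pthread_sigmask (an assumption about the helper, not its actual body):

#include <signal.h>
#include <pthread.h>

/* Hypothetical equivalent of kthread_block_signal(signo). */
static void block_signal(int signo)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, signo);
	/* Mask the signal for this thread only; deliveries stay pending until unblocked. */
	pthread_sigmask(SIG_BLOCK, &set, NULL);
}
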
Example #4
static void ksched_cosched(int signal)
{
	/* [1] Reads the uthread-select-criterion set by schedule-master.
	 * [2] Read NULL. Jump to [5]
	 * [3] Tries to find a matching uthread.
	 * [4] Found - Jump to [FOUND]
	 * [5] Tries to find the best uthread (by DEFAULT priority method) 
	 * [6] Found - Jump to [FOUND]
	 * [NOT FOUND] Return.
	 * [FOUND] Return. 
	 * [[NOTE]] {uthread_select_criterion == match_uthread_group_id} */

	kthread_context_t *cur_k_ctx;

	// kthread_block_signal(SIGVTALRM);
	// kthread_block_signal(SIGUSR1);

	/* This virtual processor (kthread) was not the one the kernel
	 * picked for the VTALRM signal; the SIGUSR1 signal has been
	 * relayed to it by the schedule-master kthread. */

	cur_k_ctx = kthread_cpu_map[kthread_apic_id()];
	KTHREAD_PRINT_SCHED_DEBUGINFO(cur_k_ctx, "RELAY(USR)");

#ifdef CO_SCHED
	uthread_schedule(&sched_find_best_uthread_group);
#else
	//uthread_schedule(&sched_find_best_uthread);
#endif

	// kthread_unblock_signal(SIGVTALRM);
	// kthread_unblock_signal(SIGUSR1);
	return;
}
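
ksched_cosched only runs if a handler has been installed for the relayed SIGUSR1, and ksched_priority only fires if the master kthread is driven by a virtual timer. That setup code is not part of these examples; a sketch of the likely plumbing, assuming sigaction plus setitimer(ITIMER_VIRTUAL, ...) are used (the 100 ms period and the on_vtalrm name are illustrative, not taken from gtthreads):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

/* Stand-in for ksched_priority: just count virtual-timer ticks. */
static void on_vtalrm(int signo) { (void)signo; ticks++; }

int main(void)
{
	struct sigaction act;
	struct itimerval period;

	memset(&act, 0, sizeof(act));
	act.sa_handler = on_vtalrm;
	sigaction(SIGVTALRM, &act, NULL);

	/* Fire SIGVTALRM every 100 ms of consumed CPU time. */
	period.it_interval.tv_sec = 0;
	period.it_interval.tv_usec = 100 * 1000;
	period.it_value = period.it_interval;
	setitimer(ITIMER_VIRTUAL, &period, NULL);

	while (ticks < 5)
		;   /* burn CPU so the virtual timer advances */
	printf("got %d VTALRM ticks\n", (int)ticks);
	return 0;
}
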
Example #5
static void * uthread_mulmat(void *p)
{
	int i, j, k;
	int start_row, end_row;
	int start_col, end_col;
	unsigned int cpuid;
	struct timeval tv2;

#define ptr ((uthread_arg_t *)p)

	i = 0; j = 0; k = 0;

	start_row = ptr->start_row;
	end_row = (ptr->start_row + ptr->matrix_size/NUM_THREADS);

#ifdef GT_GROUP_SPLIT
	start_col = ptr->start_col;
	end_col = (ptr->start_col + ptr->matrix_size/NUM_THREADS);
#else
	start_col = 0;
	end_col = ptr->matrix_size;
#endif

#ifdef GT_THREADS
	cpuid = kthread_cpu_map[kthread_apic_id()]->cpuid;
#endif

	for(i = start_row; i < end_row; i++)
		for(j = start_col; j < end_col; j++)
			for(k = 0; k < ptr->matrix_size; k++)
				ptr->_C->m[i][j] += ptr->_A->m[i][k] * ptr->_B->m[k][j];

#ifndef GT_THREADS
	gettimeofday(&tv2,NULL);
#endif

#undef ptr
	return 0;
}
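
Each uthread above multiplies a horizontal slice of matrix_size/NUM_THREADS rows (and, under GT_GROUP_SPLIT, a matching column slice). A small single-threaded sketch of the same row-slicing arithmetic, with made-up sizes (SIZE and WORKERS stand in for matrix_size and NUM_THREADS):

#include <stdio.h>

#define SIZE    8   /* illustrative matrix_size */
#define WORKERS 4   /* illustrative NUM_THREADS */

int main(void)
{
	static int A[SIZE][SIZE], B[SIZE][SIZE], C[SIZE][SIZE];
	int w, i, j, k;

	for (i = 0; i < SIZE; i++)
		for (j = 0; j < SIZE; j++) {
			A[i][j] = 1;
			B[i][j] = 2;
		}

	/* Each "worker" gets SIZE/WORKERS consecutive rows, as in uthread_mulmat. */
	for (w = 0; w < WORKERS; w++) {
		int start_row = w * (SIZE / WORKERS);
		int end_row   = start_row + SIZE / WORKERS;

		for (i = start_row; i < end_row; i++)
			for (j = 0; j < SIZE; j++)
				for (k = 0; k < SIZE; k++)
					C[i][j] += A[i][k] * B[k][j];
	}

	printf("C[0][0] = %d (expected %d)\n", C[0][0], 2 * SIZE);
	return 0;
}
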
Example #6
static int func(void *arg)
{
	unsigned int count;
	kthread_context_t *k_ctx = kthread_cpu_map[kthread_apic_id()];
#define u_info ((uthread_arg_t *)arg)
	printf("Thread (id:%d, group:%d, cpu:%d) created\n", u_info->num1, u_info->num2, k_ctx->cpuid);
	count = 0;
	while(count <= 0x1fffffff)
	{
#if 0
		if(!(count % 5000000))
		{
			printf("uthread(id:%d, group:%d, cpu:%d) => count : %d\n", 
					u_info->num1, u_info->num2, k_ctx->cpuid, count);
		}
#endif
		count++;
	}
#undef u_info
	return 0;
}
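
The #define u_info trick simply casts the opaque void * argument back to the caller's struct type; the layout (num1, num2) has to match what was passed when the uthread was created. The same pattern in a minimal stand-alone form, with a hypothetical argument type in place of uthread_arg_t:

#include <stdio.h>

typedef struct { int num1; int num2; } demo_arg_t;   /* stands in for uthread_arg_t */

static int worker(void *arg)
{
#define u_info ((demo_arg_t *)arg)
	printf("id:%d group:%d\n", u_info->num1, u_info->num2);
#undef u_info
	return 0;
}

int main(void)
{
	demo_arg_t a = { 7, 1 };
	return worker(&a);
}
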
Example #7
static void ksched_priority(int signo)
{
	/* [1] Tries to find the next schedulable uthread.
	 * [2] Not Found - Sets uthread-select-criterion to NULL. Jump to [4].
	 * [3] Found -  Sets uthread-select-criterion(if any) {Eg. uthread_group}.
	 * [4] Announces uthread-select-criterion to other kthreads.
	 * [5] Relays the scheduling signal to other kthreads. 
	 * [RETURN] */
	kthread_context_t *cur_k_ctx, *tmp_k_ctx;
	int inx;

	// kthread_block_signal(SIGVTALRM);
	// kthread_block_signal(SIGUSR1);

	ksched_announce_cosched_group();
	//printf("\n TOING \n");
	cur_k_ctx = kthread_cpu_map[kthread_apic_id()];
	KTHREAD_PRINT_SCHED_DEBUGINFO(cur_k_ctx, "VTALRM");

	/* Relay the signal to all other virtual processors(kthreads) */
	for(inx=0; inx<GT_MAX_KTHREADS; inx++)
	{
		/* XXX: We can avoid the last check (tmp to cur) by
		 * temporarily marking cur as DONE. But chuck it !! */
		if((tmp_k_ctx = kthread_cpu_map[inx]) && (tmp_k_ctx != cur_k_ctx))
		{
			if(tmp_k_ctx->kthread_flags & KTHREAD_DONE)
				continue;
			/* tkill : send signal to specific threads */
			syscall(__NR_tkill, tmp_k_ctx->tid, SIGUSR1);
		}
	}

	uthread_schedule(&sched_find_best_uthread);

	// kthread_unblock_signal(SIGVTALRM);
	// kthread_unblock_signal(SIGUSR1);
	return;
}
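
The tkill syscall in the loop above delivers SIGUSR1 to one specific kernel thread (tmp_k_ctx->tid), which is how the timer tick is relayed to the other virtual processors. A stand-alone sketch of the same relay with an ordinary pthread (SYS_tkill is the same syscall number as __NR_tkill; the handler and variable names are placeholders):

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static volatile sig_atomic_t got_usr1;
static volatile pid_t worker_tid;

static void on_usr1(int signo) { (void)signo; got_usr1 = 1; }

static void *worker(void *arg)
{
	(void)arg;
	worker_tid = syscall(SYS_gettid);   /* the id that tkill targets */
	while (!got_usr1)
		;                               /* spin until the relayed signal arrives */
	printf("worker got relayed SIGUSR1\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, on_usr1);
	pthread_create(&t, NULL, worker, NULL);

	while (!worker_tid)
		;                               /* wait for the worker to publish its tid */
	syscall(SYS_tkill, (pid_t)worker_tid, SIGUSR1);

	pthread_join(t, NULL);
	return 0;
}
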