/* Initialize the ksched_shared_info */
static inline void ksched_info_init(ksched_shared_info_t *ksched_info)
{
    gt_spinlock_init(&(ksched_info->ksched_lock));
    gt_spinlock_init(&(ksched_info->uthread_init_lock));
    gt_spinlock_init(&(ksched_info->__malloc_lock));
    return;
}
/* CFS variant: initialize the per-kthread runqueue as a red-black tree */
extern void kthread_init_runqueue(kthread_runqueue_t *kthread_runq)
{
    gt_spinlock_init(&(kthread_runq->kthread_runqlock));
    kthread_runq->cfs_rq = rbtree_create();
    return;
}
/* Creates and initializes the CFS sched data */
static void *cfs_create_sched_data(int lwp_count)
{
    cfs_data_t *cfs_data = ecalloc(sizeof(*cfs_data));
    gt_spinlock_init(&cfs_data->lock);
    cfs_data->last_cpu_assiged = 0;

    /* array of cfs_kthread_t, indexed by kthread_t->cpuid */
    cfs_kthread_t *cfs_kthreads = ecalloc(lwp_count * sizeof(*cfs_kthreads));
    cfs_data->cfs_kthreads = cfs_kthreads;
    return cfs_data;
}
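/*
 * For reference, a minimal sketch of what cfs_data_t might look like,
 * inferred only from the fields that cfs_create_sched_data() and
 * cfs_kthread_init() touch.  This is an assumption for illustration; the
 * real definition lives in the scheduler's header and may carry more fields.
 */
typedef struct cfs_data_sketch {
    gt_spinlock_t lock;              /* guards the fields below */
    int last_cpu_assiged;            /* last cpu given new work (name as in source) */
    int cfs_kthread_count;           /* kthreads registered so far */
    cfs_kthread_t *cfs_kthreads;     /* array indexed by kthread_t->cpuid */
} cfs_data_sketch_t;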
/* Active/expires variant: initialize the per-kthread runqueue pair and the
 * zombie list */
extern void kthread_init_runqueue(kthread_runqueue_t *kthread_runq)
{
    kthread_runq->active_runq = &(kthread_runq->runqueues[0]);
    kthread_runq->expires_runq = &(kthread_runq->runqueues[1]);

    gt_spinlock_init(&(kthread_runq->kthread_runqlock));
    init_runqueue(kthread_runq->active_runq);
    init_runqueue(kthread_runq->expires_runq);

    TAILQ_INIT(&(kthread_runq->zombie_uthreads));
    return;
}
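/*
 * Not shown in this section, but implied by the active/expires split above:
 * once the active runqueue drains, schedulers of this shape typically swap
 * the two pointers so expired uthreads become runnable again.  A minimal
 * sketch of that swap, assuming the element type is runqueue_t as suggested
 * by init_runqueue(); the helper name is hypothetical.
 */
static void kthread_flip_runqueues(kthread_runqueue_t *kthread_runq)
{
    runqueue_t *done = kthread_runq->active_runq;
    kthread_runq->active_runq = kthread_runq->expires_runq;
    kthread_runq->expires_runq = done;
}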
/* Creates and initializes the PCS sched data */
void *pcs_create_sched_data(int lwp_count)
{
    pcs_data_t *pcs_data = ecalloc(sizeof(*pcs_data));
    gt_spinlock_init(&pcs_data->lock);

    /* array of pcs_kthread_t, indexed by kthread_t->cpuid */
    pcs_kthread_t *pcs_kthreads = ecalloc(lwp_count * sizeof(*pcs_kthreads));
    pcs_data->pcs_kthreads = pcs_kthreads;

    /* array of pcs_uthread_t pointers, indexed by uthread_t->tid */
    pcs_data->pcs_uthread_array_length = DEFAULT_UTHREAD_COUNT;
    pcs_uthread_t **pcs_uthreads = ecalloc(
            pcs_data->pcs_uthread_array_length * sizeof(*pcs_uthreads));
    pcs_data->pcs_uthreads = pcs_uthreads;

    pcs_data->pcs_uthread_count = 0;
    pcs_data->pcs_kthread_count = 0;
    return pcs_data;
}
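/*
 * As with the CFS data above, a sketch of the pcs_data_t layout this
 * initializer implies, inferred from its assignments only.  Illustrative,
 * not the source definition; the real header may differ.
 */
typedef struct pcs_data_sketch {
    gt_spinlock_t lock;               /* guards the fields below */
    pcs_kthread_t *pcs_kthreads;      /* array indexed by kthread_t->cpuid */
    pcs_uthread_t **pcs_uthreads;     /* array indexed by uthread_t->tid */
    int pcs_uthread_array_length;     /* capacity of pcs_uthreads */
    int pcs_uthread_count;            /* uthreads registered so far */
    int pcs_kthread_count;            /* kthreads registered so far */
} pcs_data_sketch_t;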
/* Called at every kthread_create().  Assumes cfs_init() has already been
 * called. */
void cfs_kthread_init(kthread_t *k_ctx)
{
    checkpoint("k%d: CFS: init kthread", k_ctx->cpuid);
    gt_spin_lock(&scheduler.lock);

    cfs_kthread_t *cfs_kthread = cfs_get_kthread(k_ctx);
    gt_spinlock_init(&cfs_kthread->lock);
    cfs_kthread->k_ctx = k_ctx;
    cfs_kthread->current_cfs_uthread = NULL;
    cfs_kthread->cfs_uthread_count = 0;
    cfs_kthread->latency = CFS_DEFAULT_LATENCY_us;
    cfs_kthread->min_vruntime = 0;
    cfs_kthread->tree = RBTreeCreate(&cfs_rb_compare_key, &cfs_rb_destroy_key,
                                     &cfs_rb_destroy_info, &cfs_rb_print_key,
                                     &cfs_rb_print_info);

    cfs_data_t *cfs_data = SCHED_DATA;
    cfs_data->cfs_kthread_count++;

    gt_spin_unlock(&scheduler.lock);
}
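/*
 * cfs_get_kthread() is not shown in this section.  Given the "indexed by
 * kthread_t->cpuid" comment in cfs_create_sched_data(), it is presumably a
 * simple array lookup along these lines.  A sketch under that assumption,
 * also assuming SCHED_DATA expands to the global cfs_data_t pointer; not the
 * source implementation.
 */
static cfs_kthread_t *cfs_get_kthread(kthread_t *k_ctx)
{
    cfs_data_t *cfs_data = SCHED_DATA;
    return &cfs_data->cfs_kthreads[k_ctx->cpuid];
}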