extern uthread_struct_t *sched_find_best_uthread(kthread_runqueue_t *kthread_runq)
{
	/* [1] Try to find the highest-priority RUNNABLE uthread in the active runq.
	 * [2] Found - jump to [FOUND].
	 * [3] Switch runqueues (active/expired).
	 * [4] Repeat [1] through [2].
	 * [NOT FOUND] Return NULL (no more jobs).
	 * [FOUND] Remove the uthread from the pq and return it. */
	rbtree rbrunq = kthread_runq->cfs_rq;
	rbtree_node node;
	uthread_struct_t *u_obj;

	gt_spin_lock(&(kthread_runq->kthread_runqlock));
	kthread_runq->kthread_runqlock.holder = 0x04;

	/* Read the leftmost node under the lock so it cannot change beneath us. */
	node = find_leftmost(rbrunq);

	/* If there are no more nodes to schedule, return NULL. */
	if (rbrunq->nr_running == 0 || !node) {
		gt_spin_unlock(&(kthread_runq->kthread_runqlock));
		return NULL;
	}

	/* Otherwise take the uthread in the leftmost node (smallest vruntime). */
	u_obj = (uthread_struct_t *) node->value;

	/* Advance the runqueue's min_vruntime to this uthread's vruntime. */
	update_min_vruntime(kthread_runq, u_obj->vruntime);

	/* Remove the uthread from the CFS tree. */
	__rem_from_cfs_runqueue(rbrunq, u_obj);

	gt_spin_unlock(&(kthread_runq->kthread_runqlock));

#if 0
	printf("cpu(%d) : sched best uthread(id:%d, group:%d)\n",
	       u_obj->cpu_id, u_obj->uthread_tid, u_obj->uthread_gid);
#endif

	/* Return the uthread taken from the leftmost node. */
	return u_obj;
}
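/* For illustration only: a minimal sketch of what update_min_vruntime() is
 * assumed to do -- keep the runqueue's min_vruntime monotonically
 * non-decreasing, as in CFS. The cfs_min_vruntime field name and the
 * unsigned long vruntime type are assumptions, not taken from the real
 * headers; the block is disabled so it cannot clash with the actual
 * definition elsewhere in the codebase. */
#if 0
static void update_min_vruntime(kthread_runqueue_t *kthread_runq,
                                unsigned long vruntime)
{
	/* min_vruntime only ever moves forward; letting it slide backwards
	 * would let newly enqueued uthreads jump ahead of the queue. */
	if (vruntime > kthread_runq->cfs_min_vruntime)
		kthread_runq->cfs_min_vruntime = vruntime;
}
#endif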
//Find the leftmost (minimum) node, tracking the parent as we descend.
node *find_leftmost(node *curNode, node *prevNode)
{
	if (curNode == NULL || curNode->left == NULL) {
		//Empty subtree, or no smaller child: this is the minimum.
		return curNode;
	}
	prevNode = curNode;
	return find_leftmost(curNode->left, prevNode);
}
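//Note that C passes pointers by value, so the `prevNode = curNode` assignment
//above only changes the argument of the recursive call; the caller's own
//prevNode is left untouched. If a caller needs the parent of the leftmost
//node (e.g. to unlink it), an out-parameter works. A minimal sketch;
//find_leftmost_parent is a hypothetical helper, not part of the original code:
node *find_leftmost_parent(node *curNode, node **parentOut)
{
	//Caller should initialize *parentOut to NULL: it stays NULL when the
	//tree is empty or the root itself is the minimum.
	while (curNode != NULL && curNode->left != NULL) {
		*parentOut = curNode;   //remember the parent as we go left
		curNode = curNode->left;
	}
	return curNode;                 //leftmost node, or NULL for an empty tree
}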
//Write the current minimum (leftmost) node to outFile, then delete it from
//the tree. Calling this until the tree is empty emits it in ascending order.
int write_ascending(node *curNode, node *rootNode, node *prevNode, FILE *outFile)
{
	node *minNode = find_leftmost(curNode, prevNode);
	return deleteNode(write(minNode, outFile), rootNode, prevNode);
}
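//For comparison: a plain in-order traversal writes the same ascending order
//without destroying the tree. A minimal sketch, assuming write() takes a
//node* and a FILE* as in write_ascending() above:
void write_inorder(node *curNode, FILE *outFile)
{
	if (curNode == NULL)
		return;
	write_inorder(curNode->left, outFile);   //everything smaller first
	write(curNode, outFile);                 //then this node
	write_inorder(curNode->right, outFile);  //finally everything larger
}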