/*------------------------------------------------------------------------ * chprio -- change the scheduling priority of a process *------------------------------------------------------------------------ */ SYSCALL chprio(int pid, int newprio) { STATWORD ps; struct pentry *pptr; disable(ps); if (isbadpid(pid) || newprio<=0 || (pptr = &proctab[pid])->pstate == PRFREE) { restore(ps); return(SYSERR); } pptr->pprio = newprio; if(pptr -> pstate == PRREADY) { dequeue(pid); insert(pid, rdyhead, pptr -> pprio); } // Update the processes priority inheritance update_priority(pid); // If the process is waiting for a lock then update the lock if (pptr->pstate == PRLOCK) { // Update lppriomax for the lock (max priority of all waiting procs) update_lppriomax(LOCK_INDEX(pptr->plock)); // Update pinh for all procs that hold this lock. update_pinh(LOCK_INDEX(pptr->plock)); } restore(ps); return(newprio); }
/* Releases LOCK, which must be owned by the current thread. An interrupt handler cannot acquire a lock, so it does not make sense to try to release a lock within an interrupt handler. */ void lock_release (struct lock *lock) { ASSERT (lock != NULL); ASSERT (lock_held_by_current_thread (lock)); enum intr_level old_level = intr_disable (); lock->holder = NULL; /* The lock is released: the threads waiting for this lock can be removed from the ex lock holder donors list, and its priority can be updated (either with another donation or a restoration of its original priority). */ remove_donor (lock); update_priority (); sema_up (&lock->semaphore); intr_set_level (old_level); }
/*
 *	thread_quantum_expire:
 *
 *	Timer-call handler run when a thread's quantum expires.
 *	p0 is the processor the thread is running on, p1 the thread
 *	itself.  Trips the timeshare fail-safe for threads that have
 *	computed too long outside timeshare mode, recomputes scheduled
 *	priority, grants the thread a fresh quantum, re-arms the
 *	quantum timer, and checks for a pending AST.
 */
void thread_quantum_expire( timer_call_param_t p0, timer_call_param_t p1)
{
	register processor_t myprocessor = p0;
	register thread_t thread = p1;
	spl_t s;

	s = splsched();
	thread_lock(thread);

	/*
	 * Check for fail-safe trip.
	 */
	if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
		extern uint64_t max_unsafe_computation;
		uint64_t new_computation;

		/* CPU consumed in the current computation epoch, up to the
		 * end of the quantum that just expired. */
		new_computation = myprocessor->quantum_end;
		new_computation -= thread->computation_epoch;
		if (new_computation + thread->computation_metered > max_unsafe_computation) {
			extern uint32_t sched_safe_duration;

			/* Demote a realtime thread, remembering realtime in
			 * safe_mode so it can be restored later. */
			if (thread->sched_mode & TH_MODE_REALTIME) {
				thread->priority = DEPRESSPRI;

				thread->safe_mode |= TH_MODE_REALTIME;
				thread->sched_mode &= ~TH_MODE_REALTIME;
			}

			pset_share_incr(thread->processor_set);

			/* Force timeshare behavior until safe_release. */
			thread->safe_release = sched_tick + sched_safe_duration;
			thread->sched_mode |= (TH_MODE_FAILSAFE|TH_MODE_TIMESHARE);
			thread->sched_mode &= ~TH_MODE_PREEMPT;
		}
	}

	/*
	 * Recompute scheduled priority if appropriate.
	 */
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);
	else
	if (thread->sched_mode & TH_MODE_TIMESHARE) {
		/* Charge the CPU time just used against sched_usage. */
		thread_timer_delta(thread);
		thread->sched_usage += thread->sched_delta;
		thread->sched_delta = 0;

		/*
		 * Adjust the scheduled priority if
		 * the thread has not been promoted
		 * and is not depressed.
		 */
		if ( !(thread->sched_mode & TH_MODE_PROMOTED) &&
			 !(thread->sched_mode & TH_MODE_ISDEPRESSED) )
			compute_my_priority(thread);
	}

	/*
	 * This quantum is up, give this thread another.
	 */
	if (first_timeslice(myprocessor))
		myprocessor->timeslice--;

	thread_quantum_init(thread);
	myprocessor->quantum_end += thread->current_quantum;
	timer_call_enter1(&myprocessor->quantum_timer,
		thread, myprocessor->quantum_end);

	thread_unlock(thread);

	/*
	 * Check for and schedule ast if needed.
	 */
	ast_check(myprocessor);

	splx(s);
}
/*
 *	thread_info_internal:
 *
 *	Return scheduling information about a thread in the requested
 *	flavor.  On entry *thread_info_count is the size of the
 *	caller's buffer (in natural units); on success it is set to
 *	the count actually filled in.  Each flavor path takes splsched
 *	and the thread lock while reading thread state.
 *
 *	Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT (null thread,
 *	short buffer, or unknown flavor), or KERN_INVALID_POLICY when
 *	the flavor does not match the thread's scheduling mode.
 */
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int		state, flags;
	spl_t	s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time, &basic_info->system_time);

		/*
		 * Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 * To calculate cpu_usage, first correct for timer rate,
		 * then for 5/8 ageing. The correction factor [3/5] is
		 * (1/(5/8) - 1).
		 */
		basic_info->cpu_usage = ((uint64_t)thread->cpu_usage * TH_USAGE_SCALE) / sched_tick_interval;
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)? POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		/* A bound processor's idle thread reports itself as idle. */
		if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		/* No kernel stack => the thread has been swapped out. */
		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		/* Map the state bits to a single reported run state; the
		 * order here establishes precedence (terminate > run >
		 * uninterruptible > suspended > waiting). */
		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* Flavor only valid for timeshare threads. */
		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		/* When depressed, report DEPRESSPRI as the base and expose
		 * the real priority as depress_priority. */
		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* FIFO scheduling info is never reported here. */
		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* RR flavor only valid for non-timeshare threads. */
		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		/* Report the standard quantum in milliseconds. */
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}