/*
 * Unlock the given lock
 */
void lock_release(lock_t *lock)
{
    /* Check if lock was in fact locked */
    if (lock->state == LOCK_LOCKED) {
        /* Open lock and wake up the process waiting for the lock */
        lock->state = LOCK_OPEN;
        sleepq_wake(lock);
    }
}
void lock_release(lock_t *lock)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&lock->slock);

    lock->locked = LOCK_UNLOCKED;
    sleepq_wake(lock);

    spinlock_release(&lock->slock);
    _interrupt_set_state(intr_status);
}
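For context, here is a minimal sketch of the acquire side that this release pairs with. It reuses the lock->slock and lock->locked fields from the snippet above; the LOCK_LOCKED constant and the Buenos-style sleepq_add/thread_switch primitives are assumptions, not taken from the snippet itself.

void lock_acquire(lock_t *lock)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&lock->slock);

    /* Sleep until the lock is free. Recheck after every wakeup,
       since another thread may have grabbed the lock first. */
    while (lock->locked == LOCK_LOCKED) {
        sleepq_add(lock);
        spinlock_release(&lock->slock);
        thread_switch();
        spinlock_acquire(&lock->slock);
    }
    lock->locked = LOCK_LOCKED;

    spinlock_release(&lock->slock);
    _interrupt_set_state(intr_status);
}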
void lock_release(lock_t *lock)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&(lock->spinlock));

    /* We unlock the lock and wake the next thread in the queue */
    lock->locked = 0;
    sleepq_wake(lock);

    spinlock_release(&(lock->spinlock));
    _interrupt_set_state(intr_status);
}
/* For the signal function, we simply wake the next waiting thread */
void condition_signal(cond_t *cond, lock_t *condition_lock)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&(cond->spinlock));

    lock_acquire(condition_lock);
    sleepq_wake(cond);
    lock_release(condition_lock);

    spinlock_release(&(cond->spinlock));
    _interrupt_set_state(intr_status); /* restore the saved interrupt state */
}
int usr_sem_vacate(usr_sem_t *sem)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&(sem->sem_slock));

    sem->value++;
    if (sem->value <= 0) {
        /* A non-positive value after the increment means at least
           one thread is sleeping on the semaphore; wake one up. */
        sleepq_wake(&(sem->value));
    }

    spinlock_release(&(sem->sem_slock));
    _interrupt_set_state(intr_status);
    return 0;
}
void semaphore_V(semaphore_t *sem)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&sem->slock);

    sem->value++;
    if (sem->value <= 0) {
        sleepq_wake(sem);
    }

    spinlock_release(&sem->slock);
    _interrupt_set_state(intr_status);
}
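The value <= 0 test above only makes sense next to the matching P operation, which decrements first and sleeps when the result goes negative. A minimal sketch under the same conventions (the slock and value fields come from the snippet above; sleepq_add and thread_switch are assumed Buenos-style sleep-queue primitives):

void semaphore_P(semaphore_t *sem)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&sem->slock);

    sem->value--;
    if (sem->value < 0) {
        /* Negative value: no resources left, so sleep on the
           semaphore. Release the spinlock before switching away. */
        sleepq_add(sem);
        spinlock_release(&sem->slock);
        thread_switch();
    } else {
        spinlock_release(&sem->slock);
    }

    _interrupt_set_state(intr_status);
}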
/*
 * Unlock the given lock
 */
void lock_release(lock_t *lock)
{
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&lock->spinlock); // Acquire spinlock

    /* Check if lock was in fact locked */
    if (lock->state == LOCK_LOCKED) {
        /* Open lock and wake up the process waiting for the lock */
        lock->state = LOCK_OPEN;
        sleepq_wake(lock);
    }

    spinlock_release(&lock->spinlock);
    _interrupt_set_state(intr_status);
}
void finish_given_process(process_id_t pid, int retval)
{
    process_id_t parent = process_table[pid].parent;
    process_id_t zombie;

    process_table[pid].retval = retval;

    while ((zombie = process_table[pid].first_zombie) >= 0) {
        /* We have zombie children - remove them. */
        process_table[zombie].state = PROCESS_FREE;
        process_table[zombie].retval = -1;
        process_table[zombie].parent = -1;
        process_table[pid].first_zombie = process_table[zombie].next_zombie;
        process_table[pid].children--;
    }

    if (parent >= 0
        && process_table[parent].state == PROCESS_ZOMBIE) {
        /* We have a zombie parent, implying we can never be joined. */
        if (--(process_table[parent].children) == 0
            && process_table[parent].retval < 0) {
            /* Oh, and our parent is joined and we are the last child,
               so free our parent. */
            finish_given_process(parent, 0);
        }
        process_table[pid].state = PROCESS_FREE;
    } else if (parent >= 0) {
        /* Our parent is alive and well, so add us to its list of
           zombies. */
        process_table[pid].state = PROCESS_ZOMBIE;
        zombie = process_table[parent].first_zombie;
        process_table[pid].next_zombie = zombie;
        if (zombie >= 0) {
            process_table[zombie].prev_zombie = pid;
        }
        process_table[parent].first_zombie = pid;
    } else {
        /* We have no parent, i.e. we are the initial program, and
           will be joined by the startup thread in init/main.c. */
        process_table[pid].state = PROCESS_ZOMBIE;
    }

    sleepq_wake(&process_table[pid]);
}
/**
 * Terminates the current process and sets a return value.
 */
void process_finish(uint32_t retval)
{
    interrupt_status_t intr_status;
    process_id_t pid;
    thread_table_t *my_thread;

    // Find out who we are.
    pid = process_get_current_process();
    my_thread = thread_get_current_thread_entry();

    // Ensure that we're the only ones touching the process table.
    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    // Mark the stack as free so new threads can reuse it.
    process_free_stack(my_thread);

    if (--process_table[pid].threads == 0) {
        // Last thread in the process; now we die.

        // Mark ourselves as dying.
        process_table[pid].retval = retval;
        process_table[pid].state = PROCESS_DYING;

        vm_destroy_pagetable(my_thread->pagetable);

        // Wake whoever may be sleeping on the process.
        sleepq_wake(&process_table[pid]);
    }

    // Free our locks.
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    my_thread->pagetable = NULL;

    // Kill the thread.
    thread_finish();
}
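The sleepq_wake(&process_table[pid]) calls above pair with a join operation sleeping on the same process-table entry. The snippets do not show it, but a sketch consistent with their conventions might look as follows (PROCESS_ZOMBIE, PROCESS_FREE, retval, and process_table_slock are taken from the snippets above; sleepq_add and thread_switch are assumed sleep-queue primitives, and the zombie-list bookkeeping of finish_given_process is deliberately omitted):

int process_join(process_id_t pid)
{
    int retval;
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Sleep until the child has become a zombie. */
    while (process_table[pid].state != PROCESS_ZOMBIE) {
        sleepq_add(&process_table[pid]);
        spinlock_release(&process_table_slock);
        thread_switch();
        spinlock_acquire(&process_table_slock);
    }

    retval = process_table[pid].retval;
    process_table[pid].state = PROCESS_FREE;

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return retval;
}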
/*
 * Signal next thread waiting for the given condition
 */
void condition_signal(cond_t *cond, lock_t *condition_lock)
{
    condition_lock = condition_lock; // Self-assignment silences the unused-parameter warning
    sleepq_wake(cond);               // Wake up next waiting thread
}
void condition_signal(cond_t *cond, lock_t *lock)
{
    lock = lock; /* self-assignment suppresses the unused-parameter warning */
    sleepq_wake(cond);
}
void condition_signal(cond_t *cond, lock_t *condition_lock)
{
    sleepq_wake(cond); // move a process from q to e
}
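All three condition_signal variants above ignore, or merely touch, the lock parameter; its real role shows up in the matching wait operation, which must queue itself and release the lock without a signal slipping in between, or a wakeup can be lost. A minimal sketch, assuming the same Buenos-style primitives (sleepq_add, thread_switch) as in the earlier snippets:

void condition_wait(cond_t *cond, lock_t *condition_lock)
{
    interrupt_status_t intr_status;

    /* Disable interrupts so that no signal can arrive between
       queueing ourselves and releasing the lock. */
    intr_status = _interrupt_disable();
    sleepq_add(cond);
    lock_release(condition_lock);
    thread_switch();
    _interrupt_set_state(intr_status);

    /* Reacquire the lock before returning to the caller. */
    lock_acquire(condition_lock);
}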
/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done. Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
    callout_impl_t *c;
    struct callout_cpu *cc;
    void (*func)(void *);
    void *arg;
    int mpsafe, count, ticks, delta;
    lwp_t *l;

    l = curlwp;
    KASSERT(l->l_cpu == curcpu());
    cc = l->l_cpu->ci_data.cpu_callout;

    mutex_spin_enter(cc->cc_lock);
    cc->cc_lwp = l;
    while (!CIRCQ_EMPTY(&cc->cc_todo)) {
        c = CIRCQ_FIRST(&cc->cc_todo);
        KASSERT(c->c_magic == CALLOUT_MAGIC);
        KASSERT(c->c_func != NULL);
        KASSERT(c->c_cpu == cc);
        KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
        KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
        CIRCQ_REMOVE(&c->c_list);

        /* If due run it, otherwise insert it into the right bucket. */
        ticks = cc->cc_ticks;
        delta = c->c_time - ticks;
        if (delta > 0) {
            CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
            continue;
        }
        if (delta < 0)
            cc->cc_ev_late.ev_count++;

        c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
            (CALLOUT_FIRED | CALLOUT_INVOKING);
        mpsafe = (c->c_flags & CALLOUT_MPSAFE);
        func = c->c_func;
        arg = c->c_arg;
        cc->cc_active = c;

        mutex_spin_exit(cc->cc_lock);
        KASSERT(func != NULL);
        if (__predict_false(!mpsafe)) {
            KERNEL_LOCK(1, NULL);
            (*func)(arg);
            KERNEL_UNLOCK_ONE(NULL);
        } else
            (*func)(arg);
        mutex_spin_enter(cc->cc_lock);

        /*
         * We can't touch 'c' here because it might be
         * freed already. If LWPs are waiting for the callout
         * to complete, awaken them.
         */
        cc->cc_active = NULL;
        if ((count = cc->cc_nwait) != 0) {
            cc->cc_nwait = 0;
            /* sleepq_wake() drops the lock. */
            sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
            mutex_spin_enter(cc->cc_lock);
        }
    }
    cc->cc_lwp = NULL;
    mutex_spin_exit(cc->cc_lock);
}