void lock_acquire(struct lock *lock) {
#if OPT_A1
	KASSERT(lock != NULL);
	/* Sleeping is forbidden inside an interrupt handler. */
	KASSERT(curthread->t_in_interrupt == false);

	spinlock_acquire(&lock->lock_lock);
	/* Sleep while someone else owns the lock. */
	while ((lock->lock_holder != NULL) && !(lock_do_i_hold(lock))) {
		/* Lock the wchan before dropping the spinlock so a wakeup
		   cannot slip in between release and sleep. */
		wchan_lock(lock->lk_wchan);
		spinlock_release(&lock->lock_lock);
		wchan_sleep(lock->lk_wchan);
		spinlock_acquire(&lock->lock_lock);
	}
	/* If we already hold the lock, nothing to record; otherwise
	   claim it.  NOTE(review): a re-acquire by the holder falls
	   through silently here — presumably intentional, confirm. */
	if (!lock_do_i_hold(lock)) {
		KASSERT(lock->lock_holder == NULL);
		lock->lock_holder = curthread;
	}
	spinlock_release(&lock->lock_lock);
#endif
}
void lock_release(struct lock *lock) { // Write this KASSERT(lock != NULL); if(lock_do_i_hold(lock) == false) KASSERT(lock_do_i_hold(lock) == true); spinlock_acquire(&lock->lk_spinlock); lock->lk_isheld = false; lock->lk_curthread = NULL; wchan_wakeone(lock->lk_wchan, &lock->lk_spinlock); spinlock_release(&lock->lk_spinlock); //(void)lock; // suppress warning until code gets written }
void cv_signal(struct cv *cv, struct lock *lock) {
#if OPT_A1	/* BUG FIX: was "opt_A1" — macro names are case-sensitive,
		   so the lowercase spelling left this body compiled out. */
	/* validate parameters */
	assert(cv != NULL);
	assert(lock != NULL);
	/* The caller must hold the lock associated with this CV. */
	assert(lock_do_i_hold(lock) == 1);

	/* disable interrupts: queue inspection + wakeup must be atomic */
	int spl = splhigh();
	if (!q_empty(cv->sleeping_list)) {
		/* pick one sleeping thread (FIFO) and wake it up */
		thread_wakeup((struct thread *)q_remhead(cv->sleeping_list));
	}
	/* enable interrupts */
	splx(spl);
#else
	(void)cv;
	(void)lock;
#endif
}
void lock_release(struct lock *lock) {
#if OPT_A1
	/* validate parameter */
	assert(lock != NULL);

	/* Disable interrupts: status/target must change atomically. */
	int spl = splhigh();

	/* Only the owning thread may release the lock. */
	assert(lock_do_i_hold(lock) == 1);

	/* Mark the lock free and forget the owner. */
	lock->status = 0;
	assert(lock->status == 0);
	lock->target = NULL;

	/* Wake any threads sleeping on this lock's address. */
	thread_wakeup(lock);

	/* Re-enable interrupts. */
	splx(spl);
#else
	(void)lock;
#endif
}
/*
 * Release the lock and wake one waiter.
 * Caller must be the current holder.
 */
void lock_release(struct lock *lock) {
	KASSERT(lock != NULL);
	KASSERT(lock->hold);
	KASSERT(lock_do_i_hold(lock));	/* only the owner may release */

	spinlock_acquire(&lock->mut_lock);
	lock->hold = 0;
	KASSERT(lock->hold == 0);
	lock->holder = NULL;
	/* Hand the lock off to at most one sleeping waiter. */
	wchan_wakeone(lock->mut_wchan);
	spinlock_release(&lock->mut_lock);
	/*
	 * Removed: a large commented-out alternate implementation that
	 * was dead code (it even contained "lockNeed==NULL", a no-op
	 * comparison where assignment was intended).
	 */
}
void cv_wait(struct cv *cv, struct lock *lock) { int spl; //We must complete an unconditional wait once an unlock occurs and we can then take the lock. We will check the conditions now. assert(cv != NULL); assert(lock !=NULL); assert (lock_do_i_hold(lock)); //If these steps above are valid we can now release the lock, sleep and then lock again. //This must be done atomically. //Like locks and semaphores, we want to make sure before we disable interrupts that we are not currently in the interrupt handler. assert(in_interrupt == 0); spl = splhigh(); //Disable All Interrupts lock_release(lock); //Unlock cv->count++; //Add one to the count since we have one more thread waiting now. q_preallocate(cv->thread_queue,cv->count); // not sure about this. q_addtail(cv->thread_queue, curthread); //add the currently waiting thread in the queue; thread_sleep(curthread); // now that the thread is in the queue, it can sleep. lock_acquire(lock); //When awoken, reacquire the lock if available. If not available, the thread will go back to bed inside lock_acquire(); splx(spl); //Re-enable interrupts (void)cv; // suppress warning until code gets written (void)lock; // suppress warning until code gets written }
/*
 * Atomically release the lock and sleep on the CV's wait channel;
 * reacquire the lock before returning.  The cv spinlock plus
 * wchan_lock close the window between lock_release and wchan_sleep
 * so a concurrent cv_signal cannot be lost.
 */
void cv_wait(struct cv *cv, struct lock *lock) {
#if OPT_A1
	KASSERT(lock != NULL);
	KASSERT(lock_do_i_hold(lock));	/* must own the lock to wait */
	spinlock_acquire(&cv->cv_spinlock);
	lock_release(lock);
	/* Lock the wchan BEFORE dropping the spinlock: guarantees any
	   wakeup issued after lock_release still reaches us. */
	wchan_lock(cv->cv_wchan);
	spinlock_release(&cv->cv_spinlock);
	wchan_sleep(cv->cv_wchan);	/* unlocks the wchan and sleeps */
	lock_acquire(lock);		/* CV contract: return holding lock */
#else
	(void)cv;  // suppress warning until code gets written
	(void)lock;  // suppress warning until code gets written
#endif
}
void cv_signal(struct cv *cv, struct lock *lock) { //On a signal, this means the next thread in the queue can start!! int spl; //We must complete an unconditional wait once an unlock occurs and we can then take the lock. We will check the conditions now. assert(cv != NULL); assert(lock !=NULL); assert (lock_do_i_hold(lock)); spl = splhigh(); //Disable All Interrupts cv->count--; //Decrement count since the next thread can go. //We will never know which thread is next, so we must create a temp thread pointer to be able to work with the next pointer in the queue. struct thread *next_thread = q_remhead(cv->thread_queue); //removes the next head in the queue. thread_wakeup(next_thread); //Wake up this next thread! splx(spl); //Re-enable All Interrupts (void)cv; // suppress warning until code gets written (void)lock; // suppress warning until code gets written }
/*
 * Write the page at vaddr out to swap; returns its flat swap offset.
 * Caller must hold vmlock; vaddr must be page-aligned.
 */
static u_int32_t ToDisk(vaddr_t vaddr) {
	struct uio ku;
	u_int32_t slot, offset, index, flat;
	int result;

	assert(lock_do_i_hold(&vmlock));
	assert((vaddr & 0xfffff000) == vaddr);	/* page-aligned */

	/* Grab a free page-sized slot in the swap bitmap. */
	result = bitmap_alloc(diskmap, &slot);
	if (result) {
		panic("no disk space.");
	}

	/* Turn the flat swap offset into (disk index, offset on disk). */
	flat = slot * PAGE_SIZE;
	index = flat / DISKSPACE;
	offset = flat - index * DISKSPACE;

	mk_kuio(&ku, (void *)vaddr, PAGE_SIZE, offset, UIO_WRITE);
	result = VOP_WRITE(disk[index], &ku);
	if (result) {
		panic(strerror(result));
	}
	return flat;
}
/*
 * Helper function for sfs_namefile.
 *
 * Looks up the name of targetino in parent's directory and prepends
 * "name/" into buf just before *bufpos, moving *bufpos back.
 *
 * Locking: must hold vnode lock on parent.
 *
 * Requires up to 3 buffers.
 */
static int
sfs_getonename(struct sfs_vnode *parent, uint32_t targetino,
	       char *buf, size_t *bufpos)
{
	struct sfs_direntry entry;
	size_t pos, len;
	int result;

	KASSERT(lock_do_i_hold(parent->sv_lock));
	KASSERT(targetino != SFS_NOINO);

	pos = *bufpos;

	result = sfs_dir_findino(parent, targetino, &entry, NULL);
	if (result) {
		return result;
	}

	/* include a trailing slash in the length */
	len = strlen(entry.sfd_name) + 1;
	if (len > pos) {
		/*
		 * Doesn't fit. ERANGE is the error from the BSD man page,
		 * even though ENAMETOOLONG would make more sense...
		 */
		return ERANGE;
	}

	/* slash goes last, then the name immediately before it */
	buf[pos - 1] = '/';
	memmove(buf + pos - len, entry.sfd_name, len - 1);
	*bufpos = pos - len;
	return 0;
}
// Creates a Zero-Filled Logical Page. int lp_zero (struct lpage **lpret) { struct lpage *lp = NULL; paddr_t pa; int result; DEBUG(DB_VM, "LPage: lp_zero\n"); result = lp_setup(&lp, &pa); if (result) { return (result); } KASSERT(lock_do_i_hold(lp -> lock)); KASSERT(cm_pageispinned(pa)); cm_zero(pa); KASSERT(cm_pageispinned(pa)); cm_unpin(pa); lock_release(lp -> lock); *lpret = lp; return (0); }
void lock_acquire(struct lock *lock) { // Write this //Peng 2.19.2016 KASSERT(lock != NULL); /* * May not block in an interrupt handler. * * For robustness, always check, even if we can actually * complete the lock_acquire without blocking. */ KASSERT(curthread->t_in_interrupt == false); KASSERT(!lock_do_i_hold(lock)); /* Use the lock spinlock to protect the wchan. */ spinlock_acquire(&lock->lock_splk); while (lock->held) { /* * Note that we don't maintain strict FIFO ordering of * threads going through the lock; that is, we * might "get" it on the first try even if other * threads are waiting. */ //spinlock_release(&lock->lock_splk); wchan_sleep(lock->lock_wchan, &lock->lock_splk); //spinlock_acquire(&lock->lock_splk); } KASSERT(!lock->held); lock->held=true; lock->holder=curthread; spinlock_release(&lock->lock_splk); //Peng //(void)lock; // suppress warning until code gets written }
/*
 * Routine for closing a file we opened at the hardware level.
 * This is not necessarily called at VOP_LASTCLOSE time; it's called
 * at VOP_RECLAIM time.
 */
static int
emu_close(struct emu_softc *sc, uint32_t handle)
{
	int result;
	bool had_lock;
	int attempt;

	/* Take the device lock only if we don't already hold it. */
	had_lock = lock_do_i_hold(sc->e_lock);
	if (!had_lock) {
		lock_acquire(sc->e_lock);
	}

	/* Issue the close; retry up to 10 times on I/O error. */
	for (attempt = 0; ; attempt++) {
		emu_wreg(sc, REG_HANDLE, handle);
		emu_wreg(sc, REG_OPER, EMU_OP_CLOSE);
		result = emu_waitdone(sc);
		if (result != EIO || attempt >= 10) {
			break;
		}
		kprintf("emu%d: I/O error on close, retrying\n",
			sc->e_unit);
	}

	if (!had_lock) {
		lock_release(sc->e_lock);
	}
	return result;
}
void cv_wait(struct cv *cv, struct lock *lock) { // Write this //make interupt thingy // make sure i own the lock? KASSERT(cv != NULL); KASSERT(lock != NULL); KASSERT(lock_do_i_hold(lock)); KASSERT(curthread->t_in_interrupt == false); wchan_lock(cv->cv_wchan); lock_release(lock); wchan_sleep(cv->cv_wchan); lock_acquire(lock); // (void)cv; // suppress warning until code gets written // (void)lock; // suppress warning until code gets written }
/*
 * Read a page back from swap offset flatloc into memory at vaddr and
 * free its swap slot.  Caller must hold vmlock; both arguments must
 * be page-aligned.
 */
static int ToMem(u_int32_t flatloc, vaddr_t vaddr) {
	struct uio ku;
	u_int32_t slot, offset, index;
	int result;

	assert(lock_do_i_hold(&vmlock));
	assert((vaddr & 0xfffff000) == vaddr);		/* page-aligned */
	assert((flatloc & 0xfffff000) == flatloc);	/* page-aligned */

	/* The slot must actually be in use. */
	slot = flatloc / PAGE_SIZE;
	assert(bitmap_isset(diskmap, slot));

	/* Turn the flat swap offset into (disk index, offset on disk). */
	index = flatloc / DISKSPACE;
	offset = flatloc - index * DISKSPACE;

	mk_kuio(&ku, (void *)vaddr, PAGE_SIZE, offset, UIO_READ);
	result = VOP_READ(disk[index], &ku);
	if (result) {
		panic(strerror(result));
	}

	/* Release the swap slot now that the page is back in memory. */
	bitmap_unmark(diskmap, slot);
	return result;
}
void cv_broadcast(struct cv *cv, struct lock *lock) { KASSERT( lock_do_i_hold(lock) ); wchan_wakeall(cv->cv_wchan); }
void cv_signal(struct cv *cv, struct lock *lock) { KASSERT( lock_do_i_hold(lock) ); wchan_wakeone(cv->cv_wchan); }
/* * Operations on vfs_biglock. We make it recursive to avoid having to * think about where we do and don't already hold it. This is an * undesirable hack that's frequently necessary when a lock covers too * much material. Your solution scheme for FS and VFS locking should * not require recursive locks. */ void vfs_biglock_acquire(void) { if (!lock_do_i_hold(vfs_biglock)) { lock_acquire(vfs_biglock); } vfs_biglock_depth++; }
void processtable_biglock_acquire() { //Let's panic for now if we already have the lock //Because I don't know if if this will ever happen anyway. KASSERT(!lock_do_i_hold(processtable_biglock)); lock_acquire(processtable_biglock); }
void cv_signal(struct cv *cv, struct lock *lock) { // Write this KASSERT(lock_do_i_hold(lock)); spinlock_acquire(&cv->cv_lock); wchan_wakeone(cv->cv_wchan, &cv->cv_lock); spinlock_release(&cv->cv_lock); }
void cv_broadcast(struct cv *cv, struct lock *lock) { // Write this KASSERT(lock_do_i_hold(lock)); spinlock_acquire(&cv->cv_lock); wchan_wakeall(cv->cv_wchan, &cv->cv_lock); spinlock_release(&cv->cv_lock); }
void lock_release(struct lock *lock) { // Write this KASSERT(lock_do_i_hold(lock)); spinlock_acquire(&lock->lk_lock); lock->lk_holder = NULL; wchan_wakeone(lock->lk_wchan, &lock->lk_lock); spinlock_release(&lock->lk_lock); }
void cv_broadcast(struct cv *cv, struct lock *lock) { // Write this // (void)cv; // suppress warning until code gets written // (void)lock; // suppress warning until code gets written if(lock_do_i_hold(lock)) { wchan_wakeall(cv->cv_wchan); } }
void
vfs_biglock_release(void)
{
	KASSERT(lock_do_i_hold(vfs_biglock));
	KASSERT(vfs_biglock_depth > 0);

	/* Drop the real lock only when the outermost hold is released. */
	if (--vfs_biglock_depth == 0) {
		lock_release(vfs_biglock);
	}
}
/*
 * Wake every thread sleeping on the CV.
 * Caller must hold the associated lock.
 */
void cv_broadcast(struct cv *cv, struct lock *lock) {
#if OPT_A1
	assert(cv != NULL);
	/*
	 * The assert already guarantees we hold the lock; the old code
	 * redundantly re-tested lock_do_i_hold in an if afterwards.
	 */
	assert(lock_do_i_hold(lock) == 1);

	/* Disable interrupts so the wakeup is atomic. */
	int spl = splhigh();
	thread_wakeup(cv);	/* waiters sleep on the cv address */
	splx(spl);
#else
	(void)cv;
	(void)lock;
#endif /* OPT_A1 */
}
/*
 * Wake every thread waiting on the CV.
 * Caller must hold the associated lock.
 */
void cv_broadcast(struct cv *cv, struct lock *lock) {
	KASSERT(cv != NULL);
	KASSERT(lock != NULL);
	/*
	 * Broadcasting without holding the lock is a caller bug.  The
	 * old code silently skipped the wakeup in that case, hiding
	 * the error; fail loudly instead.
	 */
	KASSERT(lock_do_i_hold(lock));

	/* The cv spinlock protects the wait channel. */
	spinlock_acquire(&cv->spin_lock);
	wchan_wakeall(cv->cv_wchan, &cv->spin_lock);
	spinlock_release(&cv->spin_lock);
}
/*
 * Helper function for pid_alloc: advance nextpid, wrapping back to
 * PID_MIN once it passes PID_MAX.  Caller must hold pidlock.
 */
static void
inc_nextpid(void)
{
	KASSERT(lock_do_i_hold(pidlock));

	if (++nextpid > PID_MAX) {
		nextpid = PID_MIN;
	}
}
/*
 * Release the lock and wake one waiter, if any.
 * Caller must be the current holder.
 */
void lock_release(struct lock *lock) {
	/*
	 * Releasing a lock you don't hold is a caller bug; the old code
	 * silently did nothing in that case, hiding the error.  Assert
	 * instead.  Checking before taking lk_spinlock is safe because
	 * only the holder itself ever clears lk_owner.
	 */
	KASSERT(lock_do_i_hold(lock));

	spinlock_acquire(&lock->lk_spinlock);
	lock->lk_owner = NULL;			/* mark as free */
	wchan_wakeone(lock->lk_wchan);		/* hand off to one waiter */
	spinlock_release(&lock->lk_spinlock);
}
static void turnright(unsigned long cardirection, unsigned long carnumber, unsigned long destdirection){ /* * Avoid unused variable warnings. */ int lockrequired; lockrequired=cardirection; message(0,carnumber,cardirection,destdirection); //checking for locks required and then acquiring them, after use releasing them if(lockrequired==0) { lock_acquire(lock0); assert(lock_do_i_hold(lock0)==1); message(1,carnumber,cardirection,destdirection); lock_release(lock0); } else if(lockrequired==1) { lock_acquire(lock1); assert(lock_do_i_hold(lock1)==1); message(2,carnumber,cardirection,destdirection); lock_release(lock1); } else if(lockrequired==2) { lock_acquire(lock2); assert(lock_do_i_hold(lock2)==1); message(3,carnumber,cardirection,destdirection); lock_release(lock2); } else { lock_acquire(lock3); assert(lock_do_i_hold(lock3)==1); message(4,carnumber,cardirection,destdirection); lock_release(lock3); } message(5,carnumber,cardirection,destdirection); count--; }
void lock_acquire(struct lock *lock) { // Write this KASSERT(!lock_do_i_hold(lock)); spinlock_acquire(&lock->lk_lock); while (lock->lk_holder != NULL) { wchan_sleep(lock->lk_wchan, &lock->lk_lock); } lock->lk_holder = curthread; spinlock_release(&lock->lk_lock); }