示例#1
0
static
void
waker_thread(void *junk1, unsigned long junk2)
{
	int i, done;

	(void)junk1;
	(void)junk2;

	while (1) {
		P(wakersem);
		done = wakerdone;
		V(wakersem);
		if (done) {
			break;
		}

		for (i=0; i<WAKER_WAKES; i++) {
			unsigned n;
			struct spinlock *lk;
			struct wchan *wc;

			n = random() % NWAITCHANS;
			lk = &spinlocks[n];
			wc = waitchans[n];
			spinlock_acquire(lk);
			wchan_wakeall(wc, lk);
			spinlock_release(lk);

			thread_yield();
		}
	}
	V(donesem);
}
/*
 * Create a new thread based on an existing one.
 *
 * The new thread has name NAME, and starts executing in function
 * ENTRYPOINT. DATA1 and DATA2 are passed to ENTRYPOINT.
 *
 * The new thread is created in the process P. If P is null, the
 * process is inherited from the caller. It will start on the same CPU
 * as the caller, unless the scheduler intervenes first.
 *
 * Returns 0 on success; ENOMEM or the proc_addthread error code on
 * failure, in which case no new thread exists and nothing leaks.
 */
int
thread_fork(const char *name,
            struct proc *proc,
            void (*entrypoint)(void *data1, unsigned long data2),
            void *data1, unsigned long data2)
{
    struct thread *newthread;
    int result;

    newthread = thread_create(name);
    if (newthread == NULL) {
        return ENOMEM;
    }

    /* Allocate a stack */
    newthread->t_stack = kmalloc(STACK_SIZE);
    if (newthread->t_stack == NULL) {
        thread_destroy(newthread);
        return ENOMEM;
    }
    /* Write the guard band so thread_checkstack() can detect overflow. */
    thread_checkstack_init(newthread);

    /*
     * Now we clone various fields from the parent thread.
     */

    /* Thread subsystem fields: start on the caller's CPU. */
    newthread->t_cpu = curthread->t_cpu;

    /* Attach the new thread to its process (default: caller's process). */
    if (proc == NULL) {
        proc = curthread->t_proc;
    }
    result = proc_addthread(proc, newthread);
    if (result) {
        /* thread_destroy will clean up the stack */
        thread_destroy(newthread);
        return result;
    }

    /*
     * Because new threads come out holding the cpu runqueue lock
     * (see notes at bottom of thread_switch), we need to account
     * for the spllower() that will be done releasing it.
     */
    newthread->t_iplhigh_count++;

    /* Bump the global thread count and notify anyone waiting on it. */
    spinlock_acquire(&thread_count_lock);
    ++thread_count;
    wchan_wakeall(thread_count_wchan, &thread_count_lock);
    spinlock_release(&thread_count_lock);

    /* Set up the switchframe so entrypoint() gets called */
    switchframe_init(newthread, entrypoint, data1, data2);

    /* Lock the current cpu's run queue and make the new thread runnable */
    thread_make_runnable(newthread, false);

    return 0;
}
示例#3
0
文件: tt3.c 项目: AdamChit/cs350
static
void
waker_thread(void *junk1, unsigned long junk2)
{
	int i, done;

	(void)junk1;
	(void)junk2;

	while (1) {
		P(wakersem);
		done = wakerdone;
		V(wakersem);
		if (done) {
			break;
		}

		for (i=0; i<WAKER_WAKES; i++) {
			struct wchan *w;

			w = waitchans[random()%NWAITCHANS];
			wchan_wakeall(w);

			thread_yield();
		}
	}
	V(donesem);
}
/*
 * Cause the current thread to exit.
 *
 * The parts of the thread structure we don't actually need to run
 * should be cleaned up right away. The rest has to wait until
 * thread_destroy is called from exorcise().
 *
 * Note that any dynamically-allocated structures that can vary in size from
 * thread to thread should be cleaned up here, not in thread_destroy. This is
 * because the last thread left on each core runs the idle loop and does not
 * get cleaned up until new threads are created. Differences in the amount of
 * memory used by different threads after thread_exit will make it look like
 * your kernel in leaking memory and cause some of the test161 checks to fail.
 *
 * Does not return.
 */
void
thread_exit(void)
{
    struct thread *cur;

    cur = curthread;

    /*
     * Detach from our process. You might need to move this action
     * around, depending on how your wait/exit works.
     */
    proc_remthread(cur);

    /* Make sure we *are* detached (move this only if you're sure!) */
    KASSERT(cur->t_proc == NULL);

    /* Check the stack guard band. */
    thread_checkstack(cur);

    /*
     * Decrement the thread count and notify anyone interested.
     *
     * The counter must be examined while holding thread_count_lock:
     * the old unlocked `if (thread_count)` raced with thread_fork on
     * another CPU and could skip the decrement (and the wakeup that
     * waiters in thread_wait-style code depend on).
     */
    spinlock_acquire(&thread_count_lock);
    if (thread_count > 0) {
        --thread_count;
        wchan_wakeall(thread_count_wchan, &thread_count_lock);
    }
    spinlock_release(&thread_count_lock);

    /* Interrupts off on this processor */
    splhigh();
    thread_switch(S_ZOMBIE, NULL, NULL);
    panic("braaaaaaaiiiiiiiiiiinssssss\n");
}
示例#5
0
/*
 * Wake every thread sleeping on the CV's wait channel.
 * The caller must hold the associated lock.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	/* Broadcasting without the lock is a caller bug. */
	KASSERT(lock_do_i_hold(lock));

	wchan_wakeall(cv->cv_wchan);
}
示例#6
0
文件: synch.c 项目: jamesidzik/Test
/*
 * cv_broadcast: wake all threads blocked on this CV's wait channel.
 * The lock argument is unused here; the wchan locks itself internally.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	KASSERT(cv != NULL);
	KASSERT(lock != NULL);

	(void)lock; /* the lock's state is not modified by broadcast */
	wchan_wakeall(cv->cv_wchan);
}
示例#7
0
/*
 * cv_broadcast: release every thread blocked on this condition
 * variable. The caller must hold the lock associated with the CV.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
    KASSERT(lock_do_i_hold(lock));

    /* cv_lock guards the wait channel. */
    spinlock_acquire(&cv->cv_lock);
    wchan_wakeall(cv->cv_wchan, &cv->cv_lock);
    spinlock_release(&cv->cv_lock);
}
示例#8
0
/*
 * cv_broadcast: once some condition is met, wake up every thread
 * currently blocked on the CV's wait channel. The wchan performs its
 * own internal locking, so the CV's spinlock is not needed here.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	(void)lock; /* lock state is not modified by a broadcast */

	wchan_wakeall(cv->cv_wchan);
}
示例#9
0
文件: coremap.c 项目: YueGan/CSCC69A2
/*
 * vm_tlbshootdown_all: invalidate the entire TLB on this CPU and
 * wake anyone waiting for shootdowns to complete.
 */
void
vm_tlbshootdown_all(void)
{
	spinlock_acquire(&coremap_spinlock);

	ct_shootdown_interrupts++;

	/* Clearing the whole TLB retires NUM_TLB entries at once. */
	tlb_clear();
	ct_shootdowns_done += NUM_TLB;

	/* Threads blocked in tlb_shootwait() can now recheck. */
	wchan_wakeall(coremap_shootchan);

	spinlock_release(&coremap_spinlock);
}
示例#10
0
文件: synch.c 项目: coderpm/OS161
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	// Write this
//	(void)cv;    // suppress warning until code gets written
//	(void)lock;  // suppress warning until code gets written
	if(lock_do_i_hold(lock)) {
	wchan_wakeall(cv->cv_wchan);
	}
}
示例#11
0
/*
 * cv_broadcast: wake every waiter on the CV.
 *
 * Holding the lock is part of the CV contract. The old code silently
 * skipped the broadcast when the caller did not hold it, losing
 * wakeups and masking the caller's bug; assert instead.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
    KASSERT(cv != NULL);
    KASSERT(lock != NULL);
    KASSERT(lock_do_i_hold(lock));

    /* spin_lock protects the wait channel. */
    spinlock_acquire(&cv->spin_lock);
    wchan_wakeall(cv->cv_wchan, &cv->spin_lock);
    spinlock_release(&cv->spin_lock);
}
示例#12
0
/*
 * cv_broadcast: wake all threads sleeping on this CV's wait channel.
 * The lock is not examined; the wchan locks itself internally.
 */
void
cv_broadcast(struct cv *mycv, struct lock *mylock)
{
	(void)mylock; /* unused by this implementation */

	wchan_wakeall(mycv->cv_wchan);
}
示例#13
0
/*
 * cv_broadcast: wake all threads blocked on the CV's wait channel.
 * Must not be called from interrupt context.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
#if OPT_A1
    KASSERT(cv != NULL && lock != NULL);
    KASSERT(curthread->t_in_interrupt == false);
    spinlock_acquire(&cv->cv_lock);
    wchan_wakeall(cv->cv_wchan);
    spinlock_release(&cv->cv_lock);
#else
    /*
     * Stub when OPT_A1 is off. Reference the parameters so the build
     * does not fail with unused-parameter warnings under -Werror
     * (the original had no #else branch at all).
     */
    (void)cv;
    (void)lock;
#endif
}
示例#14
0
/*
 * cv_broadcast: move every thread sleeping on the CV's wchan back to
 * the ready queue. The CV's own spinlock guards the wait channel.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
        KASSERT(cv);
        KASSERT(lock);
        (void)lock; /* the lock itself is not manipulated here */

        spinlock_acquire(&cv->cv_lock);
        wchan_wakeall(cv->cv_wchan, &cv->cv_lock);
        spinlock_release(&cv->cv_lock);
}
示例#15
0
文件: synch.c 项目: Asb10/doodi_os
/*
 * cv_broadcast: wake everything waiting on this condition variable.
 * Only the current holder of the associated lock may broadcast.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	/* Caller must be the lock holder. */
	KASSERT(lock->lk_holder == curthread);

	wchan_wakeall(cv->cv_wchan);
}
示例#16
0
/*
 * cv_broadcast: wake all threads waiting on this condition variable.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	#if OPT_A1
	KASSERT(cv != NULL);
	KASSERT(lock != NULL);
	wchan_wakeall(cv->cv_wchan);
	#else
	/*
	 * Stub when OPT_A1 is off. The original left the (void) casts
	 * commented out, producing unused-parameter warnings that break
	 * -Werror builds; keep the parameters referenced here.
	 */
	(void)cv;
	(void)lock;
	#endif
}
示例#17
0
文件: synch.c 项目: nmanivas/OS161
/*
 * cv_broadcast: wake every thread blocked on the CV's wait channel.
 * The caller must hold the lock associated with this CV.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	KASSERT(cv != NULL);
	KASSERT(lock != NULL);
	KASSERT(lock_do_i_hold(lock));

	wchan_wakeall(cv->cv_wchan);
}
示例#18
0
文件: coremap.c 项目: YueGan/CSCC69A2
/*
 * mmu_map: Enter a translation into the MMU. (This is the end result
 * of fault handling.)
 *
 * AS must be the address space currently loaded on this CPU, VA the
 * faulting virtual address, PA the physical page backing it (which
 * must be pinned by the caller), and WRITABLE nonzero for a
 * write-enabled mapping. Unpins the page before returning.
 *
 * Synchronization: Takes coremap_spinlock. Does not block.
 */
void
mmu_map(struct addrspace *as, vaddr_t va, paddr_t pa, int writable)
{
	int tlbix;
	uint32_t ehi, elo;
	unsigned cmix;
	
	/* PA must lie inside the region the coremap manages. */
	KASSERT(pa/PAGE_SIZE >= base_coremap_page);
	KASSERT(pa/PAGE_SIZE - base_coremap_page < num_coremap_entries);
	
	spinlock_acquire(&coremap_spinlock);

	/* Only the currently-loaded address space may be mapped. */
	KASSERT(as == curcpu->c_vm.cvm_lastas);

	cmix = PADDR_TO_COREMAP(pa);
	KASSERT(cmix < num_coremap_entries);

	/* Page must be pinned. */
	KASSERT(coremap[cmix].cm_pinned);

	/* Reuse an existing TLB slot for VA if one is already present. */
	tlbix = tlb_probe(va, 0);
	if (tlbix < 0) {
		/* Not in the TLB: coremap must agree, then claim a slot. */
		KASSERT(coremap[cmix].cm_tlbix == -1);
		KASSERT(coremap[cmix].cm_cpunum == 0);
		tlbix = mipstlb_getslot();
		KASSERT(tlbix>=0 && tlbix<NUM_TLB);
		/* Record which TLB slot/CPU now maps this physical page. */
		coremap[cmix].cm_tlbix = tlbix;
		coremap[cmix].cm_cpunum = curcpu->c_number;
		DEBUG(DB_TLB, "... pa 0x%05lx <-> tlb %d\n", 
			(unsigned long) COREMAP_TO_PADDR(cmix), tlbix);
	}
	else {
		/* Already mapped: coremap bookkeeping must match the TLB. */
		KASSERT(tlbix>=0 && tlbix<NUM_TLB);
		KASSERT(coremap[cmix].cm_tlbix == tlbix);
		KASSERT(coremap[cmix].cm_cpunum == curcpu->c_number);
	}

	/* Build the TLB entry; DIRTY is the MIPS write-enable bit. */
	ehi = va & TLBHI_VPAGE;
	elo = (pa & TLBLO_PPAGE) | TLBLO_VALID;
	if (writable) {
		elo |= TLBLO_DIRTY;
	}

	tlb_write(ehi, elo, tlbix);

	/* Unpin the page. */
	coremap[cmix].cm_pinned = 0;
	/* Wake anyone waiting in coremap_pin for this page to unpin. */
	wchan_wakeall(coremap_pinchan);

	spinlock_release(&coremap_spinlock);
}
示例#19
0
文件: synch.c 项目: mellwa/os
/*
 * cv_broadcast: wake all threads waiting on the CV. Only the holder
 * of the associated lock may broadcast.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
#if OPT_A1
	KASSERT(cv != NULL);
	KASSERT(lock != NULL);
	/* The calling thread must hold the lock. */
	KASSERT(lock->who_hold == curthread);
	wchan_wakeall(cv->cv_wchan);
#else
	/* Not implemented without OPT_A1; keep parameters referenced. */
	(void)cv;
	(void)lock;
#endif
}
示例#20
0
文件: coremap.c 项目: YueGan/CSCC69A2
/*
 * coremap_unpin: drop the pin on a physical page previously pinned
 * with coremap_pin or coremap_allocuser, and wake any thread waiting
 * to pin it.
 *
 * Synchronization: takes coremap_spinlock. Does not block.
 */
void
coremap_unpin(paddr_t paddr)
{
	unsigned index;

	index = PADDR_TO_COREMAP(paddr);
	KASSERT(index < num_coremap_entries);

	spinlock_acquire(&coremap_spinlock);
	/* Unpinning a page that is not pinned is a caller bug. */
	KASSERT(coremap[index].cm_pinned);
	coremap[index].cm_pinned = 0;
	/* Waiters on the pin channel may retry now. */
	wchan_wakeall(coremap_pinchan);
	spinlock_release(&coremap_spinlock);
}
示例#21
0
/*
 * lock_release: give up the lock and wake all threads blocked on the
 * lock's wait channel so they can contend for it.
 *
 * Only the holder may release; anything else is a caller bug, so
 * assert instead of silently yielding. (The old code called
 * thread_yield() while holding lock_lock — yielding with a spinlock
 * held is illegal and can deadlock or trip kernel assertions. It
 * also called lock_do_i_hold() under the spinlock, which risks
 * self-deadlock if that helper takes the same spinlock.)
 */
void
lock_release(struct lock *lock)
{
	KASSERT(lock != NULL);
	KASSERT(lock->lock_holder != NULL);
	KASSERT(lock_do_i_hold(lock));

	spinlock_acquire(&lock->lock_lock);
	lock->lock_holder = NULL;
	/* Wake everyone blocked waiting for this lock. */
	wchan_wakeall(lock->lock_wchan);
	spinlock_release(&lock->lock_lock);
}
示例#22
0
文件: synch.c 项目: patricksu/repo3
/*
 * cv_broadcast: wake all threads sleeping on this CV's wait channel.
 * The caller must hold the associated lock.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
    KASSERT(cv != NULL);
    KASSERT(lock != NULL);
    KASSERT(lock_do_i_hold(lock));

    /* cv_splk protects the wait channel. */
    spinlock_acquire(&cv->cv_splk);
    wchan_wakeall(cv->cv_wchan, &cv->cv_splk);
    spinlock_release(&cv->cv_splk);
}
示例#23
0
文件: synch.c 项目: jcseto/os161-1.99
/*
 * cv_broadcast: wake every waiter on this condition variable.
 * The caller must hold the lock.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
    #if OPT_A1
    KASSERT(cv != NULL);
    KASSERT(lock != NULL);
    KASSERT(lock_do_i_hold(lock));

    /* cv_spinlock serializes access to the wait channel. */
    spinlock_acquire(&cv->cv_spinlock);
    wchan_wakeall(cv->cv_wchan);
    spinlock_release(&cv->cv_spinlock);
    #else
    (void)cv;    /* suppress warning until code gets written */
    (void)lock;  /* suppress warning until code gets written */
    #endif
}
示例#24
0
文件: cv.c 项目: plurmiscuous/JinxOS
/*
 * cv_broadcast: move all TCBs on the CV's wait queue from waiting to
 * ready.
 *
 * Fix: broadcast must NOT release and reacquire the caller's lock.
 * Dropping the lock here silently punches a hole in the caller's
 * critical section (releasing/reacquiring is cv_wait's protocol, not
 * cv_broadcast's), and the trailing lock_acquire can block inside
 * what callers expect to be a non-blocking notification. The woken
 * waiters reacquire the lock themselves inside cv_wait.
 */
void
cv_broadcast(struct cv* cv, struct lock* lock) {
    /* The caller must hold the lock while broadcasting. */
    KASSERT(lock_do_i_hold(lock));

    /* cv_splock protects the wait channel. */
    spinlock_acquire(&cv->cv_splock);
    wchan_wakeall(cv->cv_wchan, &cv->cv_splock);
    spinlock_release(&cv->cv_splock);
}
示例#25
0
/*
 * rwlock_release_write: release an rwlock held for writing.
 *
 * Wake-up policy as implemented: waiting readers get priority — if
 * any exist they are all woken; otherwise a single waiting writer is
 * woken; otherwise rwlock_next_thread is set to NONE.
 *
 * NOTE(review): rwlock_next_thread is only written in the
 * nobody-waiting branch; the updates in the other branches are
 * commented out, so the "toggle mechanism" mentioned below is not
 * actually in effect — confirm this is intended.
 */
void rwlock_release_write(struct rwlock *rwlock) {
	KASSERT(rwlock != NULL);
	KASSERT(rwlock_do_i_hold(rwlock));
    //KASSERT(rwlock->rwlock_next_thread == READER);
// writer is done, should i wake up all sleeping readers or a writer now? use a toggle mechanism as of now
	spinlock_acquire(&rwlock->rwlock_spinlock);
	rwlock->is_held_by_writer = false;
	/* Readers first: release the whole reader wait queue at once. */
	if(!wchan_isempty(rwlock->rwlock_read_wchan, &rwlock->rwlock_spinlock)) {
		wchan_wakeall(rwlock->rwlock_read_wchan, &rwlock->rwlock_spinlock);
		//rwlock->rwlock_next_thread = WRITER;
	} else if(!wchan_isempty(rwlock->rwlock_write_wchan, &rwlock->rwlock_spinlock)) {
		/* No readers waiting: hand the lock to exactly one writer. */
		wchan_wakeone(rwlock->rwlock_write_wchan, &rwlock->rwlock_spinlock);
		//rwlock->rwlock_next_thread = READER;
	} else {
        rwlock->rwlock_next_thread = NONE;
    }
	spinlock_release(&rwlock->rwlock_spinlock);
}
示例#26
0
文件: coremap.c 项目: YueGan/CSCC69A2
/*
 * vm_tlbshootdown: process a batch of NUM TLB shootdown requests,
 * then wake anyone waiting for shootdowns to complete.
 */
void
vm_tlbshootdown(const struct tlbshootdown *ts, int num)
{
	int n;
	int slot;
	unsigned cmix;

	spinlock_acquire(&coremap_spinlock);
	ct_shootdown_interrupts++;

	for (n = 0; n < num; n++) {
		slot = ts[n].ts_tlbix;
		cmix = ts[n].ts_coremapindex;

		/*
		 * Only invalidate if the coremap still records this TLB
		 * slot for this CPU; the entry may have been replaced
		 * since the shootdown was requested.
		 */
		if (coremap[cmix].cm_tlbix == slot &&
		    coremap[cmix].cm_cpunum == curcpu->c_number) {
			tlb_invalidate(slot);
			ct_shootdowns_done++;
		}
	}

	/* Let the requester (in tlb_shootwait) recheck its condition. */
	wchan_wakeall(coremap_shootchan);
	spinlock_release(&coremap_spinlock);
}
示例#27
0
文件: coremap.c 项目: YueGan/CSCC69A2
/*
 * do_evict: evict the user page occupying coremap slot WHERE so the
 * slot becomes free.
 *
 * Caller must hold coremap_spinlock and global_paging_lock, must not
 * be in interrupt context, and the slot must contain an unpinned,
 * allocated, non-kernel page. The spinlock is temporarily dropped
 * around lpage_evict(), which may block on swap I/O; the page is
 * pinned across that window so nobody else touches it.
 */
static
void
do_evict(int where)
{
	struct lpage *lp;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));
	KASSERT(curthread != NULL && !curthread->t_in_interrupt);
	KASSERT(lock_do_i_hold(global_paging_lock));

	KASSERT(coremap[where].cm_pinned==0);
	KASSERT(coremap[where].cm_allocated);
	KASSERT(coremap[where].cm_kernel==0);

	lp = coremap[where].cm_lpage;
	KASSERT(lp != NULL);

	/*
	 * Pin it now, so it doesn't get e.g. paged out by someone
	 * else while we're waiting for TLB shootdown.
	 */
	coremap[where].cm_pinned = 1;

	/* If the page is mapped in some TLB, invalidate that mapping. */
	if (coremap[where].cm_tlbix >= 0) {
		if (coremap[where].cm_cpunum != curcpu->c_number) {
			/* yay, TLB shootdown */
			struct tlbshootdown ts;
			ts.ts_tlbix = coremap[where].cm_tlbix;
			ts.ts_coremapindex = where;
			ct_shootdowns_sent++;
			ipi_tlbshootdown(coremap[where].cm_cpunum, &ts);
			/* Wait until the remote CPU clears the mapping. */
			while (coremap[where].cm_tlbix != -1) {
				tlb_shootwait();
			}
			KASSERT(coremap[where].cm_tlbix == -1);
			KASSERT(coremap[where].cm_cpunum == 0);
			KASSERT(coremap[where].cm_lpage == lp);
		}
		else {
			/* Mapped on this CPU: invalidate locally. */
			tlb_invalidate(coremap[where].cm_tlbix);
			coremap[where].cm_tlbix = -1;
			coremap[where].cm_cpunum = 0;
		}
		DEBUG(DB_TLB, "... pa 0x%05lx --> tlb --\n", 
		      (unsigned long) COREMAP_TO_PADDR(where));
	}

	/* properly we ought to lock the lpage to test this */
	KASSERT(COREMAP_TO_PADDR(where) == (lp->lp_paddr & PAGE_FRAME));

	/* release the coremap spinlock in case we need to swap out */
	spinlock_release(&coremap_spinlock);

	lpage_evict(lp);

	spinlock_acquire(&coremap_spinlock);

	/* because the page is pinned these shouldn't have changed */
	KASSERT(coremap[where].cm_allocated == 1);
	KASSERT(coremap[where].cm_lpage == lp);
	KASSERT(coremap[where].cm_pinned == 1);

	/* Mark the slot free and unpin it. */
	coremap[where].cm_allocated = 0;
	coremap[where].cm_lpage = NULL;
	coremap[where].cm_pinned = 0;

	num_coremap_user--;
	num_coremap_free++;
	KASSERT(num_coremap_kernel+num_coremap_user+num_coremap_free
	       == num_coremap_entries);

	/* Wake anyone waiting for a page to become unpinned. */
	wchan_wakeall(coremap_pinchan);
}