Example #1
static void run_add(void)
{
    char next = 0;
    for (unsigned iteration = 0; iteration < ITERATIONS; ++iteration) {
        mutex_lock(&mutex);

        for (unsigned i = 0; i < BUF_SIZE; ++i) {
            assert_avail(i);
            assert_add_one(next, -1);
            assert_avail(i + 1);
            ++next;
        }

        /* Overwrite oldest element. It should be returned to us. */
        assert_avail(BUF_SIZE);
        assert_add_one(next, next - BUF_SIZE);
        assert_avail(BUF_SIZE);
        ++next;

        thread_wakeup(pid_get);
        mutex_unlock_and_sleep(&mutex);
    }

    thread_wakeup(pid_get);
}
Example #2
/*
 *	Routine:	cpu_machine_init
 *	Function:	Per-processor machine initialization, run as each
 *			cpu comes up at boot or after sleep/hibernate.
 */
void
cpu_machine_init(
	void)
{
	struct per_proc_info			*proc_info;
	volatile struct per_proc_info	*mproc_info;

	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep. Write the low
		// word as 0 first so no carry can reach the upper word while it is
		// being set, then restore the low word.
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();						/* Timers should be cool now, park the power management stepper */
	}
}
Example #3
/*
 *      Routine:        lck_rw_done_gen
 */
lck_rw_type_t
lck_rw_done_gen(
	lck_rw_t	*lck)
{
	boolean_t	wakeup_readers = FALSE;
	boolean_t	wakeup_writers = FALSE;
	lck_rw_type_t	lck_rw_type;
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	if (lck->lck_rw_shared_count != 0) {
		lck_rw_type = LCK_RW_TYPE_SHARED;
		lck->lck_rw_shared_count--;
	}
	else {	
		lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
		if (lck->lck_rw_want_upgrade) 
			lck->lck_rw_want_upgrade = FALSE;
		else 
			lck->lck_rw_want_write = FALSE;
	}

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (lck->lck_rw_shared_count == 0) {
		if (lck->lck_w_waiting) {
			lck->lck_w_waiting = FALSE;
			wakeup_writers = TRUE;
		} 
		if (!(lck->lck_rw_priv_excl && wakeup_writers == TRUE) && 
				lck->lck_r_waiting) {
			lck->lck_r_waiting = FALSE;
			wakeup_readers = TRUE;
		}
	}

	lck_interlock_unlock(lck, istate);

	if (wakeup_readers) 
		thread_wakeup(RW_LOCK_READER_EVENT(lck));
	if (wakeup_writers) 
		thread_wakeup(RW_LOCK_WRITER_EVENT(lck));

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_RW_DONE_RELEASE, lck, (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE ? 1 : 0));
#endif

	return(lck_rw_type);
}
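For context, here is a sketch of the writer-side wait these wakeups pair with. This is not the verbatim XNU routine: the retry loop and statistics are omitted and the helper name is invented; only assert_wait()/thread_block() and the lck_w_waiting flag come from the snippet's own vocabulary.

/*
 * Sketch: how a writer typically gets flagged in lck_w_waiting
 * before blocking, so lck_rw_done_gen() has someone to wake.
 */
static void
rw_writer_wait_sketch(lck_rw_t *lck, boolean_t istate)
{
	lck->lck_w_waiting = TRUE;			/* seen by lck_rw_done_gen() */
	assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
	lck_interlock_unlock(lck, istate);
	thread_block(THREAD_CONTINUE_NULL);		/* resumes after thread_wakeup() */
}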
Example #4
void etx_clock(void) {
    /*
     * Manages the etx_beacon thread to wake up every full second +- jitter
     */

    /*
     * The jittercorrection and jitter variables hold microsecond values
     * divided by 1000 so that they fit into uint8_t variables.
     *
     * That is why they are multiplied by 1000 (the MS factor) when the
     * sleep interval below is computed.
     */
    uint8_t jittercorrection = ETX_DEF_JIT_CORRECT;
    uint8_t jitter = (uint8_t) (rand() % ETX_JITTER_MOD);

    while (true) {
        thread_wakeup(etx_beacon_pid);

        /*
         * Vtimer is buggy, but I seem to have no hwtimers left, so using this
         * for now.
         */
        vtimer_usleep(
                ((ETX_INTERVAL - ETX_MAX_JITTER)*MS)+ jittercorrection*MS + jitter*MS - ETX_CLOCK_ADJUST);

        //hwtimer_wait(
        //        HWTIMER_TICKS(((ETX_INTERVAL - ETX_MAX_JITTER)*MS) + jittercorrection*MS + jitter*MS - ETX_CLOCK_ADJUST));

        jittercorrection = (ETX_MAX_JITTER) - jitter;
        jitter = (uint8_t) (rand() % ETX_JITTER_MOD);
    }
}
Example #5
void test_ltc_tick(void *ptr)
{
    int pid = (int) ptr;

    hwtimer_tick_id = hwtimer_set(tick_ticks, test_ltc_tick, ptr);
    thread_wakeup(pid);
}
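A minimal sketch of the thread on the receiving end, assuming RIOT's thread_sleep()/thread_wakeup() pairing; the function and its counter are hypothetical, not part of the original test:

/* Hypothetical waiter: blocks until the hwtimer callback above
 * calls thread_wakeup(pid), then counts the tick. */
static void tick_waiter(void)
{
    unsigned ticks = 0;

    while (1) {
        thread_sleep();     /* sleep until test_ltc_tick() wakes us */
        ++ticks;
    }
}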
Example #6
static void
dbgsh (void)
{
	ulong rbx;
	int b;

	if (!config.vmm.dbgsh)
		return;
	spinlock_lock (&dbgsh_lock);
	if (!i) {
		tid = thread_new (dbgsh_thread, NULL, VMM_STACKSIZE);
		i = true;
	}
	spinlock_unlock (&dbgsh_lock);
	current->vmctl.read_general_reg (GENERAL_REG_RBX, &rbx);
	b = (int)rbx;
	if (b != -1) {		/* -1 means the guest passed no byte */
		r = b;		/* hand the byte to the dbgsh thread */
		s = -1;
#ifndef FWDBG
		spinlock_lock (&dbgsh_lock2);
		if (stopped)	/* restart the shell thread if it is blocked */
			thread_wakeup (tid);
		spinlock_unlock (&dbgsh_lock2);
#endif
	}
	current->vmctl.write_general_reg (GENERAL_REG_RAX, (ulong)s);
}
Example #7
void lock_release(struct lock *lock) {
    assert(lock != NULL);
    int enabled = interrupts_set(0);
    lock->lock_state = 0;
    thread_wakeup(lock->queue, 1);
    interrupts_set(enabled);
}
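A sketch of the matching acquire, assuming the same thread-library API (interrupts_set(), and thread_sleep() on a wait queue, as exercised in Example #25); any field beyond what the snippet shows is an assumption:

/* Sketch of the acquire side that lock_release() wakes. */
void lock_acquire(struct lock *lock) {
    assert(lock != NULL);
    int enabled = interrupts_set(0);
    while (lock->lock_state != 0) {
        thread_sleep(lock->queue);  /* woken by thread_wakeup() in lock_release() */
    }
    lock->lock_state = 1;
    interrupts_set(enabled);
}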
Example #8
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	int spl;

	// Check to make sure we have an actual lock
	assert(lock != NULL);

	// Check to make sure we have an actual cv
	assert(cv != NULL);

	spl = splhigh();
	
	// Release the lock so woken threads can acquire it
	lock_release(lock);

	// Wake up all threads waiting on the cv
	thread_wakeup(cv);
	
	// Reacquire the lock
	lock_acquire(lock);

	// Restore the previous interrupt level
	splx(spl);
}
Example #9
void lock_done(
	register lock_t	l)
{
	simple_lock(&l->interlock);

	if (l->read_count != 0)
		l->read_count--;
	else
	if (l->recursion_depth != 0)
		l->recursion_depth--;
	else
	if (l->want_upgrade)
		l->want_upgrade = FALSE;
	else
		l->want_write = FALSE;

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (l->waiting && (l->read_count == 0)) {
		l->waiting = FALSE;
		thread_wakeup(l);
	}

	simple_unlock(&l->interlock);
}
Example #10
int main(void)
{
    indicator = 0;
    count = 0;

    main_pid = thread_getpid();

    kernel_pid_t second_pid = thread_create(stack,
                  sizeof(stack),
                  THREAD_PRIORITY_MAIN - 1,
                  THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
                  second_thread,
                  NULL,
                  "second_thread");

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(second_pid);
        indicator++;
        count++;

        if (indicator > 1 || indicator < -1) {
            printf("Error, threads did not sleep properly. [indicator: %d]\n", indicator);
            return -1;
        }

        if ((count % 100000) == 0) {
            printf("Still alive alternated [count: %dk] times.\n", count / 1000);
        }

        mutex_unlock_and_sleep(&mutex);
    }
}
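The counterpart thread is not shown in this example; a sketch consistent with the setup above (and with Example #27, which hard-codes the PID instead) would decrement the indicator and wake main back:

/* Assumed counterpart for the test above, using the main_pid saved in main(). */
static void *second_thread(void *arg)
{
    (void) arg;

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(main_pid);
        indicator--;
        mutex_unlock_and_sleep(&mutex);
    }
    return NULL;
}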
Example #11
File: synch.c Project: jessZhAnG/OS
void
cv_signal(struct cv *cv, struct lock *lock)
{
#if OPT_A1
    // validate parameter

    assert (cv != NULL);
    assert (lock != NULL);

    // others

    assert (lock_do_i_hold(lock) == 1);
    // disable interrupts
    int spl = splhigh();

    if (q_empty(cv->sleeping_list)) goto done;   // signal must be called after wait!

    // pick one thread and wake it up
    thread_wakeup((struct thread*) q_remhead(cv->sleeping_list));


    // enable interrupts
done:
    splx(spl);
#else
    (void) cv;
    (void) lock;
#endif
}
Example #12
File: synch.c Project: jessZhAnG/OS
void
lock_release(struct lock *lock)
{
#if OPT_A1
    // validate parameter
    assert (lock != NULL);

    int spl;
    // disable interrupts
    spl = splhigh();

    assert (lock_do_i_hold(lock) == 1); // make sure right lock locks the right thread

    // release the lock
    lock->status = 0;
    assert (lock->status==0); // check

    lock->target = NULL;
    thread_wakeup(lock);

    // enable interrupts
    splx(spl);
#else
    (void) lock;
#endif
}
Example #13
static void *run_get(void *arg)
{
    (void) arg;

    char next = 0;
    for (unsigned iteration = 0; iteration < ITERATIONS; ++iteration) {
        ++next; /* the first element of a stride is always overwritten */

        mutex_lock(&mutex);

        for (unsigned i = BUF_SIZE; i > 0; --i) {
            assert_avail(i);
            assert_get_one(next);
            assert_avail(i - 1);
            ++next;
        }

        assert_avail(0);
        assert_get_one(-1);
        assert_avail(0);

        thread_wakeup(pid_add);
        mutex_unlock_and_sleep(&mutex);
    }

    return NULL;
}
Example #14
void
cv_signal(struct cv *cv, struct lock *lock)
{
	//On a signal, the next thread in the queue can run.

	int spl;
	//Validate the arguments and check that we hold the lock before signalling.
	assert(cv != NULL);
	assert(lock != NULL);
	assert(lock_do_i_hold(lock));
	
	spl = splhigh(); //Disable All Interrupts
	
	cv->count--; //Decrement count since the next thread can go.
	
	//We don't know which thread is next, so take the head of the queue.
	assert(!q_empty(cv->thread_queue)); //guard: signalling with no waiters would hand garbage to thread_wakeup
	struct thread *next_thread = q_remhead(cv->thread_queue); //removes the head of the queue

	thread_wakeup(next_thread); //Wake up this next thread!
    
	splx(spl); //Re-enable All Interrupts
}
Example #15
File: main.c Project: JMR-b/RIOT
int main(void)
{
    count = 0;
    is_finished = 0;
    expected_value = 1000ul * 1000ul;
    pthread_cond_init(&cv, NULL);

    kernel_pid_t pid = thread_create(stack,sizeof(stack), THREAD_PRIORITY_MAIN - 1,
                                     THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
                                     second_thread, NULL, "second_thread");

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(pid);
        count++;

        if ((count % 100000) == 0) {
            printf("Still alive alternated [count: %ldk] times.\n", count / 1000);
        }

        if (count == expected_value) {
            puts("condition fulfilled.");
            is_finished = 1;
            mutex_unlock(&mutex);
            return 0;
        }

        pthread_cond_wait(&cv, &mutex);
        mutex_unlock(&mutex);
    }
}
Example #16
static void
tcpip_init_done(void *arg)
{
	uint32_t *done = arg;
	*done = 1;
	thread_wakeup(done);
}
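The waiter this callback releases would look roughly like the following. tcpip_init() is the standard lwIP entry point, but the thread_sleep() call is an assumption about this port's sleep-on-event primitive, and the helper name is invented:

/* Sketch of the caller side: pass &done as both completion flag
 * and wakeup event, then sleep until tcpip_init_done() fires. */
static void
wait_for_tcpip_init(void)
{
	uint32_t done = 0;

	tcpip_init(tcpip_init_done, &done);
	while (!done)
		thread_sleep(&done);	/* assumed primitive; woken by thread_wakeup(done) */
}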
Example #17
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
  int spl = splhigh();
  thread_wakeup(cv);
  splx(spl);
}
Example #18
void
processor_doshutdown(
	processor_t	processor)
{
	register int		cpu = processor->slot_num;

	timer_switch(&kernel_timer[cpu]);

	/*
	 *	Ok, now exit this cpu.
	 */
	PMAP_DEACTIVATE_KERNEL(cpu);
	cpu_data[cpu].active_thread = THREAD_NULL;
	active_kloaded[cpu] = THR_ACT_NULL;
	cpu_down(cpu);
	thread_wakeup((event_t)processor);
	halt_cpu();
	panic("zombie processor");

	/*
	 *	The action thread returns to life after the call to
	 *	switch_to_shutdown_context above, on some other cpu.
	 */

	/*NOTREACHED*/
}
Example #19
/* 
 * Interrupts disabled, lock held; returns the same way. 
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call)
{
	boolean_t dowake = FALSE;

	call->tc_finish_count++;
	call->tc_refs--;

	if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
		dowake = TRUE;
		call->tc_flags &= ~THREAD_CALL_WAIT;

		/* 
		 * Dropping lock here because the sched call for the 
		 * high-pri group can take the big lock from under
		 * a thread lock.
		 */
		thread_call_unlock();
		thread_wakeup((event_t)call);
		thread_call_lock_spin();
	}

	if (call->tc_refs == 0) {
		if (dowake) {
			panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
		}

		enable_ints_and_unlock();

		zfree(thread_call_zone, call);

		(void)disable_ints_and_lock();
	}
}
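For reference, the waiter side of this protocol (a sketch, not the verbatim XNU wait path; the helper name is invented) sets THREAD_CALL_WAIT under the lock and blocks on the call itself as the event:

/* Sketch: arrange to be woken by thread_call_finish(). */
static void
thread_call_wait_sketch(thread_call_t call)
{
	/* caller holds the thread-call lock */
	call->tc_flags |= THREAD_CALL_WAIT;
	assert_wait((event_t)call, THREAD_UNINT);
	thread_call_unlock();
	thread_block(THREAD_CONTINUE_NULL);	/* resumes after the wakeup */
}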
Example #20
void
himem_revert(
	hil_t		hil)
{
	hil_t		next;
	boolean_t	wakeup = FALSE;
	spl_t		ipl;

	while(hil) {
		if (hil->length) {
			bcopy((char *)phystokv(hil->low_page + hil->offset),
				(char *)phystokv(hil->high_addr),
			      hil->length);
		}
		hil->high_addr = 0;
		hil->length = 0;
		hil->offset = 0;
		next = hil->next;
		ipl = splhi();
		simple_lock(&hil_lock);
		if (!(hil->next = hil_head))	/* list was empty: a waiter may be blocked */
			wakeup = TRUE;
		hil_head = hil;
		simple_unlock(&hil_lock);
		splx(ipl);
		hil = next;
	}
	if (wakeup)
		thread_wakeup((event_t)&hil_head);
}
Example #21
int main(void)
{
    indicator = 0;
    count = 0;
    mutex_init(&mutex);

    thread_create(stack,
                  KERNEL_CONF_STACKSIZE_MAIN,
                  PRIORITY_MAIN - 1,
                  CREATE_WOUT_YIELD | CREATE_STACKTEST,
                  second_thread,
                  "second_thread");

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(2);
        indicator++;
        count++;

        if (indicator > 1 || indicator < -1) {
            //printf("Error, threads did not sleep properly. [indicator: %d]\n", indicator);
            return -1;
        }

        if ((count % 100000) == 0) {
            //printf("Still alive alternated [count: %dk] times.\n", count / 1000);
        }

        mutex_unlock_and_sleep(&mutex);
    }
}
Example #22
void
svm_send_change_completed(
	xmm_obj_t	mobj,
	change_t	change)
{
	kern_return_t kr;

	assert(xmm_obj_lock_held(mobj));
	if (change->reply_data.type == XMM_TEMPORARY_REPLY) {
		/*
		 * This request was generated by
		 * svm_disable_active_temporary.
		 * If the object is still alive,
		 * mark the object as temporary_disabled.
		 */
		assert(! MOBJ->temporary_disabled);
		assert(MOBJ->disable_in_progress);
		if (MOBJ->state == MOBJ_STATE_READY) {
			MOBJ->temporary_disabled = TRUE;
		}
		MOBJ->disable_in_progress = FALSE;
		thread_wakeup(svm_disable_event(mobj));
	} else {
		/*
		 * This is a real request.
		 */
		xmm_obj_unlock(mobj);
		kr = M_CHANGE_COMPLETED(mobj, change->may_cache,
					change->copy_strategy,
					&change->reply_data);
		assert(kr == KERN_SUCCESS);
		xmm_obj_lock(mobj);
	}
}
Example #23
void thread_swapin(thread_t thread)
{
	switch (thread->state & TH_SWAP_STATE) {
	    case TH_SWAPPED:
		/*
		 *	Swapped out - queue for swapin thread.
		 */
		thread->state = (thread->state & ~TH_SWAP_STATE)
				| TH_SW_COMING_IN;
		swapper_lock();
		enqueue_tail(&swapin_queue, &(thread->links));
		swapper_unlock();
		thread_wakeup((event_t) &swapin_queue);
		break;

	    case TH_SW_COMING_IN:
		/*
		 *	Already queued for swapin thread, or being
		 *	swapped in.
		 */
		break;

	    default:
		/*
		 *	Already swapped in.
		 */
		panic("thread_swapin");
	}
}
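The swapin thread draining that queue is not shown; here is a sketch of its shape, assuming classic Mach primitives (dequeue_head(), assert_wait(), thread_block()) with the remaining details invented:

/* Sketch: consumer loop woken by thread_wakeup(&swapin_queue). */
static void swapin_thread_sketch(void)
{
	thread_t thread;

	for (;;) {
		swapper_lock();
		thread = (thread_t) dequeue_head(&swapin_queue);
		swapper_unlock();
		if (thread == THREAD_NULL) {
			/* queue drained: sleep until thread_swapin() wakes us */
			assert_wait((event_t) &swapin_queue, FALSE);
			thread_block((void (*)(void)) 0);
			continue;
		}
		/* ... swap the thread's stack back in, mark it runnable ... */
	}
}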
Example #24
int main(void)
{
    uint32_t count = 0;

    indicator = 0;
    main_pid = thread_getpid();

    kernel_pid_t second_pid = thread_create(stack,
                  sizeof(stack),
                  THREAD_PRIORITY_MAIN - 1,
                  THREAD_CREATE_WOUT_YIELD | THREAD_CREATE_STACKTEST,
                  second_thread,
                  NULL,
                  "second_thread");

    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(second_pid);
        indicator++;
        count++;

        if ((indicator > 1) || (indicator < -1)) {
            printf("[ERROR] threads did not sleep properly (%d).\n", indicator);
            return 1;
        }
        if ((count % 100000) == 0) {
            printf("[ALIVE] alternated %"PRIu32"k times.\n", (count / 1000));
        }
        mutex_unlock_and_sleep(&mutex);
    }
}
Example #25
void
test_wakeup(int all)
{
	Tid ret;
	long ii;
	static Tid child[NTHREADS];
	unintr_printf("starting wakeup test\n");

	done = 0;

	queue = wait_queue_create();
	assert(queue);

	/* initial thread sleep and wake up tests */
	ret = thread_sleep(NULL);
	assert(ret == THREAD_INVALID);
	unintr_printf("initial thread returns from sleep(NULL)\n");

	ret = thread_sleep(queue);
	assert(ret == THREAD_NONE);
	unintr_printf("initial thread returns from sleep(NONE)\n");

	ret = thread_wakeup(NULL, 0);
	assert(ret == 0);
	ret = thread_wakeup(queue, 1);
	assert(ret == 0);

	/* create all threads */
	for (ii = 0; ii < NTHREADS; ii++) {
		child[ii] = thread_create((void (*)(void *))test_wakeup_thread,
					  (void *)ii);
		assert(thread_ret_ok(child[ii]));
	}

	while (__sync_fetch_and_add(&done, 0) < NTHREADS) {
		/* spin for 5 ms */
		spin(5000);
		/* this requires that thread_wakeup is working correctly */
		ret = thread_wakeup(queue, all);
		assert(ret >= 0);
		assert(all ? ret <= NTHREADS : ret <= 1);
	}

	wait_queue_destroy(queue);
	unintr_printf("wakeup test done\n");
}
Example #26
void blockdev_notify_complete(struct blockdev_req *req, int rc)
{
	bool iflag = int_begin_atomic();
	req->state = BLOCKDEV_REQ_FINISHED;
	req->rc = rc;
	thread_wakeup(&req->waitqueue);
	int_end_atomic(iflag);
}
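The requester blocking on that waitqueue would look roughly like this; the helper name is invented, and thread_sleep() on the waitqueue is an assumed counterpart to the thread_wakeup() above:

/* Sketch of the matching wait, under the same interrupt discipline. */
int blockdev_wait_sketch(struct blockdev_req *req)
{
	bool iflag = int_begin_atomic();
	while (req->state != BLOCKDEV_REQ_FINISHED)
		thread_sleep(&req->waitqueue);	/* assumed; woken by blockdev_notify_complete() */
	int_end_atomic(iflag);
	return req->rc;
}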
Example #27
static void second_thread(void)
{
    while (1) {
        mutex_lock(&mutex);
        thread_wakeup(1);
        indicator--;
        mutex_unlock_and_sleep(&mutex);
    }
}
Example #28
/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}
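The daemon on the other end (a sketch close to, but not verbatim, XNU's stack daemon; stack_alloc() and thread_setrun() are assumed here) drains the queue and then sleeps on the same event:

/* Sketch: consumer woken by thread_wakeup(&thread_stack_queue). */
static void
thread_stack_daemon_sketch(void)
{
	thread_t	thread;

	simple_lock(&thread_stack_lock);
	while ((thread = (thread_t) dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);
		stack_alloc(thread);			/* give it a kernel stack */
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		simple_lock(&thread_stack_lock);
	}
	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);
	thread_block((thread_continue_t)thread_stack_daemon_sketch);
}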
Example #29
/*
 *      Routine:        lck_rw_lock_exclusive_to_shared
 */
void
lck_rw_lock_exclusive_to_shared(
	lck_rw_t	*lck)
{
	boolean_t	wakeup_readers = FALSE;
	boolean_t	wakeup_writers = FALSE;
	boolean_t	istate;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);

	istate = lck_interlock_lock(lck);

	lck->lck_rw_shared_count++;
	if (lck->lck_rw_want_upgrade)
		lck->lck_rw_want_upgrade = FALSE;
	else
	 	lck->lck_rw_want_write = FALSE;

	if (lck->lck_w_waiting) {
		lck->lck_w_waiting = FALSE;
		wakeup_writers = TRUE;
	} 
	if (!(lck->lck_rw_priv_excl && wakeup_writers == TRUE) && 
			lck->lck_r_waiting) {
		lck->lck_r_waiting = FALSE;
		wakeup_readers = TRUE;
	}

	lck_interlock_unlock(lck, istate);

	if (wakeup_readers)
		thread_wakeup(RW_LOCK_READER_EVENT(lck));
	if (wakeup_writers)
		thread_wakeup(RW_LOCK_WRITER_EVENT(lck));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0);

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0);
#endif
}
Example #30
void
ipc_target_wakeup(struct ipc_target *ipt)
{
	ipt_lock(ipt);
	if (ipt->ipt_waiting) {
		thread_wakeup((int)&ipt->ipt_acts);
		ipt->ipt_waiting = 0;
	}
	ipt_unlock(ipt);
}
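Its counterpart would mark itself waiting under the same lock and sleep on the same event; a sketch with assumed names (the helper is hypothetical, and the work check is elided):

/* Sketch of the waiter that ipc_target_wakeup() releases. */
void
ipc_target_wait_sketch(struct ipc_target *ipt)
{
	ipt_lock(ipt);
	ipt->ipt_waiting = 1;
	assert_wait((int)&ipt->ipt_acts, FALSE);	/* same event cast as above */
	ipt_unlock(ipt);
	thread_block((void (*)(void)) 0);
}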