int periodic_wake_get_misses(unsigned short int tid)
{
	struct thread_event *te;
	spdid_t spdid = cos_spd_id();
	int m;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (!(te->flags & TE_PERIODIC)) {
		RELEASE(spdid);
		return -1;
	}
	m = te->dl_missed;
	te->dl_missed = 0;
	RELEASE(spdid);

	return m;
}
/* 
 * The problem being solved here: T_1 wishes to take the mutex and
 * finds that it is held by another thread.  It calls into this
 * component, but is preempted by T_2, the lock holder.  The lock is
 * then released.  T_1 is switched back to and invokes this component
 * asking to block until the lock is released.  This component has no
 * way of knowing that the lock has already been released, so the
 * thread would block for no reason, waiting for a "release" that has
 * already happened.  Thus we have the client call this pretake
 * function, checking both before and after invoking it that the lock
 * is still taken.  We record the generation number in pretake and
 * make sure that it is unchanged in take.  This signifies that no
 * release has happened in the interim, and that we really should
 * sleep.
 */
int lock_component_pretake(spdid_t spd, unsigned long lock_id, unsigned short int thd)
{
	struct meta_lock *ml;
 	spdid_t spdid = cos_spd_id();
	int ret = 0;

	ACT_RECORD(ACT_PRELOCK, spd, lock_id, cos_get_thd_id(), thd);
	TAKE(spdid);
//	lock_print_all();
	ml = lock_find(lock_id, spd);
	if (NULL == ml) {
		ret = -1;
		goto done;
	}
	ml->gen_num = generation;
done:
	RELEASE(spdid);
	return ret;
}
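/*
 * A minimal client-side sketch of the pretake/take protocol described
 * above.  The user_lock structure, its fields, and the user_lock_take
 * helper are hypothetical illustrations -- only lock_component_pretake
 * and lock_component_take (below) are part of this component's
 * interface.  The user-level fast path (e.g. an atomic compare-and-swap
 * on the lock word) is elided.
 */
struct user_lock {
	unsigned long               id;    /* id registered with the lock component */
	volatile unsigned short int owner; /* 0 if free, else owning thread id */
};

static int
user_lock_take(struct user_lock *l)
{
	while (1) {
		unsigned short int owner = l->owner;

		/* Fast path: lock looks free; attempt the user-level
		 * acquisition (atomic op elided) and return on success. */
		if (!owner) return 0;

		/* Record the current generation while the lock still
		 * appears taken... */
		if (lock_component_pretake(cos_spd_id(), l->id, owner)) return -1;
		/* ...and re-check afterwards: if it was released in the
		 * interim, retry instead of sleeping. */
		if (!l->owner) continue;

		/* Blocks only if no release has bumped the generation;
		 * a return of 1 means "inconsistent view -- retry". */
		if (lock_component_take(cos_spd_id(), l->id, owner) < 0) return -1;
		/* Woken up: loop and re-attempt the fast path. */
	}
}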
Example #3
int
stkmgr_spd_concurrency_estimate(spdid_t spdid)
{
	struct spd_stk_info *ssi;
	int i, avg;
	unsigned long tot = 0, cnt = 0;

	TAKE();
	ssi = get_spd_stk_info(spdid);
	if (!ssi || !SPD_IS_MANAGED(ssi)) {
		RELEASE();
		return -1;
	}

	if (ssi->num_allocated < ssi->num_desired) {
		assert(!SPD_HAS_BLK_THD(ssi));
		RELEASE();
		return ssi->num_allocated;
	}

	/* stat_thd_blk[i] counts samples in which i threads were found
	 * blocked; average that history and reset the histogram. */
	for (i = 0 ; i < MAX_BLKED ; i++) {
		int n = ssi->stat_thd_blk[i];

		tot += (n * i);
		cnt += n;
		ssi->stat_thd_blk[i] = 0;
	}
	if (cnt == 0 && ssi->num_blocked_thds == 0) {
		avg = ssi->num_allocated;
	} else {
		unsigned int blk_hist;

		if (cnt) blk_hist = (tot/cnt) + 1; /* adjust for rounding */
		else     blk_hist = 0;
		
		avg = ssi->num_allocated + (blk_hist > ssi->num_blocked_thds ? 
					    blk_hist : ssi->num_blocked_thds); 
	}
	RELEASE();

	return avg;
}
Example #4
int
stkmgr_stack_introspect(spdid_t d_spdid, vaddr_t d_addr, 
			spdid_t s_spdid, vaddr_t s_addr)
{
	struct cos_stk_item *si;
	int ret = -1;

	TAKE();
	si = stkmgr_get_spds_stk_item(s_spdid, s_addr);
	if (!si) goto err;
	
	if (d_addr != mman_alias_page(cos_spd_id(), (vaddr_t)si->hptr, d_spdid, d_addr)) {
		printc("<stkmgr>: Unable to map stack into component during introspection\n");
		BUG();
	}
	ret = 0;
err:
	RELEASE();
	return ret;
}
int periodic_wake_wait(spdid_t spdinv)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *te;
	u16_t tid = cos_get_thd_id();
	long long t;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (!(te->flags & TE_PERIODIC)) goto err;
		
	assert(!EMPTY_LIST(te, next, prev));
	te->flags |= TE_BLOCKED;

	rdtscll(t);
	if (te->missed) {	/* we're late */
		long long diff;
		assert(te->completion);

		diff = (t - te->completion);
		te->lateness_tot += diff;
		//te->samples++;
		te->miss_lateness_tot += diff;
		//te->miss_samples++;
		
		te->completion = 0;
	} else {		/* on time! */
		te->completion = t;
	}
	RELEASE(spdid);

	if (-1 == sched_block(spdid, 0)) {
		prints("fprr: sched block failed in timed_event_periodic_wait.");
	}

	return 0;
err:
	RELEASE(spdid);
	return -1;
}
int periodic_wake_remove(spdid_t spdinv, unsigned short int tid)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *te;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (!(te->flags & TE_PERIODIC)) goto err;
		
	assert(!EMPTY_LIST(te, next, prev));
	REM_LIST(te, next, prev);
	te->flags = 0;
	
	RELEASE(spdid);

	return 0;
err:
	RELEASE(spdid);
	return -1;
}
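/*
 * A hypothetical client-side sketch showing how the periodic_wake_*
 * calls above are intended to be used together: the thread does one
 * job instance per period, blocks in periodic_wake_wait until its next
 * activation, and occasionally polls periodic_wake_get_misses (which
 * also resets the counter).  It assumes the thread has already been
 * registered as periodic through this interface; the loop structure
 * and reporting interval are illustrative only.
 */
static void
periodic_task_loop(void)
{
	int periods = 0;

	while (1) {
		/* ... do one job instance here ... */

		/* block until the start of the next period */
		if (periodic_wake_wait(cos_spd_id()) < 0) BUG();

		/* every 100 periods, report and reset the miss count */
		if (++periods == 100) {
			int misses = periodic_wake_get_misses(cos_get_thd_id());
			if (misses > 0) printc("missed %d deadlines\n", misses);
			periods = 0;
		}
	}
}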
Example #7
int
stkmgr_thd_blk_cnt(unsigned short int tid, spdid_t spdid, int reset)
{
	struct spd_stk_info *ssi;
	int n;

	TAKE();
	ssi = get_spd_stk_info(spdid);
	if (!ssi || !SPD_IS_MANAGED(ssi) || tid >= MAX_NUM_THREADS) {
		RELEASE();
		return -1;
	}
	n = ssi->nthd_blks[tid];
	/* Thread on the blocked list? */
	if (ssi->thd_blk_start[tid] && n == 0) n = 1;
	if (reset) {
		ssi->thd_blk_tot[tid] = 0;
		ssi->nthd_blks[tid] = 0;
	}
	RELEASE();
	
	return n;
}
Example #8
int lock_component_release(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds *sent, *bt;
	spdid_t spdid = cos_spd_id();

	ACT_RECORD(ACT_UNLOCK, spd, lock_id, cos_get_thd_id(), 0);
	TAKE(spdid);

	generation++;
	ml = lock_find(lock_id, spd);
	if (!ml) goto error;

	/* Apparently, lock_take calls haven't been made. */
	if (EMPTY_LIST(&ml->b_thds, next, prev)) {
		RELEASE(spdid);
		return 0;
	}
	sent = bt = FIRST_LIST(&ml->b_thds, next, prev);
	/* Remove all threads from the lock's list */
	REM_LIST(&ml->b_thds, next, prev);
	/* Unblock all waiting threads */
	while (1) {
		struct blocked_thds *next;
		u16_t tid;

		/* This is suboptimal: if we wake a thread with a
		 * higher priority, it will be switched to.  Given
		 * that we are holding the component lock here, we
		 * should be switched _back_ to so that we can wake
		 * the rest of the waiting threads. */
		next = FIRST_LIST(bt, next, prev);
		REM_LIST(bt, next, prev);

		ACT_RECORD(ACT_WAKE, spd, lock_id, cos_get_thd_id(), bt->thd_id);

		/* cache locally */
		tid = bt->thd_id;
		/* Last node in the list? */
		if (bt == next) {
			/* This is sneaky, so to reiterate: we keep
			 * the lock until now so that if we wake
			 * another thread and it begins execution, the
			 * system will switch back to this thread,
			 * letting us wake up the rest of the waiting
			 * threads (one of which might have the
			 * highest priority).  We release before waking
			 * the last one as we don't really need the
			 * lock anymore, and it avoids quite a few
			 * invocations. */
			RELEASE(spdid);
		}

		/* Wakeup the way we were put to sleep */
		assert(tid != cos_get_thd_id());
		/* printc("CPU %ld: %d waking up %d for lock %d\n", cos_cpuid(), cos_get_thd_id(), tid, lock_id); */
		sched_wakeup(spdid, tid);

		if (bt == next) break;
		bt = next;
	}

	return 0;
error:
	RELEASE(spdid);
	return -1;
}
Example #9
/* 
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 *
 * Return 0: lock taken, -1: could not find lock, 1: inconsistency -- retry!
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = -1;
	
	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) goto error;
	assert(!lock_is_thd_blocked(ml, curr));

	/* The calling component needs to retry its user-level lock:
	 * some preemption has caused the generation count to change,
	 * i.e. we don't have the most up-to-date view of the
	 * lock's state. */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 1;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual thread's stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* printc("cpu %ld: thd %d going to blk waiting for lock %d\n", cos_cpuid(), cos_get_thd_id(), (int)lock_id); */
	if (-1 == sched_block(spdid, thd_id)) {
		printc("Deadlock including thdids %d -> %d in spd %d, lock id %d.\n", 
		       cos_get_thd_id(), thd_id, spd, (int)lock_id);
		debug_print("BUG: Possible deadlock @ "); 
		assert(0);
		if (-1 == sched_block(spdid, 0)) assert(0);
	}

	if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
	/* 
	 * OK, this seems ridiculous, but here is the rationale: Assume
	 * we are a middle-prio thread that was just woken by a low
	 * priority thread.  We preempt that thread when woken and
	 * continue here.  If a high priority thread is also waiting on
	 * the lock, then we have preempted the low priority thread
	 * while it should be waking the high prio thread.  The
	 * following critical section would switch back to the low prio
	 * thread that still holds the component lock.  See the
	 * comments in lock_component_release. 
	 */
	//TAKE(spdid);
	//RELEASE(spdid);

	ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
	ret = 0;
done:
	return ret;
error:
	RELEASE(spdid);
	goto done;
}
/* 
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id, unsigned int microsec)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = 0;
	
//	print("thread %d from spd %d locking for %d microseconds.", curr, spdid, microsec);

	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	if (0 == microsec) {
		ret = TIMER_EXPIRED;
		goto error;
	}
	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) {
		ret = -1;
		//print("take wtf%d%d%d", 0,0,0);
		goto error;
	}
	if (lock_is_thd_blocked(ml, curr)) {
		prints("lock: lock_is_thd_blocked failed in lock_component_take\n");
		goto error;
	}

	/* The calling component needs to retry its user-level lock:
	 * some preemption has caused the generation count to change,
	 * i.e. we don't have the most up-to-date view of the
	 * lock's state. */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 0;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual thread's stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	blocked_desc.timed = (TIMER_EVENT_INF != microsec);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* Bypass calling the timed event component if there is an infinite wait */
//	assert(TIMER_EVENT_INF == microsec);
//	assert(!blocked_desc.timed);
	if (TIMER_EVENT_INF == microsec) {
		if (-1 == sched_block(spdid, thd_id)) BUG();
		if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
		/* 
		 * OK, this seems ridiculous, but here is the rationale: Assume
		 * we are a middle-prio thread that was just woken by a low
		 * priority thread.  We preempt that thread when woken and
		 * continue here.  If a high priority thread is also waiting on
		 * the lock, then we have preempted the low priority thread
		 * while it should be waking the high prio thread.  The
		 * following critical section would switch back to the low prio
		 * thread that still holds the component lock.  See the
		 * comments in lock_component_release. 
		 */
		//TAKE(spdid);
		//RELEASE(spdid);

		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		ret = 0;
	} else {
		assert(0);
#ifdef NIL
		/* ret here will fall through.  We do NOT use the
		 * dependency here as I can't think through the
		 * repercussions */
		if (-1 == (ret = timed_event_block(spdid, microsec))) return ret;

		/* 
		 * We might have woken from a timeout, which means
		 * that we need to remove this thread from the waiting
		 * list for the lock.
		 */
		TAKE(spdid);
		ml = lock_find(lock_id, spd);
		if (!ml) {
			ret = -1;
			goto error;
		}
		REM_LIST(&blocked_desc, next, prev);
		RELEASE(spdid);

		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0); 
		/* ret is set to the amount of time we blocked */
#endif 
	}
	return ret;
error:
	RELEASE(spdid);
	return ret;
}
Example #11
void *
cbuf_c_retrieve(spdid_t spdid, int cbid, int len)
{
	void *ret = NULL;
	char *l_addr, *d_addr;

	struct cb_desc *d;
	struct cb_mapping *m;

	TAKE();

	d = cos_map_lookup(&cb_ids, cbid);
	/* sanity and access checks */
	if (!d || d->obj_sz < len) goto done;
#ifdef PRINCIPAL_CHECKS
	if (d->principal != cos_get_thd_id()) goto done;
#endif
	/* DOUT("info: thd_id %d obj_size %d addr %p\n", d->principal, d->obj_sz, d->addr); */
	m = malloc(sizeof(struct cb_mapping));
	if (!m) goto done;

	/* u64_t start,end; */
	/* rdtscll(start); */

	INIT_LIST(m, next, prev);

	d_addr = valloc_alloc(cos_spd_id(), spdid, 1);
	l_addr = d->addr;  //cbuf_item addr, initialized in cos_init()
	/* l_addr = d->owner.addr;  // mapped from owner */

	assert(d_addr && l_addr);

	/* rdtscll(end); */
	/* printc("cost of valloc: %lu\n", end-start); */
	/* rdtscll(start); */

	/* if (!mman_alias_page(cos_spd_id(), (vaddr_t)d->addr, spdid, (vaddr_t)page)) goto err; */
	if (unlikely(!mman_alias_page(cos_spd_id(), (vaddr_t)l_addr, spdid, (vaddr_t)d_addr))) {
		printc("No alias!\n");
		goto err;
	}
	/* DOUT("<<<MAPPED>>> mgr addr %p client addr %p\n ",l_addr, d_addr); */

	/* rdtscll(end); */
	/* printc("cost of mman_alias_page: %lu\n", end-start); */

	m->cbd  = d;
	m->spd  = spdid;
	m->addr = (vaddr_t)d_addr;

	//struct cb_mapping *m;
	ADD_LIST(&d->owner, m, next, prev);
	ret = (void *)d_addr;
done:
	RELEASE();
	return ret;
err:
	valloc_free(cos_spd_id(), spdid, d_addr, 1);
	free(m);
	goto done;
}
Example #12
int
cbuf_c_create(spdid_t spdid, int size, long cbid)
{
	int ret = -1;
	void *v;
	struct spd_tmem_info *sti;
	struct cos_cbuf_item *cbuf_item;
	struct cb_desc *d;

	union cbuf_meta *mc = NULL;

	/* DOUT("thd: %d spd: %d cbuf_c_create is called here!!\n", cos_get_thd_id(), spdid); */
	/* DOUT("passed cbid is %ld\n",cbid); */
	TAKE();

	sti = get_spd_info(spdid);
	
	/* Make sure we have access to the component shared page */
	assert(SPD_IS_MANAGED(sti));
	assert(cbid >= 0);

	if (cbid) {
		 // vector should already exist
		v = cos_map_lookup(&cb_ids, cbid);
		if (unlikely((spdid_t)(int)v != spdid)) goto err;
 	} else {
		cbid = cos_map_add(&cb_ids, (void *)(unsigned long)spdid);
		if ((mc = __spd_cbvect_lookup_range(sti, cbid)) == NULL) {
			RELEASE();
			return -cbid;
		}
	}
	cos_map_del(&cb_ids, cbid);
	cbuf_item = tmem_grant(sti);
	assert(cbuf_item);

	d             = &cbuf_item->desc;
	d->principal  = cos_get_thd_id();
	d->obj_sz     = PAGE_SIZE;
	d->owner.spd  = sti->spdid;
	d->owner.cbd  = d;

	/* Jiguo:
	 * There are two different cases here:
	 * 1. A locally cached item is returned, already with a cbid
	 * 2. A cbuf item is obtained from the global free list, without a cbid
	 */
	DOUT("d->cbid is %d\n",d->cbid);
	if (d->cbid == 0) {
		INIT_LIST(&d->owner, next, prev);  // only created when first time
		cbid = cos_map_add(&cb_ids, d);    // we use a new cbuf
		DOUT("new cbid is %ld\n",cbid);
	} else {
		cbid = cbuf_item->desc.cbid;       // use a local cached one
		DOUT("cached cbid is %ld\n",cbid);
	}

	DOUT("cbuf_create:::new cbid is %ld\n",cbid);
	ret = d->cbid = cbid;

	mc = __spd_cbvect_lookup_range(sti, cbid);
	assert(mc);
	cbuf_item->entry = mc;

	mc->c.ptr     = d->owner.addr >> PAGE_ORDER;
	mc->c.obj_sz  = ((unsigned int)PAGE_SIZE) >> CBUF_OBJ_SZ_SHIFT;
	mc->c_0.th_id = cos_get_thd_id();
	mc->c.flags  |= CBUFM_IN_USE | CBUFM_TOUCHED;
done:
	RELEASE();
	return ret;
err:
	ret = -1;
	goto done;
}