Пример #1
0
/**
 * grant a stack to an address
 *
 * Blocks the calling thread until a stack can be granted to component
 * d_spdid, then returns the address one page past the stack's base
 * (stacks presumably grow downward, so this is the initial stack
 * pointer -- TODO confirm against client-side stub).
 *
 * TODO:
 *  - Keep various heap pointers around instead of incrementing it every time.
 */
void *
stkmgr_grant_stack(spdid_t d_spdid)
{
	struct cos_stk_item *stk_item;
	struct spd_stk_info *info;
	vaddr_t ret;
	int meas = 0;	/* set once this thread has been recorded as blocked */

	TAKE();

	info = get_spd_stk_info(d_spdid);

	DOUT("<stkmgr>: stkmgr_grant_stack for, spdid: %d, thdid %d\n",
	       d_spdid, cos_get_thd_id());
        
	// Make sure we have access to the info page
	if (!SPD_IS_MANAGED(info)) get_cos_info_page(d_spdid);
	assert(SPD_IS_MANAGED(info));
	
	/* 
	 * Is there a stack in the local freelist?  If not, is there
	 * one in the global freelist and are we under quota on
	 * stacks?  Otherwise block!
	 */
	while (NULL == (stk_item = spd_freelist_remove(d_spdid))) {
		/* Under quota: try to pull a stack from the global freelist. */
		if (info->num_allocated < info->num_desired &&
		    NULL != (stk_item = freelist_remove())) {
			stkmgr_stk_add_to_spd(stk_item, info);
			break;
		}
		/* Record block statistics only on the first iteration. */
		if (!meas) {
			meas = 1;
			stkmgr_update_stats_block(info, cos_get_thd_id());
		}
		DOUT("Stack list is null, we need to revoke a stack: spdid: %d thdid: %d\n",
		     d_spdid,
		     cos_get_thd_id());
		/* NOTE(review): presumably releases and re-takes the lock
		 * while blocked -- confirm in stkmgr_wait_for_stack. */
		stkmgr_wait_for_stack(info);
	}
	if (meas) stkmgr_update_stats_wakeup(info, cos_get_thd_id());
	
	ret = stk_item->d_addr + PAGE_SIZE;
	RELEASE();

	//DOUT("Returning Stack address: %X\n",(unsigned int)ret);

	return (void *)ret;
}
Пример #2
0
/*
 * Report the average time thread tid has spent blocked waiting for a
 * stack in component spdid; optionally reset the counters.  Returns
 * (unsigned long)-1 on an unmanaged spd or out-of-range tid.
 */
unsigned long
stkmgr_thd_blk_time(unsigned short int tid, spdid_t spdid, int reset)
{
	struct spd_stk_info *info;
	unsigned long avg = 0;
	u64_t now;

	TAKE();
	info = get_spd_stk_info(spdid);
	if (!info || !SPD_IS_MANAGED(info) || tid >= MAX_NUM_THREADS) {
		RELEASE();
		return -1;
	}
	/* Account for a blocking period that is still in progress. */
	if (info->thd_blk_start[tid]) {
		rdtscll(now);
		avg += now - info->thd_blk_start[tid];
	}
	/* Average over the number of completed blocking periods. */
	if (info->nthd_blks[tid]) {
		avg = (unsigned long)
			((avg + info->thd_blk_tot[tid]) / info->nthd_blks[tid]);
	}
	if (reset) {
		info->thd_blk_tot[tid] = 0;
		info->nthd_blks[tid]   = 0;
	}
	RELEASE();

	return avg;
}
Пример #3
0
/*
 * Set the desired stack count (concurrency level) of component spdid.
 * Revokes surplus stacks, or wakes blocked threads if the quota grew;
 * optionally strips all spare (unused) stacks.  Returns 0 on success,
 * -1 on a bad spd or negative level.
 */
int 
stkmgr_set_concurrency(spdid_t spdid, int concur_lvl, int remove_spare)
{
	struct spd_stk_info *info;
	int prev, surplus;

	printc("Setting concurrency of %d to %d\n", spdid, concur_lvl);
	TAKE();
	info = get_spd_stk_info(spdid);
	if (!info || !SPD_IS_MANAGED(info) || concur_lvl < 0) goto err;

	/* Track the system-wide target as this spd's quota changes. */
	prev = info->num_desired;
	info->num_desired = concur_lvl;
	stacks_target += concur_lvl - prev;

	surplus = info->num_allocated - info->num_desired;
	if (surplus > 0) {
		stkmgr_spd_remove_stacks(spdid, surplus);
	} else if (surplus < 0 && SPD_HAS_BLK_THD(info)) {
		/* Quota grew: blocked threads may now obtain stacks. */
		spd_wake_threads(spdid);
	}

	if (remove_spare) {
		while (!spd_remove_spare_stacks(info))
			;
	}

	RELEASE();
	return 0;
err:
	RELEASE();
	return -1;
}
Пример #4
0
/* Reset the blocking/usage statistics of every managed component. */
void
stkmgr_spd_meas_reset(void)
{
	int spdid;

	TAKE();
	for (spdid = 0; spdid < MAX_NUM_SPDS; spdid++) {
		struct spd_stk_info *info = get_spd_stk_info(spdid);

		if (!info) BUG();
		if (SPD_IS_MANAGED(info)) stkmgr_reset_stats(info);
	}
	RELEASE();
}
Пример #5
0
/*
 * Estimate how many stacks component spdid actually needs, based on
 * its blocked-thread history.  Consumes (zeroes) the histogram as a
 * side effect.  Returns -1 on an unmanaged spd.
 */
int
stkmgr_spd_concurrency_estimate(spdid_t spdid)
{
	struct spd_stk_info *info;
	unsigned long weighted = 0, samples = 0;
	int idx, estimate;

	TAKE();
	info = get_spd_stk_info(spdid);
	if (!info || !SPD_IS_MANAGED(info)) {
		RELEASE();
		return -1;
	}

	/* Under quota: no thread can legitimately be blocked. */
	if (info->num_allocated < info->num_desired) {
		assert(!SPD_HAS_BLK_THD(info));
		RELEASE();
		return info->num_allocated;
	}

	/* Drain the histogram: bucket idx counts observations with idx
	 * threads blocked simultaneously. */
	for (idx = 0; idx < MAX_BLKED; idx++) {
		int count = info->stat_thd_blk[idx];

		weighted += count * idx;
		samples  += count;
		info->stat_thd_blk[idx] = 0;
	}

	if (samples == 0 && info->num_blocked_thds == 0) {
		estimate = info->num_allocated;
	} else {
		unsigned int hist_avg = 0;

		if (samples) hist_avg = (weighted / samples) + 1; /* adjust for rounding */

		/* Pad the allocation by the worse of the historical
		 * average and the currently blocked count. */
		estimate = info->num_allocated +
			(hist_avg > info->num_blocked_thds ?
			 hist_avg : info->num_blocked_thds);
	}
	RELEASE();

	return estimate;
}
Пример #6
0
/* 
 * Is there a component with blocked threads?  Which is the one with
 * the largest disparity between the number of stacks it has, and the
 * number it is supposed to have?  Returns NULL if no managed spd with
 * blocked threads is under its quota.
 */
static struct spd_stk_info *
stkmgr_find_spd_requiring_stk(void)
{
	struct spd_stk_info *neediest = NULL;
	int idx, largest_deficit = 0;

	for (idx = 0; idx < MAX_NUM_SPDS; idx++) {
		struct spd_stk_info *info = &spd_stk_info_list[idx];
		int deficit;

		if (!SPD_IS_MANAGED(info)) continue;
		/* Priority goes to spds with blocked threads. */
		if (!SPD_HAS_BLK_THD(info)) continue;

		deficit = info->num_desired - info->num_allocated;
		if (deficit > largest_deficit) {
			largest_deficit = deficit;
			neediest        = info;
		}
	}
	return neediest;
}
Пример #7
0
/*
 * Report how many times thread tid has blocked waiting for a stack in
 * component spdid; optionally reset the counters.  Returns -1 on an
 * unmanaged spd or out-of-range tid.
 */
int
stkmgr_thd_blk_cnt(unsigned short int tid, spdid_t spdid, int reset)
{
	struct spd_stk_info *info;
	int cnt;

	TAKE();
	info = get_spd_stk_info(spdid);
	if (!info || !SPD_IS_MANAGED(info) || tid >= MAX_NUM_THREADS) {
		RELEASE();
		return -1;
	}
	cnt = info->nthd_blks[tid];
	/* A thread blocked right now counts even before the blocking
	 * period completes. */
	if (cnt == 0 && info->thd_blk_start[tid]) cnt = 1;
	if (reset) {
		info->thd_blk_tot[tid] = 0;
		info->nthd_blks[tid]   = 0;
	}
	RELEASE();

	return cnt;
}
Пример #8
0
/*
 * Allocate a cbuf (one page) on behalf of component spdid and return
 * its id; negative on error.
 *
 * cbid != 0: the caller presents an existing id, which must map back
 *            to the same spdid in cb_ids.
 * cbid == 0: a placeholder id is allocated to probe the component's
 *            meta-table range.
 * Either way the id is deleted again before tmem_grant(); the final id
 * is freshly allocated for a brand-new cbuf (d->cbid == 0) or taken
 * from the granted item's cached descriptor.
 *
 * BUGFIX: the meta-table-lookup failure path previously returned
 * without freeing the id it had just cos_map_add()ed, leaking a map
 * slot on every failure (the success path frees it below).
 */
int
cbuf_c_create(spdid_t spdid, int size, long cbid)
{
	int ret = -1;
	void *v;
	struct spd_tmem_info *sti;
	struct cos_cbuf_item *cbuf_item;
	struct cb_desc *d;

	union cbuf_meta *mc = NULL;

	/* DOUT("thd: %d spd: %d cbuf_c_create is called here!!\n", cos_get_thd_id(), spdid); */
	/* DOUT("passed cbid is %ld\n",cbid); */
	TAKE();

	sti = get_spd_info(spdid);
	
	/* Make sure we have access to the component shared page */
	assert(SPD_IS_MANAGED(sti));
	assert(cbid >= 0);

	if (cbid) {
		 // vector should already exist; id must belong to spdid
		v = cos_map_lookup(&cb_ids, cbid);
		if (unlikely((spdid_t)(int)v != spdid)) goto err;
 	} else {
		cbid = cos_map_add(&cb_ids, (void *)(unsigned long)spdid);
		if ((mc = __spd_cbvect_lookup_range(sti, (cbid))) == NULL){
			/* Free the id just allocated above, or it leaks. */
			cos_map_del(&cb_ids, cbid);
			RELEASE();
			return cbid*-1;	
		} 
	}
	/* The id above was only a placeholder; drop it before granting. */
	cos_map_del(&cb_ids, cbid);
	cbuf_item = tmem_grant(sti);
	assert(cbuf_item);

	d             = &cbuf_item->desc;
	d->principal  = cos_get_thd_id();
	d->obj_sz     = PAGE_SIZE;
	d->owner.spd  = sti->spdid;
	d->owner.cbd  = d;

	/* Jiguo:
	  This can be two different cases:
	  1. A local cached one is returned with a cbid
	  2. A cbuf item is obtained from the global free list without cbid
	 */
	DOUT("d->cbid is %d\n",d->cbid);
	if (d->cbid == 0) {
		INIT_LIST(&d->owner, next, prev);  // only created when first time
		cbid = cos_map_add(&cb_ids, d);    // we use a new cbuf
		DOUT("new cbid is %ld\n",cbid);
	} else {
		cbid = cbuf_item->desc.cbid;       // use a local cached one
		DOUT("cached cbid is %ld\n",cbid);
	}

	DOUT("cbuf_create:::new cbid is %ld\n",cbid);
	ret = d->cbid = cbid;

	/* Publish the cbuf in the component's shared meta-table. */
	mc = __spd_cbvect_lookup_range(sti, cbid);
	assert(mc);
	cbuf_item->entry = mc;

	mc->c.ptr     = d->owner.addr >> PAGE_ORDER;
	mc->c.obj_sz  = ((unsigned int)PAGE_SIZE) >> CBUF_OBJ_SZ_SHIFT;
	mc->c_0.th_id = cos_get_thd_id();
	mc->c.flags  |= CBUFM_IN_USE | CBUFM_TOUCHED;
done:
	RELEASE();
	return ret;
err:
	ret = -1;
	goto done;
}