Example #1
/*
 * For a certain principal, collect any unreferenced persistent cbufs
 * so that they can be reused.  This is the garbage-collection
 * mechanism.
 *
 * Collect cbufps and add them onto the component's freelist.
 */
int
cbufp_collect(spdid_t spdid, int size, long cbid)
{
	long *buf;
	int off = 0;
	struct cbufp_info *cbi;
	struct cbufp_comp_info *cci;
	int ret = -EINVAL;

	buf = cbuf2buf(cbid, PAGE_SIZE);
	if (!buf) return -1;

	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) {
		CBUFP_RELEASE();
		return -ENOMEM;
	}

	/* Go through all cbufs we own, and save all of them that have
	 * no current references to them. */
	cbi = cci->cbufs.c;
	do {
		if (!cbi) break;
		if (!cbufp_referenced(cbi)) {
			cbufp_references_clear(cbi);
			buf[off++] = cbi->cbid;
			if (off == PAGE_SIZE/sizeof(long)) break; /* buf holds longs; don't overrun the page */
		}
		cbi = FIRST_LIST(cbi, next, prev);
	} while (cbi != cci->cbufs.c);
	CBUFP_RELEASE();
	/* nothing collected...create a new one! */
	/* TODO: only allocate when we should, and sleep otherwise */
	/* if (off == 0) { */
	/* 	int r = cbufp_create(spdid, size, 0); */
	/* 	if (r) buf[off++] = r; */
	/* } */
	ret = off;
	return ret;
}
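
The traversal above, and nearly every loop in these examples, walks a circular doubly-linked list through the FIRST_LIST/ADD_LIST/REM_LIST/EMPTY_LIST macros. The sketch below shows one plausible way such macros are defined; it is included only to make the iteration pattern readable and is an assumption, not the project's actual definitions.

/* Illustrative circular doubly-linked list macros (sketch only).  'n' and
 * 'p' name the embedded next/prev fields of each structure. */
#define INIT_LIST(o, n, p)  ((o)->n = (o)->p = (o))
#define EMPTY_LIST(o, n, p) ((o)->n == (o))
#define FIRST_LIST(o, n, p) ((o)->n)
#define ADD_LIST(h, o, n, p) do {			\
		(o)->n = (h)->n; (o)->p = (h);		\
		(h)->n->p = (o); (h)->n = (o);		\
	} while (0)
#define REM_LIST(o, n, p) do {				\
		(o)->n->p = (o)->p;			\
		(o)->p->n = (o)->n;			\
		INIT_LIST(o, n, p);			\
	} while (0)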
Example #2
static void
cbufp_free_unmap(spdid_t spdid, struct cbufp_info *cbi)
{
	struct cbufp_maps *m = &cbi->owner;
	void *ptr = cbi->mem;
	int size;

	if (cbufp_referenced(cbi)) return;

	do {
		struct cbufp_maps *next;

		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, cbi->size/PAGE_SIZE);
		m = next;
	} while (m != &cbi->owner);

	/* TODO: iterate through the size, and free all... */
	mman_revoke_page(cos_spd_id(), (vaddr_t)ptr, 0);
	//free_page(ptr);
}
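
The TODO above notes that only the first page of a multi-page cbuf is revoked. A minimal sketch of the per-page loop it describes follows, assuming mman_revoke_page (the call already used above) revokes exactly one page at the given address.

/* Sketch: revoke every page of a multi-page cbuf, not just the first. */
static void
cbufp_revoke_all_pages_sketch(struct cbufp_info *cbi)
{
	char *ptr = cbi->mem;
	unsigned long off;

	for (off = 0 ; off < cbi->size ; off += PAGE_SIZE) {
		mman_revoke_page(cos_spd_id(), (vaddr_t)(ptr + off), 0);
	}
}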
Example #3
/* Do any components have a reference to the cbuf? */
static int
cbufp_referenced(struct cbufp_info *cbi)
{
	struct cbufp_maps *m = &cbi->owner;
	int sent, recvd;

	sent = recvd = 0;
	do {
		struct cbuf_meta *meta = m->m;

		if (meta) {
			if (meta->nfo.c.flags & CBUFM_IN_USE) return 1;
			sent  += meta->owner_nfo.c.nsent;
			recvd += meta->owner_nfo.c.nrecvd;
		}

		m = FIRST_LIST(m, next, prev);
	} while (m != &cbi->owner);

	if (sent != recvd) return 1;
	
	return 0;
}
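
The sent != recvd test above accounts for cbufs that are "in flight" between components. Below is a rough sketch of the client-side bookkeeping it assumes; the function names are stand-ins, not the actual client API, but the meta fields are the ones read in cbufp_referenced.

/* Sender side (sketch): a cbuf id is handed to another component. */
static inline void
cbuf_send_sketch(struct cbuf_meta *meta)
{
	meta->owner_nfo.c.nsent++;
}

/* Receiver side (sketch): the cbuf is mapped in, e.g. via cbuf2buf. */
static inline void
cbuf_recv_sketch(struct cbuf_meta *meta)
{
	meta->owner_nfo.c.nrecvd++;
}
/* Summed over all mappings, nsent != nrecvd means a reference is still in
 * transit, so the manager must treat the cbuf as referenced even though no
 * mapping has CBUFM_IN_USE set. */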
Example #4
static void
stkmgr_print_ci_freelist(void)
{
	int i;
	struct spd_stk_info *info;
	struct cos_stk_item *stk_item;
#ifdef PRINT_FREELIST_ELEMENTS
	void *curr;
	struct cos_stk_item *p;
#endif

	for(i = 0; i < MAX_NUM_SPDS; i++){
		unsigned int cnt = 0;

		info = &spd_stk_info_list[i];
		if(info->ci == NULL) continue;

		if (info->num_allocated == 0 && info->num_blocked_thds == 0) continue;

		for (stk_item = FIRST_LIST(&info->stk_list, next, prev) ;
		     stk_item != &info->stk_list ; 
		     stk_item = FIRST_LIST(stk_item, next, prev)) {
			if (stk_item->stk->flags & IN_USE) cnt++;
		}
		printc("stkmgr: spdid %d w/ %d stacks, %d on freelist, %d blocked\n", 
		       i, info->num_allocated, cnt, info->num_blocked_thds);
		assert(info->num_allocated == stkmgr_num_alloc_stks(info->spdid));
#ifdef PRINT_FREELIST_ELEMENTS
		curr = (void *)info->ci->cos_stacks.freelists[0].freelist;
		if(curr) {
			DOUT("\tcomponent freelist: %p\n", curr);
			p = stk_item = stkmgr_get_cos_stk_item((vaddr_t)curr);
			while (stk_item) {
				DOUT("\tStack:\n"	\
				       "\t\tcurr: %X\n"	\
				       "\t\taddr: %X\n"	\
				       "\t\tnext: %X\n",
				       (unsigned int)stk_item->stk,
				       (unsigned int)D_COS_STK_ADDR(stk_item->d_addr),
				       (unsigned int)stk_item->stk->next);
				print_flags(stk_item->stk);
				curr = stk_item->stk->next;
				stk_item = stkmgr_get_cos_stk_item((vaddr_t)curr);
				if (p == stk_item) {
					printc("<<WTF: freelist recursion...>>\n");
					break;
				}
				p = stk_item;
			}
		}
		for (stk_item = FIRST_LIST(&info->stk_list, next, prev) ;
		     stk_item != &info->stk_list ;
		     stk_item = FIRST_LIST(stk_item, next, prev)) {
			if (!stkmgr_in_freelist(i, stk_item)) {
				DOUT("\tStack off of freelist:\n"	\
				       "\t\tcurr: %X\n"			\
				       "\t\taddr: %X\n"			\
				       "\t\tnext: %X\n",
				       (unsigned int)stk_item->stk,
				       (unsigned int)D_COS_STK_ADDR(stk_item->d_addr),
				       (unsigned int)stk_item->stk->next);
				print_flags(stk_item->stk);
			}
		}
#endif
	}

}
Example #5
static void __event_expiration(event_time_t time, struct thread_event *events)
{
	spdid_t spdid = cos_spd_id();

	struct thread_event *tmp, *next_te;

	assert(TIMER_NO_EVENTS != time);

	for (tmp = FIRST_LIST(events, next, prev) ;
	     tmp != events && tmp->event_expiration <= time ; 
	     tmp = next_te) {
		u8_t b;
		unsigned short int tid;

		assert(tmp);
		next_te = FIRST_LIST(tmp, next, prev);
		assert(next_te && next_te->prev == tmp && tmp->next == next_te);
		tmp->flags |= TE_TIMED_OUT;
		REM_LIST(tmp, next, prev);
		b = tmp->flags & TE_BLOCKED;
		tmp->flags &= ~TE_BLOCKED;
		tid = tmp->thread_id;
		if (tmp->flags & TE_PERIODIC) {
			/* thread hasn't blocked? deadline miss! */
			if (!b) {
				long long period_cyc;

				tmp->dl_missed++;
				
				if (!tmp->missed) { /* first miss? */
					tmp->missed = 1;
					/* save time of deadline, unless we
					 * have saved the time of an earlier
					 * deadline miss */
					assert(!tmp->completion);
					rdtscll(tmp->completion);
					tmp->miss_samples++;
					tmp->samples++;
				} else {
					period_cyc = tmp->period*cyc_per_tick;
					assert(period_cyc > cyc_per_tick);
					tmp->lateness_tot +=period_cyc;
					tmp->miss_lateness_tot += period_cyc;
					rdtscll(tmp->completion);
				}
			} else {
				if (!tmp->missed) { /* on time, compute lateness */
					long long t;

					assert(tmp->completion);
					rdtscll(t);
					tmp->lateness_tot += -(t - tmp->completion);
					tmp->samples++;
					tmp->completion = 0;
				}
				tmp->missed = 0;
			}

			tmp->dl++;
			/* Next periodic deadline! */
			tmp->event_expiration += tmp->period;
			insert_pevent(tmp);
		}

		if (b) sched_wakeup(spdid, tmp->thread_id);
		/* We don't have to deallocate the thread_events as
		 * they are stack allocated on the sleeping
		 * threads. */
	}
}
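
The closing comment notes that thread_events live on the sleeping threads' stacks. A rough sketch of that sleeping side appears below; insert_event() and ticks() are stand-in names (insert_pevent above is the periodic counterpart), and sched_block() is assumed to be the scheduler call paired with the sched_wakeup() used above.

static int
timed_block_sketch(unsigned int ticks_from_now)
{
	struct thread_event te;	/* stack allocated, as noted above */

	memset(&te, 0, sizeof(te));
	te.thread_id        = cos_get_thd_id();
	te.flags            = TE_BLOCKED;
	te.event_expiration = ticks() + ticks_from_now; /* ticks(): assumed time source */
	insert_event(&te);		/* assumed one-shot counterpart of insert_pevent */
	sched_block(cos_spd_id(), 0);	/* woken by __event_expiration via sched_wakeup */

	/* __event_expiration sets TE_TIMED_OUT if we were woken by the timeout */
	return (te.flags & TE_TIMED_OUT) ? -1 : 0;
}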
Example #6
int lock_component_release(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds *sent, *bt;
	spdid_t spdid = cos_spd_id();

	ACT_RECORD(ACT_UNLOCK, spd, lock_id, cos_get_thd_id(), 0);
	TAKE(spdid);

	generation++;
	ml = lock_find(lock_id, spd);
	if (!ml) goto error;

	/* Apparently, lock_take calls haven't been made. */
	if (EMPTY_LIST(&ml->b_thds, next, prev)) {
		RELEASE(spdid);
		return 0;
	}
	sent = bt = FIRST_LIST(&ml->b_thds, next, prev);
	/* Remove all threads from the lock's list */
	REM_LIST(&ml->b_thds, next, prev);
	/* Unblock all waiting threads */
	while (1) {
		struct blocked_thds *next;
		u16_t tid;

		/* This is suboptimal: if we wake a thread with a
		 * higher priority, it will be switched to.  Given we
		 * are holding the component lock here, we should get
		 * switched _back_ to so that we can wake the rest of
		 * the waiting threads. */
		next = FIRST_LIST(bt, next, prev);
		REM_LIST(bt, next, prev);

		ACT_RECORD(ACT_WAKE, spd, lock_id, cos_get_thd_id(), bt->thd_id);

		/* cache locally */
		tid = bt->thd_id;
		/* Last node in the list? */
		if (bt == next) {
			/* This is sneaky, so to reiterate: Keep this
			 * lock till now so that if we wake another
			 * thread, and it begins execution, the system
			 * will switch back to this thread so that we
			 * can wake up the rest of the waiting threads
			 * (one of which might have the highest
			 * priority).  We release before we wake the
			 * last thread as we don't really need the lock
			 * anymore, and it avoids quite a few
			 * invocations.*/
			RELEASE(spdid);
		}

		/* Wakeup the way we were put to sleep */
		assert(tid != cos_get_thd_id());
		/* printc("CPU %ld: %d waking up %d for lock %d\n", cos_cpuid(), cos_get_thd_id(), tid, lock_id); */
		sched_wakeup(spdid, tid);

		if (bt == next) break;
		bt = next;
	}

	return 0;
error:
	RELEASE(spdid);
	return -1;
}
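
For context on the wake-up ordering discussed in the comments above, here is a rough sketch of the blocking side it pairs with. It reuses the structures and helpers visible in lock_component_release (TAKE/RELEASE, lock_find, blocked_thds), but its overall shape is an assumption about lock_component_take, not the project's actual implementation; sched_block() is assumed to be the counterpart of sched_wakeup().

static int
lock_component_take_sketch(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds bt;	/* stack allocated; unlinked by the releaser */
	spdid_t spdid = cos_spd_id();

	TAKE(spdid);
	ml = lock_find(lock_id, spd);
	if (!ml) {
		RELEASE(spdid);
		return -1;
	}
	/* Queue ourselves on the lock's list of waiters. */
	bt.thd_id = cos_get_thd_id();
	INIT_LIST(&bt, next, prev);
	ADD_LIST(&ml->b_thds, &bt, next, prev);
	RELEASE(spdid);

	/* Block until lock_component_release wakes us with sched_wakeup. */
	sched_block(spdid, 0);

	return 0;
}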
Example #7
/*
 * For a certain principal, collect any unreferenced cbufs that are not
 * on a free list, so that they can be reused.  This is the
 * garbage-collection mechanism.
 *
 * Collect cbufs and add them onto the shared component's ring buffer.
 *
 * This function is semantically subtle.  It can return no cbufs even
 * if some are available, in order to force the pool of cbufs to be
 * expanded (the client will call cbuf_create in this case).  In the
 * common case, it returns the number of available cbufs collected.
 */
int
cbuf_collect(spdid_t spdid, unsigned long size)
{
	struct cbuf_info *cbi;
	struct cbuf_comp_info *cci;
	struct cbuf_shared_page *csp;
	struct cbuf_bin *bin;
	int ret = 0;

	printl("cbuf_collect\n");

	CBUF_TAKE();
	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, done);
	tracking_start(&cci->track, CBUF_COLLECT);
	if (size + cci->allocated_size <= cci->target_size) goto done;

	csp  = cci->csp;
	if (unlikely(!csp)) ERR_THROW(-EINVAL, done);

	assert(csp->ring.size == CSP_BUFFER_SIZE);
	ret = CK_RING_SIZE(cbuf_ring, &csp->ring);
	if (ret != 0) goto done;
	/*
	 * Go through all cbufs we own, and report all of them that
	 * have no current references to them.  Unfortunately, this is
	 * O(N*M), N = min(num cbufs, CSP_BUFFER_SIZE), and M = num
	 * components.
	 */
	size = round_up_to_page(size);
	bin  = cbuf_comp_info_bin_get(cci, size);
	if (!bin) ERR_THROW(0, done);
	cbi  = bin->c;
	do {
		if (!cbi) break;
		/*
		 * Skip cbufs that are on the client's freelist.  This
		 * coordinates with cbuf_free so that such cbufs are detected
		 * correctly: we must check the refcnt first and the "next"
		 * pointer second.
		 *
		 * If we did not check the refcnt at all, the manager could
		 * read "next" while it is still NULL, then be preempted by
		 * the client, which calls cbuf_free to set "next", decrease
		 * the refcnt, and add the cbuf to its freelist.  When the
		 * manager resumes, it would collect a cbuf that is already
		 * on the freelist.
		 *
		 * Furthermore, the refcnt must be checked before "next":
		 * otherwise, as in the case above, the manager could be
		 * preempted between the two checks, observe "next" as NULL
		 * and the refcnt as 0, and again collect an in-freelist
		 * cbuf.  The short-circuit of && enforces this evaluation
		 * order.
		 */
		assert(cbi->owner.m);
		if (!CBUF_REFCNT(cbi->owner.m) && !CBUF_IS_IN_FREELIST(cbi->owner.m)
                 		    && !cbuf_referenced(cbi)) {
			struct cbuf_ring_element el = { .cbid = cbi->cbid };
			cbuf_references_clear(cbi);
			if (!CK_RING_ENQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) break;
			/*
			 * Prevent another collection pass from collecting
			 * these cbufs.  The manager checks whether the shared
			 * ring buffer is empty on entry and returns if it is
			 * not, but that alone does not prevent
			 * double-collection.  The corner case: after the last
			 * element in the ring buffer is dequeued, but before
			 * it is added to the free-list, the manager runs
			 * again and may collect that last cbuf a second time.
			 */
			cbi->owner.m->next = (struct cbuf_meta *)1;
			if (++ret == CSP_BUFFER_SIZE) break;
		}
		cbi = FIRST_LIST(cbi, next, prev);
	} while (cbi != bin->c);
	if (ret) cbuf_thd_wake_up(cci, ret*size);

done:
	if (cci) tracking_end(&cci->track, CBUF_COLLECT);
	CBUF_RELEASE();
	return ret;
}
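
The header comment of cbuf_collect describes a protocol with the client: drain collected ids from the shared ring, or fall back to cbuf_create when nothing is returned. A rough client-side sketch of that protocol follows; the dequeue macro simply mirrors the enqueue used above, and local_freelist_add() is a hypothetical client helper, so this is an assumption about the client library rather than its real interface.

static int
cbuf_collect_client_sketch(struct cbuf_shared_page *csp, unsigned long size)
{
	struct cbuf_ring_element el;
	int n = 0;

	/* Ask the manager to refill the shared ring with reclaimable ids. */
	if (cbuf_collect(cos_spd_id(), size) <= 0) {
		/* Nothing collected: per the header comment above, the
		 * client should expand its pool with cbuf_create. */
		return 0;
	}
	/* Drain the ring and return the ids to a client-local free-list. */
	while (CK_RING_DEQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) {
		local_freelist_add(el.cbid);	/* hypothetical client helper */
		n++;
	}
	return n;
}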

/* 
 * Called by cbuf_deref.
 */
int
cbuf_delete(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	int ret = -EINVAL, sz;

	printl("cbuf_delete\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_DEL);

	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cbi  = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) goto done;
	meta = cbuf_meta_lookup(cci, cbid);

	/*
	 * Other threads can access the meta-data concurrently.  For
	 * example, another thread may call cbuf2buf, which increases
	 * the refcnt.
	 */
	CBUF_REFCNT_ATOMIC_DEC(meta);
	/* Find the owner of this cbuf */
	if (cbi->owner.spdid != spdid) {
		cci = cbuf_comp_info_get(cbi->owner.spdid);
		if (unlikely(!cci)) goto done;
	}
	if (cbuf_free_unmap(cci, cbi)) 	goto done;
	if (cci->allocated_size < cci->target_size) {
		cbuf_thd_wake_up(cci, cci->target_size - cci->allocated_size);
	}
	ret = 0;
done:
	tracking_end(NULL, CBUF_DEL);
	CBUF_RELEASE();
	return ret;
}

/* 
 * Called by cbuf2buf to retrieve a given cbid.
 */
int
cbuf_retrieve(spdid_t spdid, unsigned int cbid, unsigned long size)
{
	struct cbuf_comp_info *cci, *own;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta, *own_meta;
	struct cbuf_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -EINVAL, off;

	printl("cbuf_retrieve\n");

	CBUF_TAKE();
	tracking_start(NULL, CBUF_RETRV);

	cci        = cbuf_comp_info_get(spdid);
	if (!cci) {printd("no cci\n"); goto done; }
	cbi        = cmap_lookup(&cbufs, cbid);
	if (!cbi) {printd("no cbi\n"); goto done; }
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) {
		printd("owner\n"); 
		goto done;
	}
	meta       = cbuf_meta_lookup(cci, cbid);
	if (!meta) {printd("no meta\n"); goto done; }
	assert(!(meta->nfo & ~CBUF_INCONSISENT));

	map        = malloc(sizeof(struct cbuf_maps));
	if (!map) {printd("no map\n"); ERR_THROW(-ENOMEM, done); }
	if (size > cbi->size) {printd("too big\n"); goto free; }
	assert(round_to_page(cbi->size) == cbi->size);
	size       = cbi->size;
	/* TODO: change to MAPPING_READ */
	if (cbuf_alloc_map(spdid, &map->addr, NULL, cbi->mem, size, MAPPING_RW)) {
		printc("cbuf mgr map fail spd %d mem %p sz %lu cbid %u\n", spdid, cbi->mem, size, cbid);
		goto free;
	}

	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);
	CBUF_PTR_SET(meta, map->addr);
	map->spdid          = spdid;
	map->m              = meta;
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = cbid;
	own                 = cbuf_comp_info_get(cbi->owner.spdid);
	if (unlikely(!own)) goto done;
	/*
	 * We need to inherit the relinquish bit from the sender. 
	 * Otherwise, this cbuf cannot be returned to the manager. 
	 */
	own_meta            = cbuf_meta_lookup(own, cbid);
	if (CBUF_RELINQ(own_meta)) CBUF_FLAG_ADD(meta, CBUF_RELINQ);
	ret                 = 0;
done:
	tracking_end(NULL, CBUF_RETRV);

	CBUF_RELEASE();
	return ret;
free:
	free(map);
	goto done;
}

vaddr_t
cbuf_register(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info  *cci;
	struct cbuf_meta_range *cmr;
	void *p;
	vaddr_t dest, ret = 0;

	printl("cbuf_register\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_REG);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cmr = cbuf_meta_lookup_cmr(cci, cbid);
	if (cmr) ERR_THROW(cmr->dest, done);

	/* Create the mapping into the client */
	if (cbuf_alloc_map(spdid, &dest, &p, NULL, PAGE_SIZE, MAPPING_RW)) goto done;
	assert((unsigned int)p == round_to_page(p));
	cmr = cbuf_meta_add(cci, cbid, p, dest);
	assert(cmr);
	ret = cmr->dest;
done:
	tracking_end(NULL, CBUF_REG);

	CBUF_RELEASE();
	return ret;
}

static void
cbuf_shrink(struct cbuf_comp_info *cci, int diff)
{
	int i, sz;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi, *next, *head;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz = (int)bin->size;
		if (!bin->c) continue;
		cbi = FIRST_LIST(bin->c, next, prev);
		while (cbi != bin->c) {
			next = FIRST_LIST(cbi, next, prev);
			if (!cbuf_free_unmap(cci, cbi)) {
				diff -= sz;
				if (diff <= 0) return;
			}
			cbi = next;
		}
		if (!cbuf_free_unmap(cci, cbi)) {
			diff -= sz;
			if (diff <= 0) return;
		}
	}
	if (diff > 0) cbuf_mark_relinquish_all(cci);
}
Example #8
unsigned long
cbuf_debug_cbuf_info(spdid_t spdid, int index, int p)
{
	unsigned long ret[20], sz;
	struct cbuf_comp_info *cci;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi, *next, *head;
	struct cbuf_meta *meta;
	struct blocked_thd *bthd;
	unsigned long long cur;
	int i;

	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) assert(0);
	memset(ret, 0, sizeof(ret));

	ret[CBUF_TARGET] = cci->target_size;
	ret[CBUF_ALLOC] = cci->allocated_size;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz = bin->size;
		if (!bin->c) continue;
		cbi = bin->c;
		do {
			if (__debug_reference(cbi)) ret[CBUF_USE] += sz;
			else                        ret[CBUF_GARBAGE] += sz;
			meta = cbi->owner.m;
			if (CBUF_RELINQ(meta)) ret[CBUF_RELINQ_NUM]++;
			cbi = FIRST_LIST(cbi, next, prev);
		} while(cbi != bin->c);
	}
	assert(ret[CBUF_USE]+ret[CBUF_GARBAGE] == ret[CBUF_ALLOC]);

	ret[BLK_THD_NUM] = cci->num_blocked_thds;
	if (ret[BLK_THD_NUM]) {
		rdtscll(cur);
		bthd = cci->bthd_list.next;
		while (bthd != &cci->bthd_list) {
			cci->track.blk_tot += (cur-bthd->blk_start);
			ret[CBUF_BLK] += bthd->request_size;
			bthd->blk_start = cur;
			bthd = FIRST_LIST(bthd, next, prev);
		}
	}

	ret[TOT_BLK_TSC] = (unsigned long)cci->track.blk_tot;
	ret[MAX_BLK_TSC] = (unsigned long)cci->track.blk_max;
	ret[TOT_GC_TSC]  = (unsigned long)cci->track.gc_tot;
	ret[MAX_GC_TSC]  = (unsigned long)cci->track.gc_max;
	if (p == 1) {
		printc("target %lu %lu allocate %lu %lu\n", 
			ret[CBUF_TARGET], ret[CBUF_TARGET]/PAGE_SIZE, ret[CBUF_ALLOC], ret[CBUF_ALLOC]/PAGE_SIZE);
		printc("using %lu %lu garbage %lu %lu relinq %lu\n", ret[CBUF_USE], ret[CBUF_USE]/PAGE_SIZE, 
			ret[CBUF_GARBAGE], ret[CBUF_GARBAGE]/PAGE_SIZE, ret[CBUF_RELINQ_NUM]);
		printc("spd %d %lu thd blocked request %d pages %d\n", 
			spdid, ret[BLK_THD_NUM], ret[CBUF_BLK], ret[CBUF_BLK]/PAGE_SIZE);
		printc("spd %d blk_tot %lu blk_max %lu gc_tot %lu gc_max %lu\n", spdid, ret[TOT_BLK_TSC], 
			ret[MAX_BLK_TSC], ret[TOT_GC_TSC], ret[MAX_GC_TSC]);
	}
	if (p == 2) {
		cci->track.blk_tot = cci->track.blk_max = cci->track.gc_tot = cci->track.gc_max = 0;
		cci->track.gc_num = 0;
	}

	CBUF_RELEASE();
	return ret[index];
}
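
A small usage sketch for the statistics interface above: each call returns the single counter selected by index, so a monitor issues one call per field and can pass p == 1 once to have the manager print its full summary. The wrapper name below is hypothetical; the index names are the ones used above.

static void
cbuf_print_component_stats_sketch(spdid_t spd)
{
	unsigned long target, alloc;

	target = cbuf_debug_cbuf_info(spd, CBUF_TARGET, 0);
	alloc  = cbuf_debug_cbuf_info(spd, CBUF_ALLOC, 1);	/* p == 1: also print summary */
	printc("spd %d: target %lu bytes, allocated %lu bytes\n", spd, target, alloc);
}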
struct component *
find_tardiness_comp(void)
{
	struct component * c, * max_c = NULL;
	int mgr;

	if (ALGORITHM == AVG) {
		long max = 0;
		/* find the component that improves the total tardiness the most */
		for (mgr = 0 ; mgr < NUM_TMEM_MGR ; mgr++) {
			for (c = FIRST_LIST(&components[mgr], next, prev) ;
			     c != &components[mgr] ;
			     c = FIRST_LIST(c, next, prev))  {
				calc_component_tardiness(c);
				if (c->add_impact > max) {
					max_c = c;
					max = c->add_impact;
				}
				if (c->add_impact == max && max > 0 && max_c != c) {
					/* if one tmem improves the tardiness
					 * of multiple components equally, we
					 * want the component with the largest
					 * total block time. */
					struct thd * titer;
					unsigned long tot_impact1 = 0, tot_impact2 = 0;
					for ( titer = FIRST_LIST(&threads, next, prev) ;
					      titer != &threads ;
					      titer = FIRST_LIST(titer, next, prev)) {
						tot_impact1 += titer->comp_info[c->mgr][c->spdid].impact;
						tot_impact2 += titer->comp_info[max_c->mgr][max_c->spdid].impact;
					}
					if (tot_impact1 > tot_impact2)
						max_c = c;
				}
			}
		}
		if (max > 0)
			return max_c;
		else
			return NULL;
	} else {
		long min = 0;
		struct thd * t;
		while(1) {
			/* find the component that improves the max tardiness the most */
			t = find_largest_tardiness();
			largest_tardiness = t->tardiness;
			if (largest_tardiness <= 0)
				return NULL;
			for (mgr = 0 ; mgr < NUM_TMEM_MGR ; mgr++) {
				for (c = FIRST_LIST(&components[mgr], next, prev) ;
				     c != &components[mgr] ;
				     c = FIRST_LIST(c, next, prev))  {
					calc_component_max_tardiness(c);
					if (c->add_impact < min || ! max_c) {
						max_c = c;
						min = c->add_impact;
					}
					if (c->add_impact == min && t->comp_info[c->mgr][c->spdid].impact > t->comp_info[max_c->mgr][max_c->spdid].impact) {
						max_c = c;
					}
				}
			}
			if (t->comp_info[max_c->mgr][max_c->spdid].avg_time_blocked && t->comp_info[max_c->mgr][max_c->spdid].impact)
				break;
			else
				/* allocating more tmem cannot benefit the
				 * thread with the current largest tardiness */
				t->tardiness = 0;
		}
		return max_c;
	}
}
struct component *
find_min_tardiness_comp(struct component * c_original)
{
	struct component * c, * min_c = NULL;
	struct thd_comp * tc, * tco;
	struct thd * titer;
	int mgr;

	if (ALGORITHM == AVG) {
		long worsen, min = 0, tmp_tardiness, tot_impact, min_tot_impact = 0;
		unsigned long impact_with_history;
		/* find the component whose loss of one tmem (given to c_original) increases the total tardiness the least */
		for (mgr = 0; mgr < NUM_TMEM_MGR; mgr++) {
			for (c = FIRST_LIST(&components[mgr], next, prev) ;
			     c != &components[mgr] ;
			     c = FIRST_LIST(c, next, prev)) {
				if (c->concur_new == 1 || c == c_original || c->add_in || c->ss_counter + 1 >= c->concur_new)
					continue;
				tot_impact = 0;
				for ( titer = FIRST_LIST(&threads, next, prev) ;
				      titer != &threads ;
				      titer = FIRST_LIST(titer, next, prev)) {
					tc = &titer->comp_info[c->mgr][c->spdid];
					tco = &titer->comp_info[c_original->mgr][c_original->spdid];
					tmp_tardiness = titer->tardiness  - (long)tco->impact;
					impact_with_history = tc->impact ? tc->impact : tc->old_impact;
					tot_impact += impact_with_history;
					if (tmp_tardiness > 0) {
						c->remove_impact += impact_with_history;
					} else {
						if (tmp_tardiness + (long)impact_with_history > 0 && c->concur_new > 1)
							c->remove_impact += tmp_tardiness + (long)impact_with_history;
					}
				}
				worsen = c->remove_impact;
				if (!min_c || worsen < min) {
					min = worsen;
					min_c = c;
					min_tot_impact = tot_impact;
				}
				if (worsen == min && min_tot_impact > tot_impact) {
					min_c = c;
					min_tot_impact = tot_impact;
				}
			}
		}
		if (min_c && min < c_original->add_impact)
			return min_c;
		else
			return NULL;
	} else {
		long largest, impact_largest, tmp_tardiness, min = 0, min_impact = 0;
		unsigned long impact_with_history;
		/* find the component whose loss of one tmem (given to c_original) affects the max tardiness the least */
		for (mgr = 0; mgr < NUM_TMEM_MGR; mgr++) {		
			for( c = FIRST_LIST(&components[mgr], next, prev);
			     c != &components[mgr] ;
			     c = FIRST_LIST(c, next, prev)) {
				if (c->concur_new == 1 || c == c_original || c->add_in || c->ss_counter + 1 >= c->concur_new || (mgr == CBUF_MGR && c->concur_est > 0 && c->concur_new <= CBUF_UNIT))
					continue;
				largest = 0;
				impact_largest = LONG_MIN;
				for ( titer = FIRST_LIST(&threads, next, prev) ;
				      titer != &threads ;
				      titer = FIRST_LIST(titer, next, prev)) {
					tc = &titer->comp_info[c->mgr][c->spdid];
					tco = &titer->comp_info[c_original->mgr][c_original->spdid];
					impact_with_history = tc->impact ? tc->impact : tc->old_impact;
					tmp_tardiness = titer->tardiness + (long)impact_with_history - (long)tco->impact;
					if (tmp_tardiness > largest)
						largest = tmp_tardiness;
					if (impact_with_history && tmp_tardiness > impact_largest)
						impact_largest = tmp_tardiness;
				}
				c->remove_impact = largest;
				if (largest < min || ! min_c) {
					min = largest;
					min_c = c;
					min_impact = impact_largest;
				}
				/* if multiple components affect the
				 * max tardiness equally, we choose
				 * the one that impacts the actual
				 * tardiness the least */
				if (largest == min && impact_largest < min_impact) {
					min_c = c;
					min_impact = impact_largest;
				}
			}
		}
		if (min < largest_tardiness && min_c)
			return min_c;
		else
			return NULL;
	}
}
static void
gather_data(int counter)
{
	struct thd *titer;
	struct component *citer;
	int mgr;

	DOUT("Tmem policy: Gathering data.\n");
	for (titer = FIRST_LIST(&threads, next, prev) ;
	     titer != &threads ;
	     titer = FIRST_LIST(titer, next, prev)) {
		unsigned short int tid = titer->tid;
		struct thd_sched *ts = &titer->sched_info;
		/* Scheduling info */
		ts->misses = periodic_wake_get_misses(tid);
		ts->deadlines = periodic_wake_get_deadlines(tid);
		ts->lateness = periodic_wake_get_lateness(tid);
		ts->miss_lateness = periodic_wake_get_miss_lateness(tid);
		titer->tardiness = ts->miss_lateness > 0 ? ts->miss_lateness : ts->lateness;
		/* printc("Thread %d, period %d, prio %d: %d deadlines, %d misses,"
		       "%ld lateness, %ld miss lateness.\n",
		       tid, ts->period, ts->priority, ts->deadlines,
		       ts->misses, ts->lateness, ts->miss_lateness); */
		if (counter == 0) printc("Thread %d DLM%d, %ld miss\n", tid, ts->misses, ts->miss_lateness);
		/* Component info */
		for (mgr = 0; mgr < NUM_TMEM_MGR; mgr++) {
			for (citer = FIRST_LIST(&components[mgr], next, prev) ;
			     citer != &components[mgr] ;
			     citer = FIRST_LIST(citer, next, prev)) {
				struct thd_comp *tc;

				tc = &titer->comp_info[citer->mgr][citer->spdid];
				assert(tc && tc->c);
				switch (mgr) {
				case STK_MGR:
					collect_stk_blkinfo(tc, tid);
					break;
				case CBUF_MGR:
					collect_cbuf_blkinfo(tc, tid);
					break;
				default: 
					BUG();
				}
				assert(tc->tmem_misses >= 0);
			
				if (counter == 0 && tc->tmem_misses) {
					/* printc("MGR%d Tmem info for spd %d: time blocked %ld, misses %d\n", tc->c->mgr, tc->c->spdid, tc->avg_time_blocked, tc->tmem_misses); */
				}
			}
		}
	}

	for (mgr = 0 ; mgr < NUM_TMEM_MGR ; mgr++) {
		for (citer = FIRST_LIST(&components[mgr], next, prev) ;
		     citer != &components[mgr] ;
		     citer = FIRST_LIST(citer, next, prev)) {
			switch (mgr) {
			case STK_MGR:
				collect_stk_compinfo(citer);
				break;
			case CBUF_MGR:
				collect_cbuf_compinfo(citer);
				break;
			default: 
				BUG();
			}
			/* printc("MGR %d, Spd %d concurrency estimate: %d;alloc %d,ss %d\n", mgr, citer->spdid, citer->concur_est, citer->allocated, citer->ss_counter); */
		}
	}
}
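
To show how the pieces above fit together, here is a rough sketch of one policy iteration: gather statistics, pick the component that would benefit most from an extra tmem, pick the donor whose loss hurts least, and move one tmem between them. transfer_tmem() is a stand-in name for whatever mechanism the real policy uses to adjust allocations.

static void
tmem_policy_iteration_sketch(int counter)
{
	struct component *to, *from;

	gather_data(counter);
	while ((to = find_tardiness_comp()) != NULL) {
		from = find_min_tardiness_comp(to);
		if (!from) break;		/* no donor improves overall tardiness */
		transfer_tmem(from, to);	/* hypothetical: move one tmem item */
	}
}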