Example #1
/* 
 * Precondition: cbuf lock is taken.
 */
struct cbuf_alloc_desc *
__cbuf_alloc_slow(int size, int *len, int tmem)
{
	struct cbuf_alloc_desc *d_prev, *ret = NULL;
	struct cbuf_meta *cm;
	void *addr;
	int cbid;
	int cnt;

	/* printc("on the cbuf slow alloc path (thd %d)\n", cos_get_thd_id()); */
	cnt = cbid = 0;
	do {
		int error = 0;

		CBUF_RELEASE();
		if (tmem) {
			cbid = cbuf_c_create(cos_spd_id(), size, cbid*-1);
			*len = 0; /* tmem */
		} else {
			cbid = __cbufp_alloc_slow(cbid, size, len, &error);
			if (unlikely(error)) {
				CBUF_TAKE();
				ret = NULL;
				goto done;
			}
		}
		CBUF_TAKE();
		/* TODO: we will hold the lock in expand, which calls
		 * the manager...remove that */
		if (cbid < 0 && 
		    cbuf_vect_expand(tmem ? &meta_cbuf : &meta_cbufp, 
				     cbid_to_meta_idx(cbid*-1), tmem) < 0) goto done;
	/* though a long retry chain could in principle be valid, it
	 * almost certainly indicates an error */
	assert(cnt++ < 10);
	} while (cbid < 0);
	assert(cbid);
	cm   = cbuf_vect_lookup_addr(cbid_to_meta_idx(cbid), tmem);
	assert(cm && cm->nfo.c.ptr);
	assert(cm && cm->nfo.c.flags & CBUFM_IN_USE);
	assert(!tmem || cm->owner_nfo.thdid);
	addr = (void*)(cm->nfo.c.ptr << PAGE_ORDER);
	assert(addr);
	/* 
	 * See __cbuf_alloc and cbuf_slab_free.  It is possible that a
	 * slab descriptor will exist for a piece of cbuf memory
	 * _before_ it is allocated because it is actually from a
	 * previous cbuf.  If this is the case, then we should trash
	 * the old one and allocate a new one.
	 */
	/* TODO: check if this is correct. what if this cbuf is from
	 * the local cache and has been taken by another thd? */
	d_prev = __cbuf_alloc_lookup((u32_t)addr>>PAGE_ORDER);
	if (d_prev) __cbuf_desc_free(d_prev);
	ret    = __cbuf_desc_alloc(cbid, size, addr, cm, tmem);
done:   
	return ret;
}
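The negative cbid returned by the manager here is an in-band request: map in the meta entry for that id, then retry. Below is a minimal sketch of the tmem side of that handshake, stripped of the locking and retry accounting above; the helper name is hypothetical.

static int
cbuf_tmem_create_retry(int size)
{
	int cbid = 0;

	do {
		/* a negative return asks us to expand the meta vector
		 * at cbid_to_meta_idx(-cbid) and then call again */
		cbid = cbuf_c_create(cos_spd_id(), size, cbid * -1);
		if (cbid < 0 &&
		    cbuf_vect_expand(&meta_cbuf, cbid_to_meta_idx(cbid * -1), 1) < 0)
			return -1;
	} while (cbid < 0);

	return cbid;
}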
Example #2
static inline int
__cbufp_alloc_slow(int cbid, int size, int *len, int *error)
{
	int amnt = 0, i;
	cbuf_t cb;
	int *cbs;

	assert(cbid <= 0);
	if (cbid == 0) {
		struct cbuf_meta *cm;

		cbs    = cbuf_alloc(PAGE_SIZE, &cb);
		assert(cbs);
		cbs[0] = 0;
		/* Do a garbage collection */
		amnt = cbufp_collect(cos_spd_id(), size, cb);
		if (amnt < 0) {
			*error = 1;
			return -1;
		}

		CBUF_TAKE();
		cbid = cbs[0];
		/* own the cbuf we just collected */
		if (amnt > 0) {
			cm = cbuf_vect_lookup_addr(cbid_to_meta_idx(cbid), 0);
			assert(cm);
			/* (should be atomic) */
			cm->nfo.c.flags |= CBUFM_IN_USE | CBUFM_TOUCHED; 
		}
		/* ...add the rest back into freelists */
		for (i = 1 ; i < amnt ; i++) {
			struct cbuf_alloc_desc *d, *fl;
			struct cbuf_meta *meta;
			int idx = cbid_to_meta_idx(cbs[i]);
			u32_t page;
			void *data;

			assert(idx > 0);
			meta = cbuf_vect_lookup_addr(idx, 0);
			d    = __cbuf_alloc_lookup(meta->nfo.c.ptr);
			assert(d && d->cbid == cbs[i]);
			fl   = d->flhead;
			assert(fl);
			ADD_LIST(fl, d, next, prev);
		}
		CBUF_RELEASE();
		cbuf_free(cbs);
	}
	/* Nothing collected...allocate a new cbufp! */
	if (amnt == 0) {
		cbid = cbufp_create(cos_spd_id(), size, cbid*-1);
		if (cbid == 0) assert(0);
	} 
	/* TODO update correctly */
	*len = 1;

	return cbid;
}
Example #3
vaddr_t
cbuf_map_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	vaddr_t ret = (vaddr_t)NULL;
	struct cbuf_info *cbi;
	int flags;
	
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	assert(cbi);
	if (unlikely(!cbi)) goto done;
	assert(cbi->owner.spdid == s_spd);
	/*
	 * The two low-order bits of d_addr are packed with flags: the
	 * MAPPING mode (0/1), and a flag (2) that is set if valloc
	 * should not be used.
	 */
	flags = d_addr & 0x3;
	d_addr &= ~0x3;
	if (!(flags & 2) && valloc_alloc_at(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE)) goto done;
	if (cbuf_map(d_spd, d_addr, cbi->mem, cbi->size, flags & (MAPPING_READ|MAPPING_RW))) goto free;
	ret = d_addr;
	/*
	 * do not add d_spd to the meta list because the cbuf is not
	 * accessible directly. The s_spd must maintain the necessary info
	 * about the cbuf and its mapping in d_spd.
	 */
done:
	CBUF_RELEASE();
	return ret;
free:
	if (!(flags & 2)) valloc_free(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE);
	goto done;
}
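A caller-side sketch of the flag packing described above. That MAPPING_RW occupies the low mapping bit (the "(0/1)" in the comment) is an assumption, and the helper name is hypothetical; the address hint must be page-aligned so its low bits are free for the flags.

/* hypothetical caller: request a read-write mapping at a page-aligned
 * address, letting the manager run valloc (flag bit 2 left clear) */
static vaddr_t
map_cbuf_rw(spdid_t d_spd, unsigned int cbid, vaddr_t hint)
{
	assert(hint == (vaddr_t)round_to_page(hint));
	return cbuf_map_at(cos_spd_id(), cbid, d_spd, hint | MAPPING_RW);
}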
Example #4
/*
 * Allocate and map the garbage-collection list used for cbuf_collect()
 */
vaddr_t
cbuf_map_collect(spdid_t spdid)
{
	struct cbuf_comp_info *cci;
	vaddr_t ret = (vaddr_t)NULL;

	printl("cbuf_map_collect\n");

	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;

	/* if the mapped page exists already, just return it. */
	if (cci->dest_csp) {
		ret = cci->dest_csp;
		goto done;
	}

	assert(sizeof(struct cbuf_shared_page) <= PAGE_SIZE);
	/* alloc/map is leaked. Where should it be freed/unmapped? */
	if (cbuf_alloc_map(spdid, &cci->dest_csp, (void**)&cci->csp, NULL, PAGE_SIZE, MAPPING_RW)) {
		goto done;
	}
	ret = cci->dest_csp;

	/* initialize a contiguous ck ring */
	assert(cci->csp->ring.size == 0);
	CK_RING_INIT(cbuf_ring, &cci->csp->ring, NULL, CSP_BUFFER_SIZE);

done:
	CBUF_RELEASE();
	return ret;
}
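The page mapped here carries the SPSC ring that cbuf_collect (below) enqueues collected cbids into. A client-side consumption sketch; that CK_RING_DEQUEUE_SPSC mirrors the enqueue macro used in cbuf_collect is an assumption.

static void
drain_collected_sketch(void)
{
	struct cbuf_shared_page *csp;
	struct cbuf_ring_element el;

	csp = (struct cbuf_shared_page *)cbuf_map_collect(cos_spd_id());
	assert(csp);
	/* each dequeued element names a cbid this component now owns */
	while (CK_RING_DEQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) {
		/* push el.cbid onto the local freelist for its size */
	}
}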
Example #5
unsigned long
cbuf_memory_target_get(spdid_t spdid)
{
	struct cbuf_comp_info *cci;
	int ret;
	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, done);
	ret = cci->target_size;
done:
	CBUF_RELEASE();
	return ret;
}
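Note the types here: a failure travels back as -ENOMEM through an unsigned long return value, so a caller has to re-interpret it. A caller-side sketch:

	unsigned long target = cbuf_memory_target_get(spdid);
	if ((long)target < 0) {
		/* error: the value is a negative errno (-ENOMEM) */
	}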
Example #6
int cbuf_fork_spd(spdid_t spd, spdid_t s_spd, spdid_t d_spd)
{
	struct cbuf_comp_info *s_cci, *d_cci;
	int ret = 0;

	printl("cbuf_fork_spd\n");

	CBUF_TAKE();
	s_cci = cbuf_comp_info_get(s_spd);
	if (unlikely(!s_cci)) goto done;
	d_cci = cbuf_comp_info_get(d_spd);
	if (unlikely(!d_cci)) goto done;
	/* FIXME: This should be making copies to avoid sharing */
	__cbuf_copy_cci(s_cci, d_cci);

done:
	CBUF_RELEASE();
	return ret;
}
Example #7
/* target_size is an absolute size */
void
cbuf_mempool_resize(spdid_t spdid, unsigned long target_size)
{
	struct cbuf_comp_info *cci;
	int diff;

	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	target_size = round_up_to_page(target_size);
	diff = (int)(target_size - cci->target_size);
	cci->target_size = target_size;
	if (diff < 0 && cci->allocated_size > cci->target_size) {
		cbuf_shrink(cci, cci->allocated_size - cci->target_size);
	}
	if (diff > 0) cbuf_expand(cci, diff);
done:
	CBUF_RELEASE();
}
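Since target_size is absolute rather than a delta, callers pass the desired pool size directly; a usage sketch:

	/* set the pool target of spdid to 64 pages: expands if the old
	 * target was smaller, shrinks eagerly only if over-allocated */
	cbuf_mempool_resize(spdid, 64 * PAGE_SIZE);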
Example #8
/* map a stack into d_spdid.
 * TODO: use cbufs. */
void *
stkmgr_grant_stack(spdid_t d_spdid)
{
	struct cbuf_comp_info *cci;
	void *p, *ret = NULL;
	vaddr_t d_addr;

	printl("stkmgr_grant_stack (cbuf)\n");

	CBUF_TAKE();
	cci = cbuf_comp_info_get(d_spdid);
	if (!cci) goto done;

	if (cbuf_alloc_map(d_spdid, &d_addr, (void**)&p, NULL, PAGE_SIZE, MAPPING_RW)) goto done;
	ret = (void*)D_COS_STK_ADDR(d_addr);

done:
	CBUF_RELEASE();
	return ret;
}
Example #9
/* 
 * Precondition: cbuf lock is taken.
 *
 * A component has tried to map a cbuf_t to a buffer, but that cbuf
 * isn't mapped into the component.  The component's cache of cbufs
 * had a miss.
 */
int 
__cbuf_2buf_miss(int cbid, int len, int tmem)
{
	struct cbuf_meta *mc;
	int ret;
	void *h;

	/* 
	 * FIXME: This can lead to a DOS where the client passes all
	 * possible cbids to be cbuf2bufed, which will allocate the
	 * entire 4M of meta vectors in this component.  Yuck.
	 * 
	 * Solution: the security mechanisms of the kernel (preventing
	 * cbids being passed to this component if they don't already
	 * belong to the client) should fix this.
	 */
	mc = cbuf_vect_lookup_addr(cbid_to_meta_idx(cbid), tmem);
	/* ...have to expand the cbuf_vect */
	if (unlikely(!mc)) {
		if (cbuf_vect_expand(tmem ? &meta_cbuf : &meta_cbufp, 
				     cbid_to_meta_idx(cbid), tmem)) BUG();
		mc = cbuf_vect_lookup_addr(cbid_to_meta_idx(cbid), tmem);
		assert(mc);
	}

	CBUF_RELEASE();
	if (tmem) ret = cbuf_c_retrieve(cos_spd_id(), cbid, len);
	else      ret = cbufp_retrieve(cos_spd_id(), cbid, len);
	CBUF_TAKE();
	if (unlikely(ret < 0                                   ||
		     mc->sz < len                              ||
		     (tmem && !(mc->nfo.c.flags & CBUFM_TMEM)) || 
		     (!tmem && mc->nfo.c.flags & CBUFM_TMEM))) {
		return -1;
	}
	assert(mc->nfo.c.ptr);
	if (tmem) mc->owner_nfo.thdid = 0;

	return 0;
}
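For context, a hypothetical fast path around this miss handler (the real cbuf2buf wrapper is not among these examples): look the meta up under the lock and upcall the manager only on a miss.

static void *
cbuf2buf_sketch(int cbid, int len, int tmem)
{
	struct cbuf_meta *mc;

	CBUF_TAKE();
	mc = cbuf_vect_lookup_addr(cbid_to_meta_idx(cbid), tmem);
	if (!mc || !mc->nfo.c.ptr) {
		/* miss: have the manager map the cbuf in */
		if (__cbuf_2buf_miss(cbid, len, tmem)) goto err;
		mc = cbuf_vect_lookup_addr(cbid_to_meta_idx(cbid), tmem);
	}
	CBUF_RELEASE();
	return (void *)(mc->nfo.c.ptr << PAGE_ORDER);
err:
	CBUF_RELEASE();
	return NULL;
}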
Example #10
int
cbuf_unmap_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	struct cbuf_info *cbi;
	int ret = 0, err = 0;
	u32_t off;

	assert(d_addr);
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) ERR_THROW(-EINVAL, done);
	if (unlikely(cbi->owner.spdid != s_spd)) ERR_THROW(-EPERM, done);
	assert(cbi->size == round_to_page(cbi->size));
	/* unmap pages in only the d_spd client */
	for (off = 0 ; off < cbi->size ; off += PAGE_SIZE)
		err |= mman_release_page(d_spd, d_addr + off, 0);
	err |= valloc_free(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE);
	if (unlikely(err)) ERR_THROW(-EFAULT, done);
done:
	CBUF_RELEASE();
	return ret;
}
Example #11
/*
 * For a certain principal, collect any cbufs that are unreferenced
 * and not on a free list so that they can be reused.  This is the
 * garbage-collection mechanism.
 *
 * Collect cbufs and add them onto the shared component's ring buffer.
 *
 * This function is semantically complicated.  It can return no cbufs
 * even if some are available, to force the pool of cbufs to be
 * expanded (the client will call cbuf_create in this case).  Or, in
 * the common case, it can return a number of available cbufs.
 */
int
cbuf_collect(spdid_t spdid, unsigned long size)
{
	struct cbuf_info *cbi;
	struct cbuf_comp_info *cci;
	struct cbuf_shared_page *csp;
	struct cbuf_bin *bin;
	int ret = 0;

	printl("cbuf_collect\n");

	CBUF_TAKE();
	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, release);
	tracking_start(&cci->track, CBUF_COLLECT);
	if (size + cci->allocated_size <= cci->target_size) goto done;

	csp  = cci->csp;
	if (unlikely(!csp)) ERR_THROW(-EINVAL, done);

	assert(csp->ring.size == CSP_BUFFER_SIZE);
	ret = CK_RING_SIZE(cbuf_ring, &csp->ring);
	if (ret != 0) goto done;
	/* 
	 * Go through all cbufs we own, and report all of them that
	 * have no current references to them.  Unfortunately, this is
	 * O(N*M), N = min(num cbufs, PAGE_SIZE/sizeof(int)), and M =
	 * num components.
	 */
	size = round_up_to_page(size);
	bin  = cbuf_comp_info_bin_get(cci, size);
	if (!bin) ERR_THROW(0, done);
	cbi  = bin->c;
	do {
		if (!cbi) break;
		/*
		 * Skip cbufs which are on a freelist.  This coordinates
		 * with cbuf_free to detect such cbufs correctly: we must
		 * check the refcnt first and the "next" pointer second.
		 *
		 * If we did not check the refcnt: the manager could read
		 * "next" before cbuf_free runs (while it is still NULL),
		 * then be preempted by the client, which sets "next",
		 * decreases the refcnt, and adds the cbuf to the freelist.
		 * Back in the manager, it would now collect this
		 * in-freelist cbuf.
		 *
		 * Furthermore, we must check the refcnt before "next": if
		 * not, similarly to the above, the manager may be preempted
		 * by the client between checking "next" and the refcnt.  It
		 * would then find that "next" is NULL and the refcnt is 0,
		 * and collect the cbuf.  The short-circuit of && prevents
		 * this reordering.
		 */
		assert(cbi->owner.m);
		if (!CBUF_REFCNT(cbi->owner.m) && !CBUF_IS_IN_FREELIST(cbi->owner.m) &&
		    !cbuf_referenced(cbi)) {
			struct cbuf_ring_element el = { .cbid = cbi->cbid };
			cbuf_references_clear(cbi);
			if (!CK_RING_ENQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) break;
			/*
			 * Prevent other collections from collecting these
			 * cbufs.  The manager checks whether the shared ring
			 * buffer is empty on entry and, if not, just
			 * returns.  That alone is not enough to prevent
			 * double collection.  The corner case is: after the
			 * last element in the ring buffer is dequeued, and
			 * before it is added to the free-list, the manager
			 * runs again and may collect that last one a second
			 * time.
			 */
			cbi->owner.m->next = (struct cbuf_meta *)1;
			if (++ret == CSP_BUFFER_SIZE) break;
		}
		cbi = FIRST_LIST(cbi, next, prev);
	} while (cbi != bin->c);
	if (ret) cbuf_thd_wake_up(cci, ret*size);

done:
	tracking_end(&cci->track, CBUF_COLLECT);
release:
	CBUF_RELEASE();
	return ret;
}
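The ordering argument above implies a matching publication order in the client's cbuf_free, which is not among these examples. A sketch of that counterpart, with assumed names:

static inline void
cbuf_free_publish_sketch(struct cbuf_meta *m, struct cbuf_meta *free_head)
{
	m->next = free_head;        /* 1: publish the freelist link    */
	CBUF_REFCNT_ATOMIC_DEC(m);  /* 2: only then drop the reference */
	/* the manager reads in the opposite order (refcnt, then
	 * "next"), so refcnt == 0 implies the link is already set */
}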

/* 
 * Called by cbuf_deref.
 */
int
cbuf_delete(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	int ret = -EINVAL, sz;

	printl("cbuf_delete\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_DEL);

	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cbi  = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) goto done;
	meta = cbuf_meta_lookup(cci, cbid);

	/*
	 * Other threads can access the meta-data simultaneously; for
	 * example, another thread may call cbuf2buf, which increases
	 * the refcnt.
	 */
	CBUF_REFCNT_ATOMIC_DEC(meta);
	/* Find the owner of this cbuf */
	if (cbi->owner.spdid != spdid) {
		cci = cbuf_comp_info_get(cbi->owner.spdid);
		if (unlikely(!cci)) goto done;
	}
	if (cbuf_free_unmap(cci, cbi)) 	goto done;
	if (cci->allocated_size < cci->target_size) {
		cbuf_thd_wake_up(cci, cci->target_size - cci->allocated_size);
	}
	ret = 0;
done:
	tracking_end(NULL, CBUF_DEL);
	CBUF_RELEASE();
	return ret;
}

/* 
 * Called by cbuf2buf to retrieve a given cbid.
 */
int
cbuf_retrieve(spdid_t spdid, unsigned int cbid, unsigned long size)
{
	struct cbuf_comp_info *cci, *own;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta, *own_meta;
	struct cbuf_maps *map;
	int ret = -EINVAL;

	printl("cbuf_retrieve\n");

	CBUF_TAKE();
	tracking_start(NULL, CBUF_RETRV);

	cci        = cbuf_comp_info_get(spdid);
	if (!cci) {printd("no cci\n"); goto done; }
	cbi        = cmap_lookup(&cbufs, cbid);
	if (!cbi) {printd("no cbi\n"); goto done; }
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) {
		printd("owner\n"); 
		goto done;
	}
	meta       = cbuf_meta_lookup(cci, cbid);
	if (!meta) {printd("no meta\n"); goto done; }
	assert(!(meta->nfo & ~CBUF_INCONSISENT));

	if (size > cbi->size) {printd("too big\n"); goto done; }
	map        = malloc(sizeof(struct cbuf_maps));
	if (!map) {printd("no map\n"); ERR_THROW(-ENOMEM, done); }
	assert(round_to_page(cbi->size) == cbi->size);
	size       = cbi->size;
	/* TODO: change to MAPPING_READ */
	if (cbuf_alloc_map(spdid, &map->addr, NULL, cbi->mem, size, MAPPING_RW)) {
		printc("cbuf mgr map fail spd %d mem %p sz %lu cbid %u\n", spdid, cbi->mem, size, cbid);
		goto free;
	}

	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);
	CBUF_PTR_SET(meta, map->addr);
	map->spdid          = spdid;
	map->m              = meta;
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = cbid;
	own                 = cbuf_comp_info_get(cbi->owner.spdid);
	if (unlikely(!own)) goto done;
	/*
	 * We need to inherit the relinquish bit from the sender. 
	 * Otherwise, this cbuf cannot be returned to the manager. 
	 */
	own_meta            = cbuf_meta_lookup(own, cbid);
	if (CBUF_RELINQ(own_meta)) CBUF_FLAG_ADD(meta, CBUF_RELINQ);
	ret                 = 0;
done:
	tracking_end(NULL, CBUF_RETRV);

	CBUF_RELEASE();
	return ret;
free:
	free(map);
	goto done;
}

vaddr_t
cbuf_register(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info  *cci;
	struct cbuf_meta_range *cmr;
	void *p;
	vaddr_t dest, ret = 0;

	printl("cbuf_register\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_REG);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cmr = cbuf_meta_lookup_cmr(cci, cbid);
	if (cmr) ERR_THROW(cmr->dest, done);

	/* Create the mapping into the client */
	if (cbuf_alloc_map(spdid, &dest, &p, NULL, PAGE_SIZE, MAPPING_RW)) goto done;
	assert((unsigned int)p == round_to_page(p));
	cmr = cbuf_meta_add(cci, cbid, p, dest);
	assert(cmr);
	ret = cmr->dest;
done:
	tracking_end(NULL, CBUF_REG);

	CBUF_RELEASE();
	return ret;
}

static void
cbuf_shrink(struct cbuf_comp_info *cci, int diff)
{
	int i, sz;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi, *next;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz = (int)bin->size;
		if (!bin->c) continue;
		cbi = FIRST_LIST(bin->c, next, prev);
		while (cbi != bin->c) {
			next = FIRST_LIST(cbi, next, prev);
			if (!cbuf_free_unmap(cci, cbi)) {
				diff -= sz;
				if (diff <= 0) return;
			}
			cbi = next;
		}
		if (!cbuf_free_unmap(cci, cbi)) {
			diff -= sz;
			if (diff <= 0) return;
		}
	}
	if (diff > 0) cbuf_mark_relinquish_all(cci);
}
Example #12
int
cbuf_create(spdid_t spdid, unsigned long size, int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	struct cbuf_bin *bin;
	int ret = 0;
	unsigned int id = (unsigned int)cbid;

	printl("cbuf_create\n");
	if (unlikely(cbid < 0)) return 0;
	CBUF_TAKE();
	tracking_start(NULL, CBUF_CRT);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;

	/* 
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		/* TODO: check if have enough free memory: ask mem manager */
		/*memory usage exceeds the target, block this thread*/
		if (size + cci->allocated_size > cci->target_size) {
			cbuf_shrink(cci, size);
			if (size + cci->allocated_size > cci->target_size) {
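				/* note: this path returns without CBUF_RELEASE
				 * or tracking_end; cbuf_thread_block is assumed
				 * to release the cbuf lock before blocking */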
				cbuf_thread_block(cci, size);
				return 0;
			}
		}

		cbi = malloc(sizeof(struct cbuf_info));
		if (unlikely(!cbi)) goto done;
		/* Allocate and map in the cbuf. Discard inconsistent cbufs */
		/* TODO: Find a better way to manage those inconsistent cbufs */
		do {
			id   = cmap_add(&cbufs, cbi);
			meta = cbuf_meta_lookup(cci, id);
		} while(meta && CBUF_INCONSISENT(meta));

		cbi->cbid        = id;
		size             = round_up_to_page(size);
		cbi->size        = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbuf_alloc_map(spdid, &(cbi->owner.addr), 
				   (void**)&(cbi->mem), NULL, size, MAPPING_RW)) {
			goto free;
		}
	} 
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, id);
		if (unlikely(!cbi)) goto done;
		if (unlikely(cbi->owner.spdid != spdid)) goto done;
	}
	meta = cbuf_meta_lookup(cci, id);

	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		ret = (int)id * -1;
		goto done;
	}
	
	/* 
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = id;
	CBUF_FLAG_ADD(meta, CBUF_OWNER);
	CBUF_PTR_SET(meta, cbi->owner.addr);
	CBUF_REFCNT_INC(meta);

	/*
	 * When creating a new cbuf, the manager should be the only
	 * one able to access the meta.
	 */
	/* TODO: malicious client may trigger this assertion, just for debug */
	assert(CBUF_REFCNT(meta) == 1);
	assert(CBUF_PTR(meta));
	cbi->owner.m = meta;

	/*
	 * Install the cbi last.  Otherwise, after we return a negative
	 * cbid, a collection may run and find a dangling cbi.
	 */
	bin = cbuf_comp_info_bin_get(cci, size);
	if (!bin) bin = cbuf_comp_info_bin_add(cci, size);
	if (unlikely(!bin)) goto free;
	if (bin->c) ADD_LIST(bin->c, cbi, next, prev);
	else        bin->c   = cbi;
	cci->allocated_size += size;
	ret = (int)id;
done:
	tracking_end(NULL, CBUF_CRT);
	CBUF_RELEASE();

	return ret;
free:
	cmap_del(&cbufs, id);
	free(cbi);
	goto done;
}
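The negative id returned above ("map in the meta, then ask again") pairs with cbuf_register; a hypothetical client-side glue loop:

static int
cbuf_create_registered(unsigned long size)
{
	int cbid = cbuf_create(cos_spd_id(), size, 0);

	while (cbid < 0) {
		/* the manager asked us to map the meta range for -cbid */
		if (!cbuf_register(cos_spd_id(), (unsigned int)(-cbid)))
			return 0;
		cbid = cbuf_create(cos_spd_id(), size, -cbid);
	}
	return cbid;
}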
Example #13
unsigned long
cbuf_debug_cbuf_info(spdid_t spdid, int index, int p)
{
	unsigned long ret[20], sz;
	struct cbuf_comp_info *cci;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	struct blocked_thd *bthd;
	unsigned long long cur;
	int i;

	CBUF_TAKE();
	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) assert(0);
	memset(ret, 0, sizeof(ret));

	ret[CBUF_TARGET] = cci->target_size;
	ret[CBUF_ALLOC] = cci->allocated_size;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz = bin->size;
		if (!bin->c) continue;
		cbi = bin->c;
		do {
			if (__debug_reference(cbi)) ret[CBUF_USE] += sz;
			else                        ret[CBUF_GARBAGE] += sz;
			meta = cbi->owner.m;
			if (CBUF_RELINQ(meta)) ret[CBUF_RELINQ_NUM]++;
			cbi = FIRST_LIST(cbi, next, prev);
		} while(cbi != bin->c);
	}
	assert(ret[CBUF_USE]+ret[CBUF_GARBAGE] == ret[CBUF_ALLOC]);

	ret[BLK_THD_NUM] = cci->num_blocked_thds;
	if (ret[BLK_THD_NUM]) {
		rdtscll(cur);
		bthd = cci->bthd_list.next;
		while (bthd != &cci->bthd_list) {
			cci->track.blk_tot += (cur-bthd->blk_start);
			ret[CBUF_BLK] += bthd->request_size;
			bthd->blk_start = cur;
			bthd = FIRST_LIST(bthd, next, prev);
		}
	}

	ret[TOT_BLK_TSC] = (unsigned long)cci->track.blk_tot;
	ret[MAX_BLK_TSC] = (unsigned long)cci->track.blk_max;
	ret[TOT_GC_TSC]  = (unsigned long)cci->track.gc_tot;
	ret[MAX_GC_TSC]  = (unsigned long)cci->track.gc_max;
	if (p == 1) {
		printc("target %lu %lu allocate %lu %lu\n", 
			ret[CBUF_TARGET], ret[CBUF_TARGET]/PAGE_SIZE, ret[CBUF_ALLOC], ret[CBUF_ALLOC]/PAGE_SIZE);
		printc("using %lu %lu garbage %lu %lu relinq %lu\n", ret[CBUF_USE], ret[CBUF_USE]/PAGE_SIZE, 
			ret[CBUF_GARBAGE], ret[CBUF_GARBAGE]/PAGE_SIZE, ret[CBUF_RELINQ_NUM]);
		printc("spd %d %lu thd blocked request %d pages %d\n", 
			spdid, ret[BLK_THD_NUM], ret[CBUF_BLK], ret[CBUF_BLK]/PAGE_SIZE);
		printc("spd %d blk_tot %lu blk_max %lu gc_tot %lu gc_max %lu\n", spdid, ret[TOT_BLK_TSC], 
			ret[MAX_BLK_TSC], ret[TOT_GC_TSC], ret[MAX_GC_TSC]);
	}
	if (p == 2) {
		cci->track.blk_tot = cci->track.blk_max = cci->track.gc_tot = cci->track.gc_max = 0;
		cci->track.gc_num = 0;
	}

	CBUF_RELEASE();
	return ret[index];
}