Example #1
int __cbuf_c_delete(struct spd_tmem_info *sti, int cbid, struct cb_desc *d)
{
	struct cb_mapping *m;
	struct spd_tmem_info *map_sti;
	DOUT("_c_delete....cbid %d\n", cbid);
	__spd_cbvect_clean_val(sti, cbid);
	//assert(sti->ci.meta[(cbid-1)].c_0.v == NULL);
	//printc("_c_delete....cbid %d, meta %p\n", cbid, sti->ci.meta[cbid - 1].c_0.v);
	mman_revoke_page(cos_spd_id(), (vaddr_t)d->addr, 0);  // remove all mapped children

	m = FIRST_LIST(&d->owner, next, prev);
	
	while (m != &d->owner) {
		struct cb_mapping *n;

		/* remove from the vector in all mapped spds as well! */
		map_sti = get_spd_info(m->spd);
		DOUT("Clean val in spd %d\n", map_sti->spdid);
		DOUT("Clean: cbid  %d\n",cbid);
		__spd_cbvect_clean_val(map_sti, cbid);

		valloc_free(cos_spd_id(), m->spd, (void *)(m->addr), 1);
		n = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		free(m);
		m = n;
	}
	valloc_free(cos_spd_id(), sti->spdid, (void *)(d->owner.addr), 1);

	DOUT("unmapped is done\n");
	return 0;
}
Example #2
/* the stack should NOT be on the freelist within the spd */
static int
stkmgr_stk_remove_from_spd(struct cos_stk_item *stk_item, struct spd_stk_info *ssi)
{
	spdid_t s_spdid;

	s_spdid = ssi->spdid;
	DOUT("Releasing Stack\n");
	mman_revoke_page(cos_spd_id(), (vaddr_t)(stk_item->hptr), 0); 
	valloc_free(cos_spd_id(), s_spdid, (void*)stk_item->d_addr, 1);
	DOUT("Putting stack back on free list\n");
	
	// cause underflow for MAX Int
	stk_item->parent_spdid = 0;
	
	// Clear our memory to prevent leakage
	memset(stk_item->hptr, 0, PAGE_SIZE);
	
	DOUT("Removing from local list\n");
	// remove from s_spdid's stk_list;
	REM_LIST(stk_item, next, prev);
	ssi->num_allocated--;
	assert(ssi->num_allocated == stkmgr_num_alloc_stks(s_spdid));

	return 0;
}
Example #3
vaddr_t
cbuf_map_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	vaddr_t ret = (vaddr_t)NULL;
	struct cbuf_info *cbi;
	int flags;
	
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	assert(cbi);
	if (unlikely(!cbi)) goto done;
	assert(cbi->owner.spdid == s_spd);
	/*
	 * the low-order bits of the d_addr are packed with the MAPPING flags (0/1)
	 * and a flag (2) set if valloc should not be used.
	 */
	flags = d_addr & 0x3;
	d_addr &= ~0x3;
	if (!(flags & 2) && valloc_alloc_at(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE)) goto done;
	if (cbuf_map(d_spd, d_addr, cbi->mem, cbi->size, flags & (MAPPING_READ|MAPPING_RW))) goto free;
	ret = d_addr;
	/*
	 * do not add d_spd to the meta list because the cbuf is not
	 * accessible directly. The s_spd must maintain the necessary info
	 * about the cbuf and its mapping in d_spd.
	 */
done:
	CBUF_RELEASE();
	return ret;
free:
	if (!(flags & 2)) valloc_free(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE);
	goto done;
}
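The flag packing that cbuf_map_at expects in d_addr can be shown in isolation. The following is a minimal standalone sketch, not part of the cbuf manager: its page size and flag values (SKETCH_FLAG_RW, SKETCH_FLAG_NO_VALLOC) are assumptions chosen only to illustrate stashing two flag bits in the unused low-order bits of a page-aligned address and splitting them back out.

#include <assert.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE      4096UL
#define SKETCH_FLAG_RW        0x1UL  /* assumed: mapping-permission bit */
#define SKETCH_FLAG_NO_VALLOC 0x2UL  /* assumed: "do not use valloc" bit */

int main(void)
{
	unsigned long d_addr = 0x40001000UL;  /* page-aligned destination address */
	unsigned long packed, flags;

	assert((d_addr & (SKETCH_PAGE_SIZE - 1)) == 0);
	/* caller side: fold the flags into the unused low bits */
	packed = d_addr | SKETCH_FLAG_RW | SKETCH_FLAG_NO_VALLOC;

	/* callee side (what cbuf_map_at does): split the flags back out */
	flags  = packed & 0x3;
	d_addr = packed & ~0x3UL;
	printf("addr %#lx flags %#lx\n", d_addr, flags);
	return 0;
}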
Example #4
/* 
 * Map the memory at address p, of size sz, into the component spdid with the
 * given permission flags. If p is NULL, allocate a new piece of memory.
 * Return spdid's address in *daddr and the manager's virtual address in *page.
 */
static int
cbuf_alloc_map(spdid_t spdid, vaddr_t *daddr, void **page, void *p, unsigned long sz, int flags)
{
	vaddr_t dest;
	int ret = 0;
	void *new_p;

	tracking_start(NULL, CBUF_MAP);

	assert(sz == round_to_page(sz));
	if (!p) {
		new_p = page_alloc(sz/PAGE_SIZE);
		assert(new_p);
		memset(new_p, 0, sz);
	} else {
		new_p = p;
	}

	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, sz/PAGE_SIZE);
	if (unlikely(!dest)) goto free;
	if (!cbuf_map(spdid, dest, new_p, sz, flags)) goto done;

free: 
	if (dest) valloc_free(cos_spd_id(), spdid, (void *)dest, sz/PAGE_SIZE);
	if (!p) page_free(new_p, sz/PAGE_SIZE);
	ret = -1;

done:	
	if (page) *page  = new_p;
	*daddr = dest;
	tracking_end(NULL, CBUF_MAP);
	return ret;
}
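The sz == round_to_page(sz) precondition above is ordinary page-granularity rounding. Below is a minimal standalone sketch of that arithmetic, assuming a 4 KiB page and a hypothetical helper name; it is not the manager's round_to_page.

#include <assert.h>

#define SKETCH_PAGE_SIZE 4096UL

/* round a size up to a whole number of pages */
static unsigned long sketch_round_to_page(unsigned long sz)
{
	return (sz + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1);
}

int main(void)
{
	assert(sketch_round_to_page(1)    == 4096);
	assert(sketch_round_to_page(4096) == 4096);
	assert(sketch_round_to_page(4097) == 8192);
	return 0;
}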
Example #5
// all cbufs created for this component
void mgr_map_client_mem(struct cos_cbuf_item *cci, struct spd_tmem_info *sti)
{
	char *l_addr, *d_addr;
	spdid_t d_spdid;
//	struct cb_desc *d;

	assert(sti && cci);
	assert(EMPTY_LIST(cci, next, prev));

	d_spdid = sti->spdid;

	/* TODO: multiple pages cbuf! */
	d_addr = valloc_alloc(cos_spd_id(), sti->spdid, 1);
	l_addr = cci->desc.addr;  //initialized in cos_init()

	assert(d_addr && l_addr); 

	/* ...map it into the requesting component */
	if (unlikely(!mman_alias_page(cos_spd_id(), (vaddr_t)l_addr, d_spdid, (vaddr_t)d_addr))) 
		goto err;
	/* DOUT("<<<MAPPED>>> mgr addr %p client addr %p\n ",l_addr, d_addr); */
	cci->desc.owner.addr = (vaddr_t)d_addr;
	cci->parent_spdid = d_spdid;
	assert(cci->desc.cbid == 0);
	// add the cbuf to shared vect here? now we do it in the client.
	// and l_addr and d_addr have been assigned
done:
	return;
err:
	DOUT("Cbuf mgr: Cannot alias page to client!\n");
	mman_release_page(cos_spd_id(), (vaddr_t)l_addr, 0);
	/* valloc_free(cos_spd_id(), cos_spd_id(), l_addr, 1); */
	valloc_free(cos_spd_id(), d_spdid, (void *)d_addr, 1);
	goto done;
}
Example #6
/* 
 * As clients may be malicious or may not use the protocol correctly, we
 * cannot simply unmap memory here. We guarantee that a fault can only
 * happen within the malicious component; other components either receive
 * a NULL pointer from cbuf2buf or see wrong data, but never fault.
 * See cbuf_unmap_prepare for details.
 */
static int
cbuf_free_unmap(struct cbuf_comp_info *cci, struct cbuf_info *cbi)
{
	struct cbuf_maps *m = &cbi->owner, *next;
	struct cbuf_bin *bin;
	void *ptr = cbi->mem;
	unsigned long off, size = cbi->size;

	if (cbuf_unmap_prepare(cbi)) return 1;

	/* Unmap all of the pages from the clients */
	for (off = 0 ; off < size ; off += PAGE_SIZE) {
		mman_revoke_page(cos_spd_id(), (vaddr_t)ptr + off, 0);
	}

	/* 
	 * Deallocate the virtual address in the client, and cleanup
	 * the memory in this component
	 */
	m = FIRST_LIST(&cbi->owner, next, prev);
	while (m != &cbi->owner) {
		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, size/PAGE_SIZE);
		free(m);
		m = next;
	}
	valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, size/PAGE_SIZE);

	/* deallocate/unlink our data-structures */
	page_free(ptr, size/PAGE_SIZE);
	cmap_del(&cbufs, cbi->cbid);
	cci->allocated_size -= size;
	bin = cbuf_comp_info_bin_get(cci, size);
	if (EMPTY_LIST(cbi, next, prev)) {
		bin->c = NULL;
	} else {
		if (bin->c == cbi) bin->c = cbi->next;
		REM_LIST(cbi, next, prev);
	}
	free(cbi);

	return 0;
}
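The cleanup loop above follows an intrusive-list pattern: the owner record is embedded in struct cbuf_info and serves as the list head, so every node except the owner is unlinked and freed. Below is a standalone sketch of that pattern on a plain circular doubly-linked list; the names are illustrative and are not the FIRST_LIST/REM_LIST macros.

#include <stdlib.h>

struct map_node {
	struct map_node *next, *prev;
	int spdid;
};

/* insert n right after head (ADD_LIST analog) */
static void list_add(struct map_node *head, struct map_node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* unlink and free every node except the embedded owner/head */
static void free_all_but_owner(struct map_node *owner)
{
	struct map_node *m = owner->next, *next;

	while (m != owner) {
		next = m->next;
		m->prev->next = m->next;  /* REM_LIST analog */
		m->next->prev = m->prev;
		free(m);                  /* only heap-allocated mappings are freed */
		m = next;
	}
}

int main(void)
{
	struct map_node owner = { &owner, &owner, 0 };
	int i;

	for (i = 1; i <= 3; i++) {
		struct map_node *n = malloc(sizeof(*n));
		n->spdid = i;
		list_add(&owner, n);
	}
	free_all_but_owner(&owner);  /* the owner is embedded, so it is not freed */
	return 0;
}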
Example #7
int
cbufp_retrieve(spdid_t spdid, int cbid, int len)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	struct cbufp_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -1;

	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;
	cbi = cmap_lookup(&cbufs, cbid);
	if (!cbi) goto done;
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) goto done;
	meta = cbufp_meta_lookup(cci, cbid);
	if (!meta) goto done;

	map        = malloc(sizeof(struct cbufp_maps));
	if (!map) goto done;
	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	if (!dest) goto free;

	map->spdid = spdid;
	map->m     = meta;
	map->addr  = dest;
	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);

	page = cbi->mem;
	assert(page);
	if (dest != mman_alias_page(cos_spd_id(), (vaddr_t)page, spdid, dest)) {
		/* aliasing failed: unlink the mapping record and release the address */
		REM_LIST(map, next, prev);
		valloc_free(cos_spd_id(), spdid, (void *)dest, 1);
		goto free;
	}

	meta->nfo.c.flags |= CBUFM_TOUCHED;
	meta->nfo.c.ptr    = map->addr >> PAGE_ORDER;
	ret                = 0;
done:
	CBUFP_RELEASE();

	return ret;
free:
	free(map);
	goto done;
}
Example #8
int
cbufp_alloc_map(spdid_t spdid, vaddr_t *daddr, void **page, int size)
{
	void *p;
	vaddr_t dest;

	assert(size == PAGE_SIZE);
	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	assert(dest);
	p = alloc_page();
	assert(p);
	memset(p, 0, PAGE_SIZE);
	if (dest != mman_alias_page(cos_spd_id(), (vaddr_t)p, spdid, dest)) {
		/* aliasing failed: release the destination address and the page */
		valloc_free(cos_spd_id(), spdid, (void *)dest, 1);
		free_page(p);
		return -1;
	}
	*page  = p;
	*daddr = dest;

	return 0;
}
Example #9
static void
cbufp_free_unmap(spdid_t spdid, struct cbufp_info *cbi)
{
	struct cbufp_maps *m = &cbi->owner;
	void *ptr = cbi->mem;

	if (cbufp_referenced(cbi)) return;

	do {
		struct cbufp_maps *next;

		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, cbi->size/PAGE_SIZE);
		m = next;
	} while (m != &cbi->owner);

	/* TODO: iterate through the size, and free all... */
	mman_revoke_page(cos_spd_id(), (vaddr_t)ptr, 0);
	//free_page(ptr);
}
Example #10
int
cbuf_unmap_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	struct cbuf_info *cbi;
	int ret = 0, err = 0;
	u32_t off;

	assert(d_addr);
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) ERR_THROW(-EINVAL, done);
	if (unlikely(cbi->owner.spdid != s_spd)) ERR_THROW(-EPERM, done);
	assert(cbi->size == round_to_page(cbi->size));
	/* unmap pages in only the d_spd client */
	for (off = 0 ; off < cbi->size ; off += PAGE_SIZE)
		err |= mman_release_page(d_spd, d_addr + off, 0);
	err |= valloc_free(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE);
	if (unlikely(err)) ERR_THROW(-EFAULT, done);
	assert(!err);
done:
	CBUF_RELEASE();
	return ret;
}
Example #11
vaddr_t
cbuf_c_register(spdid_t spdid, long cbid)
{
	struct spd_tmem_info *sti;
	vaddr_t p, mgr_addr;

	/* DOUT("\nREGISTERED!!!\n"); */
	sti = get_spd_info(spdid);
	
	mgr_addr = (vaddr_t)alloc_page();
	p = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	if (p != mman_alias_page(cos_spd_id(), mgr_addr, spdid, p)) {
		DOUT("mapping failed, p is %p\n", (void *)p);
		valloc_free(cos_spd_id(), spdid, (void *)p, 1);
		return -1;
	}
	sti->managed = 1;
	/* __spd_cbvect_add_range(sti, cbid, (struct cbuf_vect_intern_struct *)mgr_addr); */
	__spd_cbvect_add_range(sti, cbid, mgr_addr);

	return p;
}
Example #12
void *
cbuf_c_retrieve(spdid_t spdid, int cbid, int len)
{
	void *ret = NULL;
	char *l_addr, *d_addr;

	struct cb_desc *d;
	struct cb_mapping *m;

	TAKE();

	d = cos_map_lookup(&cb_ids, cbid);
	/* sanity and access checks */
	if (!d || d->obj_sz < len) goto done;
#ifdef PRINCIPAL_CHECKS
	if (d->principal != cos_get_thd_id()) goto done;
#endif
	/* DOUT("info: thd_id %d obj_size %d addr %p\n", d->principal, d->obj_sz, d->addr); */
	m = malloc(sizeof(struct cb_mapping));
	if (!m) goto done;

	/* u64_t start,end; */
	/* rdtscll(start); */

	INIT_LIST(m, next, prev);

	d_addr = valloc_alloc(cos_spd_id(), spdid, 1);
	l_addr = d->addr;  //cbuf_item addr, initialized in cos_init()
	/* l_addr = d->owner.addr;  // mapped from owner */

	assert(d_addr && l_addr);

	/* rdtscll(end); */
	/* printc("cost of valloc: %lu\n", end-start); */
	/* rdtscll(start); */

	/* if (!mman_alias_page(cos_spd_id(), (vaddr_t)d->addr, spdid, (vaddr_t)page)) goto err; */
	if (unlikely(!mman_alias_page(cos_spd_id(), (vaddr_t)l_addr, spdid, (vaddr_t)d_addr))) {
		printc("No alias!\n");
		goto err;
	}
	/* DOUT("<<<MAPPED>>> mgr addr %p client addr %p\n ",l_addr, d_addr); */

	/* rdtscll(end); */
	/* printc("cost of mman_alias_page: %lu\n", end-start); */

	m->cbd  = d;
	m->spd  = spdid;
	m->addr = (vaddr_t)d_addr;

	//struct cb_mapping *m;
	ADD_LIST(&d->owner, m, next, prev);
	ret = (void *)d_addr;
done:
	RELEASE();
	return ret;
err:
	valloc_free(cos_spd_id(), spdid, d_addr, 1);
	free(m);
	goto done;
}
Example #13
static int shared_page_setup(int thd_id) 
{
	// thd_id is the upcall thread on the server side.

	struct srv_thd_info *thd;
	int cspd, sspd;
	vaddr_t ring_mgr, ring_cli, ring_srv;

	assert(thd_id);

	thd = &srv_thd_info[thd_id];
	cspd = thd->cli_spd_id;
	sspd = thd->srv_spd_id;
	
	if (!cspd || !sspd) goto err;

	ring_mgr = (vaddr_t)alloc_page();
	if (!ring_mgr) {
		printc("par_mgr: alloc ring buffer failed in mgr %ld.\n", cos_spd_id());
		goto err;
	}

	srv_thd_info[thd_id].mgr_ring = ring_mgr;

	ring_cli = (vaddr_t)valloc_alloc(cos_spd_id(), cspd, 1);
	if (unlikely(!ring_cli)) {
		printc("par_mgr: vaddr alloc failed in client comp %d.\n", cspd);
		goto err_cli;
	}
	if (unlikely(ring_cli != mman_alias_page(cos_spd_id(), ring_mgr, cspd, ring_cli, MAPPING_RW))) {
		printc("par_mgr: alias to client %d failed.\n", cspd);
		goto err_cli_alias;
	}
	comp_info[cspd].cli_thd_info[cos_get_thd_id()]->cap_info[thd->cli_cap_id].cap_ring = ring_cli;
	
	ring_srv = (vaddr_t)valloc_alloc(cos_spd_id(), sspd, 1);
	if (unlikely(!ring_srv)) {
		printc("par_mgr: vaddr alloc failed in server comp %d.\n", sspd);
		goto err_srv;
	}
	if (unlikely(ring_srv != mman_alias_page(cos_spd_id(), ring_mgr, sspd, ring_srv, MAPPING_RW))) {
		printc("par_mgr: alias to server %d failed.\n", sspd);
		goto err_srv_alias;
	}
	srv_thd_info[thd_id].srv_ring = ring_srv;

	/* Initialize the ring buffer. Passing NULL because we use
	 * continuous ring (struct + data region). The ring starts
	 * from the second cache line of the page. (First cache line
	 * is used for the server thread active flag) */
	CK_RING_INIT(inv_ring, (CK_RING_INSTANCE(inv_ring) *)((void *)ring_mgr + CACHE_LINE), NULL, 
		     leqpow2((PAGE_SIZE - CACHE_LINE - sizeof(CK_RING_INSTANCE(inv_ring))) / 2 / sizeof(struct inv_data)));
	
	return 0;

err_srv_alias:
	valloc_free(cos_spd_id(), sspd, (void *)ring_srv, 1);
err_srv:
	mman_revoke_page(cos_spd_id(), ring_mgr, 0); 
err_cli_alias:
	valloc_free(cos_spd_id(), cspd, (void *)ring_cli, 1);
err_cli:
	free_page((void *)ring_mgr);
err:	
	return -1;
}
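The slot count passed to CK_RING_INIT above is leqpow2 applied to (PAGE_SIZE - CACHE_LINE - sizeof(ring header)) / 2 / sizeof(struct inv_data). Below is a standalone sketch of that rounding with a hypothetical leqpow2 implementation (its behavior is assumed from the name: the largest power of two not exceeding its argument) and purely illustrative sizes.

#include <assert.h>

/* assumed semantics: largest power of two that is <= n (0 for n == 0) */
static unsigned long sketch_leqpow2(unsigned long n)
{
	unsigned long p = 1;

	if (n == 0) return 0;
	while (p <= n / 2) p <<= 1;
	return p;
}

int main(void)
{
	/* Illustrative sizes only: PAGE_SIZE 4096, CACHE_LINE 64, a 64-byte
	 * ring header, and a 128-byte struct inv_data. Mirroring the
	 * expression above: (4096 - 64 - 64) / 2 / 128 = 15, which rounds
	 * down to 8 slots. */
	assert(sketch_leqpow2(15) == 8);
	assert(sketch_leqpow2(16) == 16);
	assert(sketch_leqpow2(1)  == 1);
	return 0;
}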