Example #1
//  all cbufs that are created for this component
void mgr_map_client_mem(struct cos_cbuf_item *cci, struct spd_tmem_info *sti)
{
	char *l_addr, *d_addr;
	spdid_t d_spdid;
//	struct cb_desc *d;

	assert(sti && cci);
	assert(EMPTY_LIST(cci, next, prev));

	d_spdid = sti->spdid;

	/* TODO: handle multi-page cbufs! */
	d_addr = valloc_alloc(cos_spd_id(), sti->spdid, 1);
	l_addr = cci->desc.addr;  //initialized in cos_init()

	assert(d_addr && l_addr); 

	/* ...map it into the requesting component */
	if (unlikely(!mman_alias_page(cos_spd_id(), (vaddr_t)l_addr, d_spdid, (vaddr_t)d_addr))) 
		goto err;
	/* DOUT("<<<MAPPED>>> mgr addr %p client addr %p\n ",l_addr, d_addr); */
	cci->desc.owner.addr = (vaddr_t)d_addr;
	cci->parent_spdid = d_spdid;
	assert(cci->desc.cbid == 0);
	// add the cbuf to the shared vect here? for now we do it in the client,
	// after l_addr and d_addr have been assigned.
done:
	return;
err:
	DOUT("Cbuf mgr: Cannot alias page to client!\n");
	mman_release_page(cos_spd_id(), (vaddr_t)l_addr, 0);
	/* valloc_free(cos_spd_id(), cos_spd_id(), l_addr, 1); */
	valloc_free(cos_spd_id(), d_spdid, (void *)d_addr, 1);
	goto done;
}
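The same three-step grant pattern recurs throughout these examples: reserve a virtual address in the destination component with valloc_alloc(), alias the manager-owned page into it with mman_alias_page(), and release the reservation with valloc_free() if aliasing fails. A minimal sketch of that pattern, assuming the four-argument mman_alias_page() used above and a hypothetical helper name:

static vaddr_t
alias_one_page(spdid_t dest_spd, vaddr_t mgr_page)
{
	vaddr_t d_addr = (vaddr_t)valloc_alloc(cos_spd_id(), dest_spd, 1);

	if (!d_addr) return 0;
	if (!mman_alias_page(cos_spd_id(), mgr_page, dest_spd, d_addr)) {
		/* undo the address reservation if the alias could not be created */
		valloc_free(cos_spd_id(), dest_spd, (void *)d_addr, 1);
		return 0;
	}
	return d_addr;  /* address of the page inside dest_spd */
}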
Example #2
/* Return the address at which the stack page is mapped into the
 * component */
static vaddr_t
stkmgr_stk_add_to_spd(struct cos_stk_item *stk_item, struct spd_stk_info *info)
{
	vaddr_t d_addr, stk_addr, ret;
	spdid_t d_spdid;
	assert(info && stk_item);
	assert(EMPTY_LIST(stk_item, next, prev));

	d_spdid = info->spdid;
	// FIXME:  Race condition
	ret = d_addr = (vaddr_t)valloc_alloc(cos_spd_id(), d_spdid, 1);
	/* d_addr = info->ci->cos_heap_ptr;  */
	/* info->ci->cos_heap_ptr += PAGE_SIZE; */
	/* ret = info->ci->cos_heap_ptr; */

//	DOUT("Setting flags and assigning flags\n");
	stk_item->stk->flags = 0xDEADBEEF;
	stk_item->stk->next = (void *)0xDEADBEEF;
	stk_addr = (vaddr_t)(stk_item->hptr);
	if(d_addr != mman_alias_page(cos_spd_id(), stk_addr, d_spdid, d_addr)){
		printc("<stkmgr>: Unable to map stack into component");
		BUG();
	}
//	DOUT("Mapped page\n");
	stk_item->d_addr = d_addr;
	stk_item->parent_spdid = d_spdid;
    
	// Add stack to allocated stack array
//	DOUT("Adding to local spdid stk list\n");
	ADD_LIST(&info->stk_list, stk_item, next, prev); 
	info->num_allocated++;
	assert(info->num_allocated == stkmgr_num_alloc_stks(info->spdid));

	return ret;
}
Example #3
/* 
 * Map the memory at address p, of size sz, into component spdid with the
 * given permission flags. If p is NULL, allocate a new piece of memory.
 * On return, *daddr holds spdid's address and *page the manager's virtual
 * address for the mapping.
 */
static int
cbuf_alloc_map(spdid_t spdid, vaddr_t *daddr, void **page, void *p, unsigned long sz, int flags)
{
	vaddr_t dest;
	int ret = 0;
	void *new_p;

	tracking_start(NULL, CBUF_MAP);

	assert(sz == round_to_page(sz));
	if (!p) {
		new_p = page_alloc(sz/PAGE_SIZE);
		assert(new_p);
		memset(new_p, 0, sz);
	} else {
		new_p = p;
	}

	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, sz/PAGE_SIZE);
	if (unlikely(!dest)) goto free;
	if (!cbuf_map(spdid, dest, new_p, sz, flags)) goto done;

free: 
	if (dest) valloc_free(cos_spd_id(), spdid, (void *)dest, sz/PAGE_SIZE);
	if (!p) page_free(new_p, sz/PAGE_SIZE);
	ret = -1;

done:	
	if (page) *page  = new_p;
	*daddr = dest;
	tracking_end(NULL, CBUF_MAP);
	return ret;
}
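A hypothetical caller of cbuf_alloc_map() (it is static, so the caller would live in the same file), illustrating the out-parameter contract described in the comment above; the flags value 0 is illustrative only:

static void *
grant_fresh_page(spdid_t client, vaddr_t *client_addr)
{
	void *mgr_page;

	/* allocate one zeroed page and map it into `client` */
	if (cbuf_alloc_map(client, client_addr, &mgr_page, NULL, PAGE_SIZE, 0)) return NULL;
	return mgr_page;  /* manager-side address of the same page */
}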
Example #4
void read_ltoc(void) 
{
	char *addr, *start;
	unsigned long i, sz;
	unsigned short int bid;
	int direction;
	int channel = COS_TRANS_SERVICE_PONG;
	char buf[512];

	direction = cos_trans_cntl(COS_TRANS_DIRECTION, channel, 0, 0);
	if (direction < 0) {
		channels[channel].exists = 0;
		return;
	}  
	channels[channel].exists = 1;
	channels[channel].direction = direction;

	sz = cos_trans_cntl(COS_TRANS_MAP_SZ, channel, 0, 0);
	assert(sz <= (4*1024*1024)); /* currently 4MB max */
	start = valloc_alloc(cos_spd_id(), cos_spd_id(), sz/PAGE_SIZE);
	assert(start);
	for (i = 0, addr = start ; i < sz ; i += PAGE_SIZE, addr += PAGE_SIZE) {
		assert(!cos_trans_cntl(COS_TRANS_MAP, channel, (unsigned long)addr, i));
	}
	cringbuf_init(&channels[channel].rb, start, sz);

	assert(direction == COS_TRANS_DIR_LTOC);

	bid = cos_brand_cntl(COS_BRAND_CREATE, 0, 0, cos_spd_id());
	assert(bid > 0);
	assert(!cos_trans_cntl(COS_TRANS_BRAND, channel, bid, 0));
	if (sched_add_thd_to_brand(cos_spd_id(), bid, cos_get_thd_id())) BUG();

	while (1) {
		int ret;
		char *p;
		unsigned long long *t, local_t;
//		printc("going to wait for input...\n");
		if (-1 == (ret = cos_brand_wait(bid))) BUG();
		rdtscll(local_t);
		ret = cringbuf_consume(&channels[channel].rb, buf, 512);
		p = buf;
//		while (*p != '\0') {
		t = (unsigned long long *)p;
		meas[idx++] = (local_t - *t);
		assert(local_t > *t);
//		printc("local t %llu, start %llu, diff %u\n", local_t, *t, meas[idx-1]);
		*p = '\0';
//			p++;
//		}

		if (idx == ITER) break;
	}
	report();


	return;
}
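The loop above measures delivery latency by comparing the receiver's timestamp against the producer's timestamp embedded at the start of the message. A self-contained sketch of that measurement, with rdtscll() written out in its classic 32-bit x86 form as a stand-in for the macro used above:

#include <stdint.h>
#include <string.h>

#define rdtscll(v) __asm__ __volatile__("rdtsc" : "=A"(v))

static uint64_t
recv_latency(const char *msg)
{
	uint64_t now, sent;

	rdtscll(now);                       /* receiver's TSC on wakeup */
	memcpy(&sent, msg, sizeof(sent));   /* producer's TSC from the message */
	return now - sent;
}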
Example #5
int
cbufp_retrieve(spdid_t spdid, int cbid, int len)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	struct cbufp_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -1;

	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;
	cbi = cmap_lookup(&cbufs, cbid);
	if (!cbi) goto done;
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) goto done;
	meta = cbufp_meta_lookup(cci, cbid);
	if (!meta) goto done;

	map        = malloc(sizeof(struct cbufp_maps));
	if (!map) goto done;
	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	if (!dest) goto free;

	map->spdid = spdid;
	map->m     = meta;
	map->addr  = dest;
	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);

	page = cbi->mem;
	assert(page);
	if (dest != (mman_alias_page(cos_spd_id(), (vaddr_t)page, spdid, dest))) {
		assert(0);
		valloc_free(cos_spd_id(), spdid, (void *)dest, 1);
	}
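	/* note: if the alias fails and assert() is compiled out, dest has
	 * already been freed here, yet the code below still publishes it
	 * through meta and returns success */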

	meta->nfo.c.flags |= CBUFM_TOUCHED;
	meta->nfo.c.ptr    = map->addr >> PAGE_ORDER;
	ret                = 0;
done:
	CBUFP_RELEASE();

	return ret;
free:
	free(map);
	goto done;
}
Example #6
static int channel_init(int channel)
{
	char *addr, *start;
	unsigned long i, sz;
	int acap, srv_acap;
	int direction;

	direction = cos_trans_cntl(COS_TRANS_DIRECTION, channel, 0, 0);
	if (direction < 0) {
		channels[channel].exists = 0;
		return 0;
	}  
	channels[channel].exists = 1;
	channels[channel].direction = direction;

	sz = cos_trans_cntl(COS_TRANS_MAP_SZ, channel, 0, 0);
	assert(sz <= (4*1024*1024)); /* currently 4MB max */
	start = valloc_alloc(cos_spd_id(), cos_spd_id(), sz/PAGE_SIZE);
	assert(start);
	for (i = 0, addr = start ; i < sz ; i += PAGE_SIZE, addr += PAGE_SIZE) {
		assert(!cos_trans_cntl(COS_TRANS_MAP, channel, (unsigned long)addr, i));
	}
	cringbuf_init(&channels[channel].rb, start, sz);

	if (direction == COS_TRANS_DIR_LTOC) {
		acap = cos_async_cap_cntl(COS_ACAP_CREATE, cos_spd_id(), cos_spd_id(), 
					  cos_get_thd_id() << 16 | cos_get_thd_id());
		assert(acap);
		/* The client acap is not used; the Linux thread will trigger
		 * the acap. We set the client acap owner to the current
		 * thread for access control only. */
		srv_acap = acap & 0xFFFF;
		cos_trans_cntl(COS_TRANS_ACAP, channel, srv_acap, 0);

		while (1) {
			int ret;
			if (-1 == (ret = cos_ainv_wait(srv_acap))) BUG();
			assert(channels[channel].t);
			evt_trigger(cos_spd_id(), channels[channel].t->evtid);
		}
	}

	return 0;
}
Example #7
int
cbufp_alloc_map(spdid_t spdid, vaddr_t *daddr, void **page, int size)
{
	void *p;
	vaddr_t dest;

	assert(size == PAGE_SIZE);
	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	assert(dest);
	p = alloc_page();
	assert(p);
	memset(p, 0, PAGE_SIZE);
	if (dest != (mman_alias_page(cos_spd_id(), (vaddr_t)p, spdid, dest))) {
		assert(0);
		valloc_free(cos_spd_id(), spdid, (void *)dest, 1);
	}
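	/* note: if the alias fails and assert() is compiled out, dest has
	 * already been freed here, yet it is still handed back through
	 * *daddr below */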
	*page  = p;
	*daddr = dest;

	return 0;
}
Example #8
static int channel_init(int channel)
{
	char *addr, *start;
	unsigned long i, sz;
	unsigned short int bid;
	int direction;

	direction = cos_trans_cntl(COS_TRANS_DIRECTION, channel, 0, 0);
	if (direction < 0) {
		channels[channel].exists = 0;
		return 0;
	}  
	channels[channel].exists = 1;
	channels[channel].direction = direction;

	sz = cos_trans_cntl(COS_TRANS_MAP_SZ, channel, 0, 0);
	assert(sz <= (4*1024*1024)); /* currently 4MB max */
	start = valloc_alloc(cos_spd_id(), cos_spd_id(), sz/PAGE_SIZE);
	assert(start);
	for (i = 0, addr = start ; i < sz ; i += PAGE_SIZE, addr += PAGE_SIZE) {
		assert(!cos_trans_cntl(COS_TRANS_MAP, channel, (unsigned long)addr, i));
	}
	cringbuf_init(&channels[channel].rb, start, sz);

	if (direction == COS_TRANS_DIR_LTOC) {
		bid = cos_brand_cntl(COS_BRAND_CREATE, 0, 0, cos_spd_id());
		assert(bid > 0);
		assert(!cos_trans_cntl(COS_TRANS_BRAND, channel, bid, 0));
		if (sched_add_thd_to_brand(cos_spd_id(), bid, cos_get_thd_id())) BUG();
		while (1) {
			int ret;
			if (-1 == (ret = cos_brand_wait(bid))) BUG();
			assert(channels[channel].t);
			evt_trigger(cos_spd_id(), channels[channel].t->evtid);
		}
	}


	return 0;
}
Example #9
vaddr_t
cbuf_c_register(spdid_t spdid, long cbid)
{
	struct spd_tmem_info *sti;
	vaddr_t p, mgr_addr;

	/* DOUT("\nREGISTERED!!!\n"); */
	sti = get_spd_info(spdid);
	
	mgr_addr = (vaddr_t)alloc_page();
	p = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	if (p !=
	    (mman_alias_page(cos_spd_id(), mgr_addr, spdid, p))) {
		DOUT("mapped faied p is %p\n",(void *)p);
		valloc_free(cos_spd_id(), spdid, (void *)p, 1);
		return -1;
	}
	sti->managed = 1;
	/* __spd_cbvect_add_range(sti, cbid, (struct cbuf_vect_intern_struct *)mgr_addr); */
	__spd_cbvect_add_range(sti, cbid, mgr_addr);

	return p;
}
Example #10
vaddr_t ec3_ser3_test(void)
{
	/* do not return valloc address */
	vaddr_t ret = (vaddr_t)valloc_alloc(cos_spd_id(), cos_spd_id(), 1);
	return ret;
}
Example #11
void 
cos_init(void *d)
{
	DOUT("CBUFMgr: %d in spd %ld cbuf mgr running.....\n", cos_get_thd_id(), cos_spd_id());
	LOCK_INIT();
	cos_map_init_static(&cb_ids);
	BUG_ON(cos_map_add(&cb_ids, NULL)); /* reserve id 0 */
	int i;

	memset(spd_tmem_info_list, 0, sizeof(struct spd_tmem_info) * MAX_NUM_SPDS);
    
	for(i = 0; i < MAX_NUM_SPDS; i++){
		spd_tmem_info_list[i].spdid = i;    
		INIT_LIST(&spd_tmem_info_list[i].ci, next, prev);
		INIT_LIST(&spd_tmem_info_list[i].tmem_list, next, prev);
		INIT_LIST(&spd_tmem_info_list[i].bthd_list, next, prev);
	}

	free_tmem_list = NULL;
	INIT_LIST(&global_blk_list, next, prev);

	tmems_allocated = 0;

	// Map all of the spds we can into this component
	for (i = 0 ; i < MAX_NUM_SPDS ; i++) {
		spdid_t spdid = i;

		void *hp;
		hp = valloc_alloc(cos_spd_id(), cos_spd_id(), 1);
		spdid = cinfo_get_spdid(i);
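		/* note: if the break below is taken, the page reserved in hp
		 * via valloc_alloc() above is never released */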
		if (!spdid) break;

		if(cinfo_map(cos_spd_id(), (vaddr_t)hp, spdid)){
			DOUT("Could not map cinfo page for %d\n", spdid);
			BUG();
		}
		/* spd_tmem_info_list[spdid].ci = hp;  */
		spd_tmem_info_list[spdid].ci.spd_cinfo_page = hp;
		/* spd_tmem_info_list[spdid].spd_cinfo_page = hp; */

		spd_tmem_info_list[spdid].ci.meta = NULL; 
		spd_tmem_info_list[spdid].managed = 1;

		spd_tmem_info_list[spdid].relinquish_mark = 0; 
    
		tmems_target += DEFAULT_TARGET_ALLOC;
		spd_tmem_info_list[spdid].num_allocated = 0;
		spd_tmem_info_list[spdid].num_desired = DEFAULT_TARGET_ALLOC;
		spd_tmem_info_list[spdid].num_blocked_thds = 0;
		spd_tmem_info_list[spdid].num_waiting_thds = 0;
		spd_tmem_info_list[spdid].num_glb_blocked = 0;
		spd_tmem_info_list[spdid].ss_counter = 0;
		spd_tmem_info_list[spdid].ss_max = MAX_NUM_MEM;
		empty_comps++;
	}
	over_quota_total = 0;
	over_quota_limit = MAX_NUM_MEM;

	event_waiting();
	return;
}
Example #12
void *
cbuf_c_retrieve(spdid_t spdid, int cbid, int len)
{
	void *ret = NULL;
	char *l_addr, *d_addr;

	struct cb_desc *d;
	struct cb_mapping *m;

	TAKE();

	d = cos_map_lookup(&cb_ids, cbid);
	/* sanity and access checks */
	if (!d || d->obj_sz < len) goto done;
#ifdef PRINCIPAL_CHECKS
	if (d->principal != cos_get_thd_id()) goto done;
#endif
	/* DOUT("info: thd_id %d obj_size %d addr %p\n", d->principal, d->obj_sz, d->addr); */
	m = malloc(sizeof(struct cb_mapping));
	if (!m) goto done;

	/* u64_t start,end; */
	/* rdtscll(start); */

	INIT_LIST(m, next, prev);

	d_addr = valloc_alloc(cos_spd_id(), spdid, 1);
	l_addr = d->addr;  //cbuf_item addr, initialized in cos_init()
	/* l_addr = d->owner.addr;  // mapped from owner */

	assert(d_addr && l_addr);

	/* rdtscll(end); */
	/* printc("cost of valloc: %lu\n", end-start); */
	/* rdtscll(start); */

	/* if (!mman_alias_page(cos_spd_id(), (vaddr_t)d->addr, spdid, (vaddr_t)page)) goto err; */
	if (unlikely(!mman_alias_page(cos_spd_id(), (vaddr_t)l_addr, spdid, (vaddr_t)d_addr))) {
		printc("No alias!\n");
		goto err;
	}
	/* DOUT("<<<MAPPED>>> mgr addr %p client addr %p\n ",l_addr, d_addr); */

	/* rdtscll(end); */
	/* printc("cost of mman_alias_page: %lu\n", end-start); */

	m->cbd  = d;
	m->spd  = spdid;
	m->addr = (vaddr_t)d_addr;

	//struct cb_mapping *m;
	ADD_LIST(&d->owner, m, next, prev);
	ret = (void *)d_addr;
done:
	RELEASE();
	return ret;
err:
	valloc_free(cos_spd_id(), spdid, d_addr, 1);
	free(m);
	goto done;
}
Example #13
static int shared_page_setup(int thd_id) 
{
	// thd_id is the upcall thread on the server side.

	struct srv_thd_info *thd;
	int cspd, sspd;
	vaddr_t ring_mgr, ring_cli, ring_srv;

	assert(thd_id);

	thd = &srv_thd_info[thd_id];
	cspd = thd->cli_spd_id;
	sspd = thd->srv_spd_id;
	
	if (!cspd || !sspd) goto err;

	ring_mgr = (vaddr_t)alloc_page();
	if (!ring_mgr) {
		printc("par_mgr: alloc ring buffer failed in mgr %ld.\n", cos_spd_id());
		goto err;
	}

	srv_thd_info[thd_id].mgr_ring = ring_mgr;

	ring_cli = (vaddr_t)valloc_alloc(cos_spd_id(), cspd, 1);
	if (unlikely(!ring_cli)) {
		printc("par_mgr: vaddr alloc failed in client comp %d.\n", cspd);
		goto err_cli;
	}
	if (unlikely(ring_cli != mman_alias_page(cos_spd_id(), ring_mgr, cspd, ring_cli, MAPPING_RW))) {
		printc("par_mgr: alias to client %d failed.\n", cspd);
		goto err_cli_alias;
	}
	comp_info[cspd].cli_thd_info[cos_get_thd_id()]->cap_info[thd->cli_cap_id].cap_ring = ring_cli;
	
	ring_srv = (vaddr_t)valloc_alloc(cos_spd_id(), sspd, 1);
	if (unlikely(!ring_srv)) {
		printc("par_mgr: vaddr alloc failed in server comp %d.\n", sspd);
		goto err_srv;
	}
	if (unlikely(ring_srv != mman_alias_page(cos_spd_id(), ring_mgr, sspd, ring_srv, MAPPING_RW))) {
		printc("par_mgr: alias to server %d failed.\n", sspd);
		goto err_srv_alias;
	}
	srv_thd_info[thd_id].srv_ring = ring_srv;

	/* Initialize the ring buffer. Passing NULL because we use
	 * continuous ring (struct + data region). The ring starts
	 * from the second cache line of the page. (First cache line
	 * is used for the server thread active flag) */
	CK_RING_INIT(inv_ring, (CK_RING_INSTANCE(inv_ring) *)((void *)ring_mgr + CACHE_LINE), NULL, 
		     leqpow2((PAGE_SIZE - CACHE_LINE - sizeof(CK_RING_INSTANCE(inv_ring))) / 2 / sizeof(struct inv_data)));
	
	return 0;

err_srv_alias:
	valloc_free(cos_spd_id(), sspd, (void *)ring_srv, 1);
err_srv:
	mman_revoke_page(cos_spd_id(), ring_mgr, 0); 
err_cli_alias:
	valloc_free(cos_spd_id(), cspd, (void *)ring_cli, 1);
err_cli:
	free_page((void *)ring_mgr);
err:	
	return -1;
}
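shared_page_setup() unwinds its error path with a cascade of labels: each failure jumps to the label that releases exactly what was acquired before it, and the labels fall through in reverse acquisition order. A generic, self-contained sketch of that idiom using hypothetical resources (plain malloc() here, not the Composite calls above):

#include <stdlib.h>

/* acquire two resources; on any failure, release what was already acquired */
static int
setup_pair(void **a, void **b)
{
	*a = malloc(64);
	if (!*a) goto err;
	*b = malloc(64);
	if (!*b) goto err_free_a;
	return 0;

err_free_a:
	free(*a);
err:
	return -1;
}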