Example #1
int valloc_alloc_at(spdid_t spdid, spdid_t dest, void *addr, unsigned long npages)
{
	int ret = -1, i = 0;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	unsigned long off, ext_size;

	LOCK();
	trac = cos_vect_lookup(&spd_vect, dest);
	if (!trac) {
		if (__valloc_init(dest) ||
		    !(trac = cos_vect_lookup(&spd_vect, dest))) goto done;
	}

	if (unlikely(npages > MAP_MAX * sizeof(u32_t))) {
		printc("valloc: cannot alloc more than %u bytes in one time!\n", 32 * WORDS_PER_PAGE * PAGE_SIZE);
		goto done;
	}

	/* search the existing extents for one that contains addr */
	while (trac->extents[i].map) {
		if (addr < trac->extents[i].start || addr > trac->extents[i].end) {
			if (++i == MAX_SPD_VAS_LOCATIONS) goto done;
			continue;
		}
		/* the address is in the range of an existing extent */
		occ = trac->extents[i].map;
		off = ((char*)addr - (char*)trac->extents[i].start) / PAGE_SIZE;
		assert(off + npages < MAP_MAX * sizeof(u32_t));
		ret = bitmap_extent_set_at(&occ->pgd_occupied[0], off, npages, MAP_MAX);
		goto done;
	}

	ext_size = round_up_to_pgd_page(npages * PAGE_SIZE);
	trac->extents[i].map = alloc_page();
	occ = trac->extents[i].map;
	assert(occ);
	if (vas_mgr_take(spdid, dest, (vaddr_t)addr, ext_size) == 0) goto free;
	trac->extents[i].start = addr;
	trac->extents[i].end = (void *)((uintptr_t)addr + ext_size);
	bitmap_set_contig(&occ->pgd_occupied[0], 0, ext_size / PAGE_SIZE, 1);
	bitmap_set_contig(&occ->pgd_occupied[0], 0, npages, 0);
	ret = 0;
done:
	UNLOCK();
	return ret;
free:
	free_page(trac->extents[i].map);
	trac->extents[i].map = NULL;
	goto done;
}
Example #2
void *valloc_alloc(spdid_t spdid, spdid_t dest, unsigned long npages)
{
	/* JWW print out a few things : spdid, heap ptr, make sure the heap ptr is sane */

	void *ret = NULL;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	long off;
	/*JWW REMOVE THIS */
	struct cos_component_information *ci;
	unsigned long page_off;
	void *hp;
	/* /JWW */

	LOCK();
	/*JWW REMOVE THIS */
	ci = cos_get_vas_page();
	if (cinfo_map(cos_spd_id(), (vaddr_t)ci, spdid)) {
		/* error: release the page and bail out rather than using it below */
		cos_release_vas_page(ci);
		printc("CINFO_MAP ERROR\n");
		goto done;
	}
	hp = (void*)ci->cos_heap_ptr;
	// now print some things out.
	//	printc("valloc alloc heap_ptr: %x, ucap_tbl: %x, npages: %ul \n", (unsigned int) hp, (unsigned int) ci->cos_user_caps, npages);
	/* /JWW */

	page_off = ((unsigned long)hp - (unsigned long)round_to_pgd_page(hp))/PAGE_SIZE;

	trac = cos_vect_lookup(&spd_vect, dest);
	if (!trac) {
		printc("valloc init being called\n");
		if (__valloc_init(dest) ||
		    !(trac = cos_vect_lookup(&spd_vect, dest))) goto done;
	}
	//	printc("valloc alloc past init\n");
	
	occ = trac->map;
	assert(occ);
	//	off = bitmap_extent_find_set(&occ->pgd_occupied[0], page_off, npages, MAP_MAX);
	off = bitmap_extent_find_set(&occ->pgd_occupied[0], 0, npages, MAP_MAX);
	if (off < 0) goto done;
	ret = ((char *)trac->extents[0].start) + (off * PAGE_SIZE);
done:   
	//	printc("valloc alloc returning %x\n", (unsigned int) ret);
	UNLOCK();
	return ret;
}
Example #3
int valloc_free(spdid_t spdid, spdid_t dest, void *addr, unsigned long npages)
{
	int ret = -1;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	unsigned long off;

	LOCK();
	trac = cos_vect_lookup(&spd_vect, dest);
	if (!trac) goto done;

	int i = 0;
	/* locate the address to be freed in which range (extents) */
	while (addr < trac->extents[i].start || addr > trac->extents[i].end) {
		if (++i == MAX_SPD_VAS_LOCATIONS) goto done;
	}
	occ = trac->extents[i].map;
	assert(occ);
	off = ((char *)addr - (char *)trac->extents[i].start) / PAGE_SIZE;
	assert(off + npages < MAP_MAX * sizeof(u32_t));
	bitmap_set_contig(&occ->pgd_occupied[0], off, npages, 1);
	ret = 0;
done:	
	UNLOCK();
	return ret;
}
Example #4
void *valloc_alloc(spdid_t spdid, spdid_t dest, unsigned long npages)
{
	void *ret = NULL;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	unsigned long ext_size;
	long off, i = 0;

	LOCK();

	trac = cos_vect_lookup(&spd_vect, dest);
	if (!trac) {
		if (__valloc_init(dest) ||
		    !(trac = cos_vect_lookup(&spd_vect, dest))) goto done;
	}

	if (unlikely(npages > MAP_MAX * sizeof(u32_t))) {
		printc("valloc: cannot alloc more than %u bytes in one time!\n", 32 * WORDS_PER_PAGE * PAGE_SIZE);
		goto done;
	}

	/* first try to satisfy the request from an existing extent */
	while (trac->extents[i].map) {
		occ = trac->extents[i].map;
		off = bitmap_extent_find_set(&occ->pgd_occupied[0], 0, npages, MAP_MAX);
		if (off < 0) {
			if (++i == MAX_SPD_VAS_LOCATIONS) goto done;
			continue;
		}
		ret = (void *)((char *)trac->extents[i].start + off * PAGE_SIZE);
		goto done;
	}

	ext_size = round_up_to_pgd_page(npages * PAGE_SIZE);
	trac->extents[i].map = alloc_page();
	occ = trac->extents[i].map;
	assert(occ);
	/* vas_mgr_expand() is assumed to return 0 on failure, as vas_mgr_take()
	 * does in Example #1 */
	trac->extents[i].start = (void *)vas_mgr_expand(spdid, dest, ext_size);
	if (!trac->extents[i].start) {
		free_page(trac->extents[i].map);
		trac->extents[i].map = NULL;
		goto done;
	}
	trac->extents[i].end = (void *)((char *)trac->extents[i].start + ext_size);
	bitmap_set_contig(&occ->pgd_occupied[0], 0, ext_size / PAGE_SIZE, 1);
	bitmap_set_contig(&occ->pgd_occupied[0], 0, npages, 0);
	ret = trac->extents[i].start;
done:
	UNLOCK();
	return ret;
}
Example #5
static int __valloc_init(spdid_t spdid)
{
	int ret = -1;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	struct cos_component_information *ci;
	unsigned long page_off;
	void *hp;

	if (cos_vect_lookup(&spd_vect, spdid)) goto success;
	trac = malloc(sizeof(struct spd_vas_tracker));
	if (!trac) goto done;

	occ = alloc_page();
	if (!occ) goto err_free1;
	
	ci = cos_get_vas_page();
	if (cinfo_map(cos_spd_id(), (vaddr_t)ci, spdid)) goto err_free2;
	hp = (void*)ci->cos_heap_ptr;
	//	printc("valloc init heap_ptr: %x\n", (unsigned int) hp);

	trac->spdid            = spdid;
	trac->ci               = ci;
	trac->map              = occ;
	trac->extents[0].start = (void*)round_to_pgd_page(hp);
	trac->extents[0].end   = (void*)round_up_to_pgd_page(hp);
	page_off = ((unsigned long)hp - (unsigned long)round_to_pgd_page(hp))/PAGE_SIZE;
	bitmap_set_contig(&occ->pgd_occupied[0], page_off, (PGD_SIZE/PAGE_SIZE)-page_off, 1);

	cos_vect_add_id(&spd_vect, trac, spdid);
	assert(cos_vect_lookup(&spd_vect, spdid));
success:
	//	printc("valloc init success\n");
	ret = 0;
done:
	return ret;
err_free2:
	cos_release_vas_page(ci);
	free_page(occ);
err_free1:
	free(trac);
	goto done;
}
Example #6
static int rem_thd_map(unsigned short int tid)
{
	struct thd_map *tm;

	tm = cos_vect_lookup(&tmap, tid);
	if (NULL == tm) return -1;
	if (cos_vect_del(&tmap, tid)) return -1;
	free(tm);

	return 0;
}
int cinfo_map(spdid_t spdid, vaddr_t map_addr, spdid_t target)
{
	vaddr_t cinfo_addr;

	cinfo_addr = (vaddr_t)cos_vect_lookup(&spd_info_addresses, target);
	if (0 == cinfo_addr) return -1;
	if (map_addr != 
	    (mman_alias_page(cos_spd_id(), cinfo_addr, spdid, map_addr))) {
		return -1;
	}

	return 0;
}
Example #8
static int __valloc_init(spdid_t spdid)
{
	int ret = -1;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	unsigned long page_off;
	void *hp;

	if (cos_vect_lookup(&spd_vect, spdid)) goto success;
	trac = malloc(sizeof(struct spd_vas_tracker));
	if (!trac) goto done;

	occ = alloc_page();
	if (!occ) goto err_free1;
	
	hp = cinfo_get_heap_pointer(cos_spd_id(), spdid);
	if (!hp) goto err_free2;

	trac->spdid            = spdid;
	trac->map              = occ;
	trac->extents[0].start = (void*)round_to_pgd_page(hp);
	trac->extents[0].end   = (void*)round_up_to_pgd_page(hp);
	trac->extents[0].map   = occ;
	page_off = ((unsigned long)hp - (unsigned long)round_to_pgd_page(hp))/PAGE_SIZE;
	bitmap_set_contig(&occ->pgd_occupied[0], page_off, (PGD_SIZE/PAGE_SIZE)-page_off, 1);
	bitmap_set_contig(&occ->pgd_occupied[0], 0, page_off, 0);

	cos_vect_add_id(&spd_vect, trac, spdid);
	assert(cos_vect_lookup(&spd_vect, spdid));
success:
	ret = 0;
done:
	return ret;
err_free2:
	free_page(occ);
err_free1:
	free(trac);
	goto done;
}
Example #9
/* 
 * FIXME: to make this predictable (avoid memory allocation in the
 * must-be-predictable case, we should really cos_vect_add_id when we
 * first find out about the possibility of the thread making any
 * invocations.
 */
static struct blocked_thds *bt_get(unsigned short int tid)
{
	struct blocked_thds *bt;

	bt = cos_vect_lookup(&bthds, tid);
	if (NULL == bt) {
		bt = malloc(sizeof(struct blocked_thds));
		if (NULL == bt) return NULL;
		INIT_LIST(bt, next, prev);
		bt->thd_id = tid;
		if (tid != cos_vect_add_id(&bthds, bt, tid)) {
			free(bt);
			return NULL;
		}
	}
	return bt;
}
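
The FIXME above points out that bt_get() allocates on the lookup path, which undermines predictability; the same concern applies to __te_get() in Example #10. Below is a minimal sketch of the suggested alternative, using only the cos_vect calls and struct blocked_thds fields shown in these examples; the registration hook itself (bt_register) is hypothetical.

/* Hypothetical eager-registration hook: call it once, as soon as the
 * thread is known to be able to invoke this component, so that the
 * timing-sensitive path never needs malloc(). */
static int bt_register(unsigned short int tid)
{
	struct blocked_thds *bt;

	if (cos_vect_lookup(&bthds, tid)) return 0; /* already registered */
	bt = malloc(sizeof(struct blocked_thds));
	if (NULL == bt) return -1;
	INIT_LIST(bt, next, prev);
	bt->thd_id = tid;
	if (tid != cos_vect_add_id(&bthds, bt, tid)) {
		free(bt);
		return -1;
	}
	return 0;
}

/* The predictable path then reduces to a plain lookup; a miss becomes
 * an error instead of a trigger for allocation. */
static struct blocked_thds *bt_lookup(unsigned short int tid)
{
	return cos_vect_lookup(&bthds, tid);
}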
Example #10
void *valloc_alloc(spdid_t spdid, spdid_t dest, unsigned long npages)
{
	void *ret = NULL;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	long off;

	LOCK();

	trac = cos_vect_lookup(&spd_vect, dest);
	if (!trac) {
		if (__valloc_init(dest) ||
		    !(trac = cos_vect_lookup(&spd_vect, dest))) goto done;
	}

	occ = trac->map;
	assert(occ);
	off = bitmap_extent_find_set(&occ->pgd_occupied[0], 0, npages, MAP_MAX);
	if (off < 0) goto done;
	ret = ((char *)trac->extents[0].start) + (off * PAGE_SIZE);
done:   
	UNLOCK();
	return ret;
}
/* 
 * FIXME: to make this predictable (avoid memory allocation in the
 * must-be-predictable case, we should really cos_vect_add_id when we
 * first find out about the possibility of the thread making any
 * invocations.
 */
static struct thread_event *__te_get(unsigned short int tid, cos_vect_t *v)
{
	struct thread_event *te;

	te = cos_vect_lookup(v, tid);
	if (NULL == te) {
		te = malloc(sizeof(struct thread_event));
		if (NULL == te) return NULL;
		memset(te, 0, sizeof(struct thread_event));
		te->thread_id = tid;
		INIT_LIST(te, next, prev);
		if (tid != cos_vect_add_id(v, te, tid)) {
			free(te);
			return NULL;
		}
	}
	return te;
}
Example #12
int valloc_free(spdid_t spdid, spdid_t dest, void *addr, unsigned long npages)
{
	int ret = -1;
	struct spd_vas_tracker *trac;
	struct spd_vas_occupied *occ;
	unsigned long off;

	LOCK();
	trac = cos_vect_lookup(&spd_vect, dest);
	if (!trac) goto done;
	occ = trac->map;
	assert(occ);
	off = ((char *)addr - (char *)trac->extents[0].start)/PAGE_SIZE;
	assert(off+npages < MAP_MAX*sizeof(u32_t));
	bitmap_set_contig(&occ->pgd_occupied[0], off, npages, 1);
	ret = 0;
done:	
	UNLOCK();
	return ret;
}
static void boot_symb_process(struct cobj_header *h, spdid_t spdid, vaddr_t heap_val, char *mem, 
			      vaddr_t d_addr, vaddr_t symb_addr)
{
	if (round_to_page(symb_addr) == d_addr) {
		struct cos_component_information *ci;
		
		ci = (struct cos_component_information*)(mem + ((PAGE_SIZE-1) & symb_addr));
//		ci->cos_heap_alloc_extent = ci->cos_heap_ptr;
//		ci->cos_heap_allocated = heap_val;
		if (!ci->cos_heap_ptr) ci->cos_heap_ptr = heap_val;
		ci->cos_this_spd_id = spdid;

		/* save the address of this page for later retrieval
		 * (e.g. to manipulate the stack pointer) */
		if (!cos_vect_lookup(&spd_info_addresses, spdid)) {
			boot_spd_set_symbs(h, spdid, ci);
			cos_vect_add_id(&spd_info_addresses, (void*)round_to_page(ci), spdid);
		}
	}
}
Example #14
/* This structure allows an upcall thread to find its associated ring
 * buffers
 */
static struct thd_map *get_thd_map(unsigned short int thd_id)
{
	return cos_vect_lookup(&tmap, thd_id);
}
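
get_thd_map() is the lookup half of the mapping that rem_thd_map() in Example #6 tears down. The sketch below shows the corresponding registration step, assuming only the cos_vect API used throughout these examples; the function name add_thd_map and the memset-based initialization are illustrative, since the fields of struct thd_map are not shown here.

/* Hypothetical registration: create the tmap entry for a new upcall
 * thread so that later get_thd_map() calls can locate its state. */
static int add_thd_map(unsigned short int thd_id)
{
	struct thd_map *tm;

	if (cos_vect_lookup(&tmap, thd_id)) return 0; /* already present */
	tm = malloc(sizeof(struct thd_map));
	if (NULL == tm) return -1;
	memset(tm, 0, sizeof(struct thd_map));
	/* the ring-buffer fields of *tm (not shown in these examples) would
	 * be initialized here before the entry is published */
	if (thd_id != cos_vect_add_id(&tmap, tm, thd_id)) {
		free(tm);
		return -1;
	}
	return 0;
}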