int
__cbuf_c_delete(struct spd_tmem_info *sti, int cbid, struct cb_desc *d)
{
	struct cb_mapping *m;
	struct spd_tmem_info *map_sti;

	DOUT("_c_delete....cbid %d\n", cbid);
	__spd_cbvect_clean_val(sti, cbid);
	//assert(sti->ci.meta[(cbid-1)].c_0.v == NULL);
	//printc("_c_delete....cbid %d, meta %p\n", cbid, sti->ci.meta[cbid - 1].c_0.v);
	mman_revoke_page(cos_spd_id(), (vaddr_t)d->addr, 0);

	// remove all mapped children
	m = FIRST_LIST(&d->owner, next, prev);
	while (m != &d->owner) {
		struct cb_mapping *n;

		/* remove from the vector in all mapped spds as well! */
		map_sti = get_spd_info(m->spd);
		DOUT("Clean val in spd %d\n", map_sti->spdid);
		DOUT("Clean: cbid %d\n", cbid);
		__spd_cbvect_clean_val(map_sti, cbid);
		valloc_free(cos_spd_id(), m->spd, (void *)(m->addr), 1);
		n = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		free(m);
		m = n;
	}
	valloc_free(cos_spd_id(), sti->spdid, (void *)(d->owner.addr), 1);
	DOUT("unmapped is done\n");

	return 0;
}
/* the stack should NOT be on the freelist within the spd */
static int
stkmgr_stk_remove_from_spd(struct cos_stk_item *stk_item, struct spd_stk_info *ssi)
{
	spdid_t s_spdid;

	s_spdid = ssi->spdid;
	DOUT("Releasing Stack\n");
	mman_revoke_page(cos_spd_id(), (vaddr_t)(stk_item->hptr), 0);
	valloc_free(cos_spd_id(), s_spdid, (void*)stk_item->d_addr, 1);
	DOUT("Putting stack back on free list\n");

	// cause underflow for MAX Int
	stk_item->parent_spdid = 0;

	// Clear our memory to prevent leakage
	memset(stk_item->hptr, 0, PAGE_SIZE);

	DOUT("Removing from local list\n");
	// remove from s_spdid's stk_list;
	REM_LIST(stk_item, next, prev);
	ssi->num_allocated--;
	assert(ssi->num_allocated == stkmgr_num_alloc_stks(s_spdid));

	return 0;
}
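/*
 * A hypothetical bulk-reclaim helper built on the function above: walk
 * a component's stack list and remove every stack.  This is a sketch,
 * not part of the stkmgr API: the stk_list head name follows the
 * comment in stkmgr_stk_remove_from_spd, and it assumes each stack has
 * already been taken off the component-local freelist, per the
 * precondition above.
 */
static void
stkmgr_reclaim_all(struct spd_stk_info *ssi)
{
	struct cos_stk_item *stk, *next;

	stk = FIRST_LIST(&ssi->stk_list, next, prev);
	while (stk != &ssi->stk_list) {
		next = FIRST_LIST(stk, next, prev);
		stkmgr_stk_remove_from_spd(stk, ssi);
		stk = next;
	}
}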
int
__sg_mman_revoke_page(spdid_t spd, vaddr_t addr, int flags)
{
	int ret = 0;

	if (flags == 1) ret = __mman_revoke_page(spd, addr, flags);
	else            ret = mman_revoke_page(spd, addr, flags);

	return ret;
}
static void
revoke_test()
{
	int i;
	vaddr_t addr = 0;

	printc("\n<<< REVOKE TEST BEGIN! >>>\n");
#ifdef TEN2TEN
	/* 10 to 10 */
	for (i = 0; i < PAGE_NUM; i++) {
		addr = s_addr[i];
		/* printc("s_addr %p\n", addr); */
		/* rdtscll(start); */
		mman_revoke_page(cos_spd_id(), addr, 0);
		/* rdtscll(end); */
		/* printc("COST (mman_revoke_page) %llu\n", end - start); */
	}
#else
	/* 1 to 10 */
	addr = s_addr[0];
	/* printc("s_addr %p\n", addr); */
	mman_revoke_page(cos_spd_id(), addr, 0);
#endif
	printc("<<< REVOKE TEST END! >>>\n\n");
	return;
}
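/*
 * revoke_test() assumes s_addr[] was populated and aliased beforehand;
 * a minimal sketch of that setup, mirroring the grant/alias pattern in
 * all_in_one() below.  For the 1-to-10 configuration, s_addr[0] would
 * instead be aliased to ten destinations.  mm_test2() and the
 * cos_spd_id()+1 destination spdid are test-harness assumptions
 * carried over from all_in_one().
 */
static void
revoke_test_setup(void)
{
	int i;

	for (i = 0; i < PAGE_NUM; i++) {
		s_addr[i] = (vaddr_t)cos_get_vas_page();
		d_addr[i] = mm_test2();
		mman_get_page(cos_spd_id(), s_addr[i], 0);
		mman_alias_page(cos_spd_id(), s_addr[i], cos_spd_id()+1, d_addr[i]);
	}
}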
/*
 * Since clients may be malicious, or may simply use the protocol
 * incorrectly, we cannot just unmap the memory here.  We guarantee
 * that a fault can only happen within the malicious component: other
 * components either receive a NULL pointer from cbuf2buf or see wrong
 * data, but never fault.  See the details in cbuf_unmap_prepare.
 */
static int
cbuf_free_unmap(struct cbuf_comp_info *cci, struct cbuf_info *cbi)
{
	struct cbuf_maps *m = &cbi->owner, *next;
	struct cbuf_bin *bin;
	void *ptr = cbi->mem;
	unsigned long off, size = cbi->size;

	if (cbuf_unmap_prepare(cbi)) return 1;

	/* Unmap all of the pages from the clients */
	for (off = 0 ; off < size ; off += PAGE_SIZE) {
		mman_revoke_page(cos_spd_id(), (vaddr_t)ptr + off, 0);
	}

	/*
	 * Deallocate the virtual address in the client, and cleanup
	 * the memory in this component
	 */
	m = FIRST_LIST(&cbi->owner, next, prev);
	while (m != &cbi->owner) {
		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, size/PAGE_SIZE);
		free(m);
		m = next;
	}
	valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, size/PAGE_SIZE);

	/* deallocate/unlink our data-structures */
	page_free(ptr, size/PAGE_SIZE);
	cmap_del(&cbufs, cbi->cbid);
	cci->allocated_size -= size;
	bin = cbuf_comp_info_bin_get(cci, size);
	if (EMPTY_LIST(cbi, next, prev)) {
		bin->c = NULL;
	} else {
		if (bin->c == cbi) bin->c = cbi->next;
		REM_LIST(cbi, next, prev);
	}
	free(cbi);

	return 0;
}
static int
cbuf_map(spdid_t spdid, vaddr_t daddr, void *page, unsigned long size, int flags)
{
	unsigned long off;

	assert(size == round_to_page(size));
	assert(daddr);
	assert(page);
	for (off = 0 ; off < size ; off += PAGE_SIZE) {
		vaddr_t d = daddr + off;

		if (unlikely(d != (mman_alias_page(cos_spd_id(), ((vaddr_t)page) + off,
						   spdid, d, flags)))) {
			/* roll back any mappings already established */
			for (d = daddr + off - PAGE_SIZE ; d >= daddr ; d -= PAGE_SIZE) {
				mman_revoke_page(spdid, d, 0);
			}
			return -ENOMEM;
		}
	}

	return 0;
}
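/*
 * Hypothetical caller illustrating cbuf_map()'s error-handling
 * contract: on failure the partial aliases have already been revoked,
 * so the caller only needs to release the destination vaddr range.
 * map_into_client is an illustrative name, not part of the manager.
 */
static int
map_into_client(spdid_t client, void *mem, unsigned long sz)
{
	vaddr_t daddr;

	daddr = (vaddr_t)valloc_alloc(cos_spd_id(), client, sz/PAGE_SIZE);
	if (unlikely(!daddr)) return -ENOMEM;
	if (cbuf_map(client, daddr, mem, sz, MAPPING_RW)) {
		valloc_free(cos_spd_id(), client, (void *)daddr, sz/PAGE_SIZE);
		return -ENOMEM;
	}

	return 0;
}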
static void
all_in_one()
{
	int i;

	for (i = 0; i < PAGE_NUM; i++) {
		s_addr[i] = (vaddr_t)cos_get_vas_page();
		d_addr[i] = mm_test2();
	}

	for (i = 0; i < PAGE_NUM; i++) {
		/* rdtscll(start); */
		mman_get_page(cos_spd_id(), s_addr[i], 0);
		mman_alias_page(cos_spd_id(), s_addr[i], cos_spd_id()+1, d_addr[i]);
		mman_revoke_page(cos_spd_id(), s_addr[i], 0);
		/* rdtscll(end); */
		/* printc("grant-alias-revoke cost %llu\n", end - start); */
		/* mman_release_page(cos_spd_id(), s_addr[i], 0); */
	}

	return;
}
static void
cbufp_free_unmap(spdid_t spdid, struct cbufp_info *cbi)
{
	struct cbufp_maps *m = &cbi->owner;
	void *ptr = cbi->mem;

	if (cbufp_referenced(cbi)) return;

	do {
		struct cbufp_maps *next;

		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, cbi->size/PAGE_SIZE);
		m = next;
	} while (m != &cbi->owner);

	/* TODO: iterate through the size, and free all... */
	mman_revoke_page(cos_spd_id(), (vaddr_t)ptr, 0);
	//free_page(ptr);
}
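/*
 * One way to resolve the TODO above, following the per-page loop that
 * cbuf_free_unmap uses: revoke every page of the buffer instead of
 * only the first.  A sketch, assuming cbi->size is page-aligned.
 */
static void
cbufp_revoke_all(struct cbufp_info *cbi)
{
	unsigned long off;

	for (off = 0 ; off < cbi->size ; off += PAGE_SIZE) {
		mman_revoke_page(cos_spd_id(), (vaddr_t)cbi->mem + off, 0);
	}
}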
/*
 * FIXME: change the interface to include the component making the
 * call, to make sure that it owns the page it is trying to unmap
 * (and that the one it is unmapping is a descendant).
 */
void
mman_release_page(spdid_t spd, vaddr_t addr, int flags)
{
	int alias;
	long idx;
	struct mem_cell *mc;
	struct mapping_info *mi;

	mman_revoke_page(spd, addr, flags);
	mc = find_cell(spd, addr, &alias);
	if (!mc) {
		/* FIXME: add return codes to this call */
		return;
	}
	mi = mc->map;
	idx = cos_mmap_cntl(COS_MMAP_REVOKE, 0, mi[alias].owner_spd, mi[alias].addr, 0);
	assert(&cells[idx] == mc);
	mi[alias].addr      = 0;
	mi[alias].owner_spd = 0;
	mi[alias].parent    = 0;
	mc->naliases--;

	return;
}
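/*
 * Sketch of the lifecycle mman_release_page() sits at the end of: the
 * owner grants itself a frame, aliases it into a child, and release
 * both revokes the descendants and returns the frame.  Argument
 * shapes follow the test code above; my_page and child_addr are
 * illustrative names.
 */
static void
page_lifecycle(spdid_t child, vaddr_t my_page, vaddr_t child_addr)
{
	mman_get_page(cos_spd_id(), my_page, 0);
	mman_alias_page(cos_spd_id(), my_page, child, child_addr);
	/* ... share data through the mapping ... */
	mman_release_page(cos_spd_id(), my_page, 0);
}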
static int
shared_page_setup(int thd_id)
{
	// thd_id is the upcall thread on the server side.
	struct srv_thd_info *thd;
	int cspd, sspd;
	vaddr_t ring_mgr, ring_cli, ring_srv;

	assert(thd_id);
	thd = &srv_thd_info[thd_id];
	cspd = thd->cli_spd_id;
	sspd = thd->srv_spd_id;
	if (!cspd || !sspd) goto err;

	ring_mgr = (vaddr_t)alloc_page();
	if (!ring_mgr) {
		printc("par_mgr: alloc ring buffer failed in mgr %ld.\n", cos_spd_id());
		goto err;
	}
	srv_thd_info[thd_id].mgr_ring = ring_mgr;

	ring_cli = (vaddr_t)valloc_alloc(cos_spd_id(), cspd, 1);
	if (unlikely(!ring_cli)) {
		printc("par_mgr: vaddr alloc failed in client comp %d.\n", cspd);
		goto err_cli;
	}
	if (unlikely(ring_cli != mman_alias_page(cos_spd_id(), ring_mgr, cspd, ring_cli, MAPPING_RW))) {
		printc("par_mgr: alias to client %d failed.\n", cspd);
		goto err_cli_alias;
	}
	comp_info[cspd].cli_thd_info[cos_get_thd_id()]->cap_info[thd->cli_cap_id].cap_ring = ring_cli;

	ring_srv = (vaddr_t)valloc_alloc(cos_spd_id(), sspd, 1);
	if (unlikely(!ring_srv)) {
		printc("par_mgr: vaddr alloc failed in server comp %d.\n", sspd);
		goto err_srv;
	}
	if (unlikely(ring_srv != mman_alias_page(cos_spd_id(), ring_mgr, sspd, ring_srv, MAPPING_RW))) {
		printc("par_mgr: alias to server %d failed.\n", sspd);
		goto err_srv_alias;
	}
	srv_thd_info[thd_id].srv_ring = ring_srv;

	/*
	 * Initialize the ring buffer.  Passing NULL because we use a
	 * continuous ring (struct + data region).  The ring starts
	 * from the second cache line of the page.  (The first cache
	 * line is used for the server thread active flag.)
	 */
	CK_RING_INIT(inv_ring, (CK_RING_INSTANCE(inv_ring) *)((void *)ring_mgr + CACHE_LINE), NULL,
		     leqpow2((PAGE_SIZE - CACHE_LINE - sizeof(CK_RING_INSTANCE(inv_ring))) / 2 / sizeof(struct inv_data)));

	return 0;

err_srv_alias:
	valloc_free(cos_spd_id(), sspd, (void *)ring_srv, 1);
err_srv:
	mman_revoke_page(cos_spd_id(), ring_mgr, 0);
err_cli_alias:
	valloc_free(cos_spd_id(), cspd, (void *)ring_cli, 1);
err_cli:
	free_page((void *)ring_mgr);
err:
	return -1;
}
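/*
 * A teardown counterpart mirroring shared_page_setup()'s error path:
 * revoking the manager's page removes both the client and server
 * aliases, after which the vaddr ranges and the page itself can be
 * returned.  A sketch only; the client-side ring vaddr lives in
 * comp_info, so its exact lookup is elided and the address is passed
 * in directly here.
 */
static void
shared_page_teardown(int thd_id, vaddr_t ring_cli)
{
	struct srv_thd_info *thd = &srv_thd_info[thd_id];

	mman_revoke_page(cos_spd_id(), thd->mgr_ring, 0);
	valloc_free(cos_spd_id(), thd->srv_spd_id, (void *)thd->srv_ring, 1);
	valloc_free(cos_spd_id(), thd->cli_spd_id, (void *)ring_cli, 1);
	free_page((void *)thd->mgr_ring);
}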