/*
 * As clients may be malicious or may not use the protocol correctly,
 * we cannot simply unmap memory here.  We guarantee that a fault can
 * only happen within the malicious component; other components either
 * receive a NULL pointer from cbuf2buf or see incorrect data, but
 * never fault.  See details in cbuf_unmap_prepare.
 */
static int
cbuf_free_unmap(struct cbuf_comp_info *cci, struct cbuf_info *cbi)
{
	struct cbuf_maps *m = &cbi->owner, *next;
	struct cbuf_bin *bin;
	void *ptr = cbi->mem;
	unsigned long off, size = cbi->size;

	if (cbuf_unmap_prepare(cbi)) return 1;

	/* Unmap all of the pages from the clients */
	for (off = 0 ; off < size ; off += PAGE_SIZE) {
		mman_revoke_page(cos_spd_id(), (vaddr_t)ptr + off, 0);
	}

	/*
	 * Deallocate the virtual address in the client, and clean up
	 * the memory in this component.
	 */
	m = FIRST_LIST(&cbi->owner, next, prev);
	while (m != &cbi->owner) {
		next = FIRST_LIST(m, next, prev);
		REM_LIST(m, next, prev);
		valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, size/PAGE_SIZE);
		free(m);
		m = next;
	}
	valloc_free(cos_spd_id(), m->spdid, (void*)m->addr, size/PAGE_SIZE);

	/* deallocate/unlink our data-structures */
	page_free(ptr, size/PAGE_SIZE);
	cmap_del(&cbufs, cbi->cbid);
	cci->allocated_size -= size;
	bin = cbuf_comp_info_bin_get(cci, size);
	if (EMPTY_LIST(cbi, next, prev)) {
		bin->c = NULL;
	} else {
		if (bin->c == cbi) bin->c = cbi->next;
		REM_LIST(cbi, next, prev);
	}
	free(cbi);

	return 0;
}
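/*
 * Aside: the mapping list walked above is a circular doubly-linked
 * list whose head (cbi->owner) is embedded in the cbuf_info itself,
 * so the loop must stop when it wraps back to the head and must not
 * free() the owner node.  Below is a minimal, self-contained sketch
 * of that pattern with plain pointers standing in for the
 * FIRST_LIST/REM_LIST macros; the struct and function names are
 * illustrative, not from this codebase.
 */
#if 0	/* standalone sketch, not part of this component */
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct cbuf_maps: a node in a circular
 * doubly-linked list with an embedded head. */
struct map_node {
	struct map_node *next, *prev;
	int spdid;	/* which component this mapping belongs to */
};

/* Unlink and free every node except the embedded head, mirroring the
 * while-loop in cbuf_free_unmap(). */
static void
free_all_mappings(struct map_node *owner)
{
	struct map_node *m = owner->next, *next;

	while (m != owner) {		/* stop once we wrap to the head */
		next = m->next;
		m->prev->next = m->next;	/* REM_LIST equivalent */
		m->next->prev = m->prev;
		printf("freeing mapping for spd %d\n", m->spdid);
		free(m);		/* heap-allocated nodes only */
		m = next;
	}
	/* the head is embedded in cbuf_info; the caller frees it with cbi */
}

int
main(void)
{
	struct map_node owner = { &owner, &owner, 0 };	/* empty circular list */
	int i;

	for (i = 1; i <= 3; i++) {	/* add three client mappings at the head */
		struct map_node *m = malloc(sizeof(*m));
		if (!m) return 1;
		m->spdid = i;
		m->next = owner.next;	m->prev = &owner;
		owner.next->prev = m;	owner.next = m;
	}
	free_all_mappings(&owner);
	return 0;
}
#endif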
int
cbufp_create(spdid_t spdid, int size, long cbid)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	int ret = 0;

	if (unlikely(cbid < 0)) return 0;
	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;

	/*
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		cbi = malloc(sizeof(struct cbufp_info));
		if (!cbi) goto done;

		/* Allocate and map in the cbuf. */
		cbid      = cmap_add(&cbufs, cbi);
		cbi->cbid = cbid;
		cbi->size = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbufp_alloc_map(spdid, &(cbi->owner.addr),
				    (void**)&(cbi->mem), size)) goto free;
		if (cci->cbufs.c) ADD_LIST(cci->cbufs.c, cbi, next, prev);
		else              cci->cbufs.c = cbi;
	}
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, cbid);
		if (!cbi) goto done;
		if (cbi->owner.spdid != spdid) goto done;
	}
	meta = cbufp_meta_lookup(cci, cbid);
	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		ret = cbid * -1;
		goto done;
	}
	cbi->owner.m = meta;

	/*
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->nfo.c.flags |= CBUFM_IN_USE | CBUFM_TOUCHED |
	                     CBUFM_OWNER  | CBUFM_WRITABLE;
	meta->nfo.c.ptr    = cbi->owner.addr >> PAGE_ORDER;
	ret = cbid;
done:
	CBUFP_RELEASE();
	return ret;
free:
	cmap_del(&cbufs, cbid);
	free(cbi);
	goto done;
}
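/*
 * Aside: the "ret = cbid * -1" convention above implements a two-step
 * handshake.  A negative return tells the client that the cbuf exists
 * but its cbuf_meta page is not yet mapped into the client's address
 * space, so the client should map the meta region and call back with
 * the positive cbid it was handed.  A hypothetical client-side retry
 * loop under that assumption follows; map_meta_page() and the spdid_t
 * stand-in are illustrative, not from the source.
 */
#if 0	/* standalone sketch, not part of this component */
typedef unsigned short spdid_t;			/* stand-in typedef */
int cbufp_create(spdid_t spdid, int size, long cbid);	/* the entry point above */
int map_meta_page(long cbid);			/* assumed helper: 0 on success */

long
client_cbufp_alloc(spdid_t spdid, int size)
{
	long cbid = cbufp_create(spdid, size, 0);	/* 0 requests a new cbuf */

	if (cbid < 0) {
		/* Negative return: cbuf -cbid exists, but its meta page
		 * is not mapped into this component yet. */
		if (map_meta_page(-cbid)) return 0;	/* mapping failed */
		/* Retry; the manager now takes the "client has a cbid,
		 * make sure we agree" branch and fills in the meta. */
		cbid = cbufp_create(spdid, size, -cbid);
	}
	return cbid;	/* > 0 on success, 0 on failure */
}
#endif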
int
cbuf_create(spdid_t spdid, unsigned long size, int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	struct cbuf_bin *bin;
	int ret = 0;
	unsigned int id = (unsigned int)cbid;

	printl("cbuf_create\n");
	if (unlikely(cbid < 0)) return 0;
	CBUF_TAKE();
	tracking_start(NULL, CBUF_CRT);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;

	/*
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		/* TODO: check whether we have enough free memory: ask the mem manager */
		/* If memory usage exceeds the target, block this thread. */
		if (size + cci->allocated_size > cci->target_size) {
			cbuf_shrink(cci, size);
			if (size + cci->allocated_size > cci->target_size) {
				/* cbuf_thread_block is assumed to release
				 * the cbuf lock before blocking */
				cbuf_thread_block(cci, size);
				return 0;
			}
		}

		cbi = malloc(sizeof(struct cbuf_info));
		if (unlikely(!cbi)) goto done;

		/* Allocate and map in the cbuf.  Discard inconsistent cbufs. */
		/* TODO: find a better way to manage inconsistent cbufs. */
		do {
			id   = cmap_add(&cbufs, cbi);
			meta = cbuf_meta_lookup(cci, id);
		} while (meta && CBUF_INCONSISENT(meta));

		cbi->cbid        = id;
		size             = round_up_to_page(size);
		cbi->size        = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbuf_alloc_map(spdid, &(cbi->owner.addr),
				   (void**)&(cbi->mem), NULL, size, MAPPING_RW)) {
			goto free;
		}
	}
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, id);
		if (unlikely(!cbi)) goto done;
		if (unlikely(cbi->owner.spdid != spdid)) goto done;
	}
	meta = cbuf_meta_lookup(cci, id);
	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		ret = (int)id * -1;
		goto done;
	}

	/*
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = id;
	CBUF_FLAG_ADD(meta, CBUF_OWNER);
	CBUF_PTR_SET(meta, cbi->owner.addr);
	CBUF_REFCNT_INC(meta);

	/*
	 * When creating a new cbuf, the manager should be the only
	 * one who can access the meta.
	 */
	/* TODO: a malicious client may trigger this assertion; just for debug. */
	assert(CBUF_REFCNT(meta) == 1);
	assert(CBUF_PTR(meta));
	cbi->owner.m = meta;

	/*
	 * Install cbi last.  Otherwise, after we return a negative
	 * cbid, garbage collection may run and find a dangling cbi.
	 */
	bin = cbuf_comp_info_bin_get(cci, size);
	if (!bin) bin = cbuf_comp_info_bin_add(cci, size);
	if (unlikely(!bin)) goto free;
	if (bin->c) ADD_LIST(bin->c, cbi, next, prev);
	else        bin->c = cbi;
	cci->allocated_size += size;
	ret = (int)id;
done:
	tracking_end(NULL, CBUF_CRT);
	CBUF_RELEASE();
	return ret;
free:
	cmap_del(&cbufs, id);
	free(cbi);
	goto done;
}
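/*
 * Aside: both create paths store addresses and sizes in the meta
 * shifted down by PAGE_ORDER (meta->sz = cbi->size >> PAGE_ORDER
 * above; the cbufp variant stores owner.addr >> PAGE_ORDER).  Because
 * cbufs are page-aligned and page-granular, this is lossless and
 * keeps the fields small.  A minimal sketch of that packing, assuming
 * 4KiB pages; the struct below is illustrative, not the real
 * struct cbuf_meta layout.
 */
#if 0	/* standalone sketch, not part of this component */
#include <assert.h>

#define SK_PAGE_ORDER 12	/* 4KiB pages, an assumption */

/* Illustrative stand-in for struct cbuf_meta: addresses and sizes are
 * kept in page units rather than bytes. */
struct meta_sketch {
	unsigned long ptr;	/* owner address >> SK_PAGE_ORDER */
	unsigned long sz;	/* size in pages */
	unsigned long flags;	/* CBUF_OWNER etc. in the real code */
};

static inline void
meta_ptr_set(struct meta_sketch *m, unsigned long addr)
{
	assert(!(addr & ((1UL << SK_PAGE_ORDER) - 1)));	/* page-aligned */
	m->ptr = addr >> SK_PAGE_ORDER;
}

static inline unsigned long
meta_ptr(const struct meta_sketch *m) { return m->ptr << SK_PAGE_ORDER; }

int
main(void)
{
	struct meta_sketch m = { 0, 0, 0 };

	meta_ptr_set(&m, 0x80045000UL);
	m.sz = 0x4000UL >> SK_PAGE_ORDER;	/* 16KiB cbuf -> 4 pages */
	assert(meta_ptr(&m) == 0x80045000UL);	/* round-trips losslessly */
	assert(m.sz == 4);
	return 0;
}
#endif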