Example #1
void *
kernel_realloc(void *cp, size_t nbytes)
{
	size_t cur_space;	/* Space in the current bucket */
	size_t smaller_space;	/* Space in the next smaller bucket */
	union overhead *op;
	char *res;

	if (cp == NULL)
		return (kernel_malloc(nbytes));
	op = find_overhead(cp);
	if (op == NULL)
		return (NULL);
	cur_space = (1 << (op->ov_index + 3)) - sizeof(*op);

	/* avoid the copy if same size block */
	/*
	 * XXX-BD: Arguably we should be tracking the actual allocation,
	 * not just the bucket size, so that we can do a full malloc+memcpy
	 * when the caller has restricted the length of the pointer passed
	 * to realloc() but is growing the buffer within the current bucket.
	 *
	 * As it is, this code contains a leak where realloc recovers access
	 * to the contents in foo:
	 * char *foo = malloc(10);
	 * strcpy(foo, "abcdefghi");
	 * foo = cheri_setbounds(foo, 5);
	 * foo = realloc(foo, 10);
	 */
	smaller_space = (1 << (op->ov_index + 2)) - sizeof(*op);
	if (nbytes <= cur_space && nbytes > smaller_space)
		return (cheri_andperm(cheri_setbounds(op + 1, nbytes),
		    cheri_getperm(cp)));

	if ((res = kernel_malloc(nbytes)) == NULL)
		return (NULL);
	/*
	 * Only copy data the caller had access to even if this is less
	 * than the size of the original allocation.  This risks surprise
	 * for some programmers, but to do otherwise risks information leaks.
	 */
	memcpy(res, cp,
	    (nbytes <= cheri_getlen(cp)) ? nbytes : cheri_getlen(cp));
	res = cheri_andperm(res, cheri_getperm(cp));
	kernel_free(cp);
	return (res);
}
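
The in-place path above turns on the bucket arithmetic: a request stays in its current chunk only when it fits the current power-of-two bucket but would not fit the next smaller one. A minimal standalone sketch of that math (assuming an 8-byte union overhead header, which the excerpt does not show):

#include <stdio.h>

#define OVERHEAD_SIZE	8	/* assumed sizeof(union overhead) */

int
main(void)
{
	size_t ov_index = 2;	/* bucket of 1 << (2 + 3) = 32-byte chunks */
	size_t cur_space = (1 << (ov_index + 3)) - OVERHEAD_SIZE;	/* 24 */
	size_t smaller_space = (1 << (ov_index + 2)) - OVERHEAD_SIZE;	/* 8 */
	size_t nbytes;

	/* Requests of 9..24 bytes are resized in place; others copy. */
	for (nbytes = 8; nbytes <= 25; nbytes++)
		printf("%zu bytes: %s\n", nbytes,
		    (nbytes <= cur_space && nbytes > smaller_space) ?
		    "in place" : "malloc+copy");
	return (0);
}
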
Example #2
int
sandbox_object_load(struct sandbox_class *sbcp, struct sandbox_object *sbop)
{
	__capability void *basecap, *sbcap;
	struct sandbox_metadata *sbm;
	size_t length;
	int saved_errno;
	uint8_t *base;

	/*
	 * Perform an initial reservation of space for the sandbox, but using
	 * anonymous memory that is neither readable nor writable.  This
	 * ensures there is space for all the various segments we will be
	 * installing later.
	 *
	 * The rough sandbox memory map is as follows:
	 *
	 * K + 0x1000 [stack]
	 * K          [guard page]
	 * J + 0x1000 [heap]
	 * J          [guard page]
	 * 0x8000     [memory mapped binary] (SANDBOX_ENTRY)
	 * 0x2000     [guard page]
	 * 0x1000     [read-only sandbox metadata page]
	 * 0x0000     [guard page]
	 *
	 * Address constants in sandbox.h must be synchronised with the layout
	 * implemented here.  Location and contents of sandbox metadata is
	 * part of the ABI.
	 */
	length = sbcp->sbc_sandboxlen;
	base = sbop->sbo_mem = mmap(NULL, length, PROT_NONE, MAP_ANON, -1, 0);
	if (sbop->sbo_mem == MAP_FAILED) {
		saved_errno = errno;
		warn("%s: mmap region", __func__);
		goto error;
	}

	/*
	 * Skip guard page(s) to the base of the metadata structure.
	 */
	base += SANDBOX_METADATA_BASE;
	length -= SANDBOX_METADATA_BASE;

	/*
	 * Map metadata structure -- but can't fill it out until we have
	 * calculated all the other addresses involved.
	 */
	if ((sbm = mmap(base, METADATA_SIZE, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_FIXED, -1, 0)) == MAP_FAILED) {
		saved_errno = errno;
		warn("%s: mmap metadata", __func__);
		goto error;
	}

	/*
	 * Skip forward to the mapping location for the binary -- in case we
	 * add more metadata in the future.  Assert that we didn't bump into
	 * the sandbox entry address.  This address is hard to change as it is
	 * the address used in static linking for sandboxed code.
	 */
	assert((register_t)base - (register_t)sbop->sbo_mem < SANDBOX_ENTRY);
	base = (void *)((register_t)sbop->sbo_mem + SANDBOX_ENTRY);
	length = sbcp->sbc_sandboxlen - SANDBOX_ENTRY;

	/*
	 * Map program binary.
	 */
	if (mmap(base, sbcp->sbc_stat.st_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_FIXED, sbcp->sbc_fd, 0) == MAP_FAILED) {
		saved_errno = errno;
		warn("%s: mmap %s", __func__, sbcp->sbc_path);
		goto error;
	}
	base += roundup2(sbcp->sbc_stat.st_size, PAGE_SIZE);
	length -= roundup2(sbcp->sbc_stat.st_size, PAGE_SIZE);

	/*
	 * Skip guard page.
	 */
	base += GUARD_PAGE_SIZE;
	length -= GUARD_PAGE_SIZE;

	/*
	 * Heap.
	 */
	sbop->sbo_heapbase = (register_t)base - (register_t)sbop->sbo_mem;
	sbop->sbo_heaplen = length - (GUARD_PAGE_SIZE + STACK_SIZE);
	if (mmap(base, sbop->sbo_heaplen, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
		saved_errno = errno;
		warn("%s: mmap heap", __func__);
		goto error;
	}
	memset(base, 0, sbop->sbo_heaplen);
	base += sbop->sbo_heaplen;
	length -= sbop->sbo_heaplen;

	/*
	 * Skip guard page.
	 */
	base += GUARD_PAGE_SIZE;
	length -= GUARD_PAGE_SIZE;

	/*
	 * Stack.
	 */
	if (mmap(base, length, PROT_READ | PROT_WRITE, MAP_ANON | MAP_FIXED,
	    -1, 0) == MAP_FAILED) {
		saved_errno = errno;
		warn("%s: mmap stack", __func__);
		goto error;
	}
	memset(base, 0, length);
	base += STACK_SIZE;
	length -= STACK_SIZE;

	/*
	 * There should not be too much, nor too little space remaining.  0
	 * is our Goldilocks number.
	 */
	assert(length == 0);

	/*
	 * Now that addresses are known, write out metadata for in-sandbox
	 * use; then mprotect() so that it can't be modified by the sandbox.
	 */
	sbm->sbm_heapbase = sbop->sbo_heapbase;
	sbm->sbm_heaplen = sbop->sbo_heaplen;
	if (mprotect(sbm, METADATA_SIZE, PROT_READ) < 0) {
		saved_errno = errno;
		warn("%s: mprotect metadata", __func__);
		goto error;
	}

	if (sbcp->sbc_sandbox_class_statp != NULL) {
		(void)sandbox_stat_object_register(
		    &sbop->sbo_sandbox_object_statp,
		    sbcp->sbc_sandbox_class_statp,
		    SANDBOX_OBJECT_TYPE_POINTER, (uintptr_t)sbop->sbo_mem);
		SANDBOX_CLASS_ALLOC(sbcp->sbc_sandbox_class_statp);
	}

	/*
	 * Construct a generic capability that describes the combined
	 * data/code segment that we will seal.
	 */
	basecap = cheri_ptrtype(sbop->sbo_mem, sbcp->sbc_sandboxlen,
	    SANDBOX_ENTRY);

	/* Construct sealed code capability. */
	sbcap = cheri_andperm(basecap, CHERI_PERM_EXECUTE | CHERI_PERM_LOAD |
	    CHERI_PERM_SEAL);
	sbop->sbo_cheri_object.co_codecap =
	    cheri_sealcode(sbcap);

	/* Construct sealed data capability. */
	sbcap = cheri_andperm(basecap, CHERI_PERM_LOAD | CHERI_PERM_STORE |
	    CHERI_PERM_LOAD_CAP | CHERI_PERM_STORE_CAP |
	    CHERI_PERM_STORE_EPHEM_CAP);
	sbop->sbo_cheri_object.co_datacap = cheri_sealdata(sbcap, basecap);

	/*
	 * Construct an object capability for the system class instance that
	 * will be passed into the sandbox.  Its code capability is just our
	 * $c0; the data capability is to the sandbox structure itself, which
	 * allows the system class to identify which sandbox a request is
	 * being issued from.
	 *
	 * Note that $c0 in the 'sandbox' will be set from $pcc, so leave a
	 * full set of write/etc permissions on the code capability.
	 */
	basecap = cheri_settype(cheri_getdefault(),
	    (register_t)CHERI_CLASS_ENTRY(libcheri_system));
	sbop->sbo_cheri_system_object.co_codecap = cheri_sealcode(basecap);

	sbcap = cheri_ptr(sbop, sizeof(*sbop));
	sbcap = cheri_andperm(sbcap,
	    CHERI_PERM_LOAD | CHERI_PERM_STORE | CHERI_PERM_LOAD_CAP |
	    CHERI_PERM_STORE_CAP | CHERI_PERM_STORE_EPHEM_CAP);
	sbop->sbo_cheri_system_object.co_datacap = cheri_sealdata(sbcap,
	    basecap);
	return (0);

error:
	if (sbop->sbo_mem != MAP_FAILED)
		munmap(sbop->sbo_mem, sbcp->sbc_sandboxlen);
	errno = saved_errno;
	return (-1);
}
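
The reserve-then-carve idiom above (one inaccessible anonymous reservation, then MAP_FIXED sub-mappings inside it, with untouched pages left as guards) is useful beyond libcheri. A minimal sketch of just that idiom, independent of the sandbox structures:

#include <sys/mman.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 16 * pagesz;
	char *region, *window;

	/* Reserve address space that is neither readable nor writable. */
	region = mmap(NULL, len, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (region == MAP_FAILED)
		err(1, "mmap reserve");

	/*
	 * Carve one accessible page out of the reservation; the pages on
	 * either side remain PROT_NONE guard pages.
	 */
	window = mmap(region + pagesz, pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	if (window == MAP_FAILED)
		err(1, "mmap carve");

	window[0] = 1;		/* fine */
	/* region[0] = 1;	would fault: still PROT_NONE */

	munmap(region, len);
	return (0);
}
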
Example #3
/*
 * Return a read-only view of the kernel interface: the returned capability
 * permits data and capability loads, but stores through it trap, so callers
 * cannot tamper with the interface table.
 */
static kernel_if_t* get_if(void) {
	return (kernel_if_t*) cheri_andperm(&internel_if,
	    CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP);
}
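
The same cheri_andperm() idiom hands out a read-only view of any object. A minimal sketch in the same vein (assuming CheriBSD's <cheri/cheric.h> intrinsics; table is a hypothetical stand-in for internel_if):

#include <cheri/cheric.h>

static int table[4];

/*
 * Loads (including capability loads) through the returned pointer
 * succeed; stores trap, because CHERI_PERM_STORE has been cleared.
 */
static const int* get_table(void) {
	return (const int*) cheri_andperm(table,
	    CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP);
}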