Example 1
/*
 * Forward write_c() on a cheri_fd to the underlying file descriptor.
 */
struct cheri_fd_ret
cheri_fd_write(__capability const void *buf_c, size_t nbytes)
{
	struct cheri_fd_ret ret;
	__capability struct cheri_fd *cfp;
	void *buf;

	/* XXXRW: Object-capability user permission check on idc. */

	/* XXXRW: Change to check permissions directly and throw exception. */
	if (!(cheri_getperm(buf_c) & CHERI_PERM_LOAD)) {
		ret.cfr_retval0 = -1;
		ret.cfr_retval1 = EPROT;
		return (ret);
	}
	buf = (void *)buf_c;

	/* Check that cheri_fd hasn't been revoked. */
	cfp = cheri_getidc();
	if (cfp->cf_fd == -1) {
		ret.cfr_retval0 = -1;
		ret.cfr_retval1 = EBADF;
		return (ret);
	}

	/* Forward to operating system. */
	ret.cfr_retval0 = write(cfp->cf_fd, buf,
	    min(nbytes, cheri_getlen(buf_c) - cheri_getoffset(buf_c)));
	ret.cfr_retval1 = (ret.cfr_retval0 < 0 ? errno : 0);
	return (ret);
}
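The example relies on two structures that are not shown. A minimal sketch of what they might look like, with field names taken from the code above (the real libcheri definitions may carry additional fields):

struct cheri_fd_ret {
	register_t	cfr_retval0;	/* Primary return value (e.g. bytes written). */
	register_t	cfr_retval1;	/* errno-style error, or 0 on success. */
};

struct cheri_fd {
	int	cf_fd;		/* Underlying file descriptor; -1 once revoked. */
};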
Example 2
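/*
 * libpng read callback: restrict the 'data' capability to the requested
 * length and forward the read to the sandboxed callback.
 */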
static void
sb_read_fn(png_structp png_ptr, png_bytep data, png_size_t length)
{
	void *io_ptr = png_get_io_ptr(png_ptr);

#if 0
	printf("in sb_read_fn, data base 0x%jx offset 0x%jx length 0x%zx (min len 0x%zx)\n",
	    cheri_getbase(data), cheri_getoffset(data),
	    cheri_getlen(data), length);
#endif

	libpng_sb_read_callback(io_ptr, cheri_setlen(data, length), length);
}
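A callback of this shape is normally installed with libpng's png_set_read_fn(); a minimal sketch (the io_state argument and the installer function are illustrative, not part of the example above):

static void
install_sb_read_fn(png_structp png_ptr, void *io_state)
{
	/* Route all of libpng's read requests through sb_read_fn. */
	png_set_read_fn(png_ptr, io_state, sb_read_fn);
}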
Example 3
/*
 * Check for high-precision bounds for a variety of small object sizes,
 * allocated from the stack.  These should be precise regardless of capability
 * compression, as the allocator promises to align things suitably.  Test both
 * static and dynamic allocation.
 */
static void
test_bounds_precise(__capability void *c, size_t expected_len)
{
    size_t len, offset;

    /* Confirm precise lower bound: offset of zero. */
    offset = cheri_getoffset(c);
    if (offset != 0)
        cheritest_failure_errx("offset (%zu) not zero", offset);

    /* Confirm precise upper bound: length of expected size for type. */
    len = cheri_getlen(c);
    if (len != expected_len)
        cheritest_failure_errx("length (%zu) not expected %zu", len,
                               expected_len);
    cheritest_success();
}
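A caller would derive a capability with exact bounds over a small stack object and pass the expected length. A sketch, assuming the cheri_ptr() convenience macro from CheriBSD's cheric.h (the test name here is illustrative):

static void
test_bounds_stack_uint8(void)
{
    uint8_t u8;

    /* Expect a zero offset and an exact one-byte length. */
    test_bounds_precise(cheri_ptr(&u8, sizeof(u8)), sizeof(u8));
}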
Example 4
void bootloader_main(void) {

    /* Init hardware */
    hw_init();

    /* Initialize elf-loader environment */
    init_elf_loader();

    /* Load the nano kernel. Doing this will install exception vectors */
    boot_printf("Boot: loading nano kernel ...\n");
    nano_init_t *nano_init = (nano_init_t *)load_nano(); // We have to re-derive this as an executable cap
    nano_init = (nano_init_t *)cheri_setoffset(cheri_getpcc(), cheri_getoffset(nano_init));

    /* TODO: we could have some boot exception vectors if we want exception handling in boot. */
    /* These should be in ROM as a part of the boot image (i.e. make a couple more dedicated sections). */
    cp0_status_bev_set(0);

    boot_printf("Boot: loading kernel ...\n");
    size_t entry = load_kernel();

    boot_printf("Boot: loading init ...\n");
    boot_info_t *bi = load_init();

    size_t invalid_length = bi->init_end;
    capability phy_start = cheri_setbounds(cheri_setoffset(cheri_getdefault(), MIPS_KSEG0), invalid_length);

    /* Do we actually need this? */
    //boot_printf("Invalidating %p length %lx:\n", phy_start, invalid_length);
    //caches_invalidate(phy_start, invalid_length);


    register_t mem_size = bi->init_end - bi->nano_end;

    /* Jumps to the nano kernel init. This will completely destroy boot and so we can never return here.
     * All registers will be cleared apart from a specified few. mem_size of memory will be left unmanaged and the
     * rest will be returned as a reservation. The third argument is an extra argument to the kernel */

    boot_printf("Jumping to nano kernel...\n");
    BOOT_PRINT_CAP(nano_init);
    nano_init(mem_size, entry, bi->init_begin - bi->kernel_begin, bi->init_entry);
}
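The example never shows the declaration of nano_init_t; judging from the call site and the comment above it, it is a function type roughly of this shape (argument names are guesses, not taken from the source):

typedef void nano_init_t(register_t unmanaged_size, size_t kernel_entry,
                         size_t extra_arg, size_t init_entry);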
Example 5
/*
 * Unwind the trusted stack by the specified number of frames (or all).
 */
int
cheri_stack_unwind(ucontext_t *uap, register_t ret, u_int op,
    u_int num_frames)
{
	struct cheri_frame *cfp;
	struct cheri_stack cs;
	struct cheri_stack_frame *csfp;
	u_int stack_size, stack_frames;
	register_t saved_mcreg0;

	if (op != CHERI_STACK_UNWIND_OP_N &&
	    op != CHERI_STACK_UNWIND_OP_ALL) {
		errno = EINVAL;
		return (-1);
	}

	/*
	 * Request to unwind zero frames is a no-op: no state transformation
	 * is needed.
	 */
	if ((op == CHERI_STACK_UNWIND_OP_N) && (num_frames == 0))
		return (0);

	/*
	 * Retrieve trusted stack and validate before attempting to unwind.
	 */
	if (sysarch(CHERI_GET_STACK, &cs) != 0)
		return (-1);
	if ((cs.cs_tsize % CHERI_FRAME_SIZE) != 0 ||
	    (cs.cs_tsp > cs.cs_tsize) ||
	    (cs.cs_tsp % CHERI_FRAME_SIZE) != 0) {
		errno = ERANGE;
		return (-1);
	}

	/*
	 * See if there is room on the stack for that much unwinding.
	 */
	stack_size = cs.cs_tsize / CHERI_FRAME_SIZE;
	stack_frames = (cs.cs_tsize - cs.cs_tsp) / CHERI_FRAME_SIZE;
	if (op == CHERI_STACK_UNWIND_OP_ALL)
		num_frames = stack_frames;
	if (stack_frames < num_frames) {
		errno = ERANGE;
		return (-1);
	}

	/*
	 * Restore state from the last frame being unwound.
	 */
	csfp = &cs.cs_frames[stack_size - (stack_frames - num_frames) - 1];
#if 0
	/* Make sure we will be returning to ambient authority. */
	if (cheri_getbase(csfp->csf_pcc) != cheri_getbase(cheri_getpcc()) ||
	    cheri_getlen(csfp->csf_pcc) != cheri_getlen(cheri_getpcc()))
		return (-1);
#endif

	/*
	 * Pop the desired number of frames off the trusted stack.
	 */
	cs.cs_tsp += num_frames * CHERI_FRAME_SIZE;
	assert(cs.cs_tsp <= cs.cs_tsize);

#ifdef __CHERI_PURE_CAPABILITY__
	cfp = &uap->uc_mcontext.mc_cheriframe;
#else
	cfp = (struct cheri_frame *)uap->uc_mcontext.mc_cp2state;
	if (cfp == NULL || uap->uc_mcontext.mc_cp2state_len != sizeof(*cfp)) {
		errno = ERANGE;
		return (-1);
	}
#endif

	/*
	 * Zero the capability register file, explicitly restoring $pcc and
	 * $idc from the last trusted-stack frame.
	 */
	memset(cfp, 0, sizeof(*cfp));
	cfp->cf_idc = csfp->csf_idc;
	cfp->cf_pcc = csfp->csf_pcc;

	/*
	 * Zero the general-purpose register file, restoring not only $pc, but
	 * also the slot for $zero, which will hold a magic number across
	 * sigcode and sigreturn().  Also set a return value.
	 *
	 * XXXRW: The kernel unwinder sets V1 to the signal number?
	 */
	saved_mcreg0 = uap->uc_mcontext.mc_regs[0];
	memset(uap->uc_mcontext.mc_regs, 0, sizeof(uap->uc_mcontext.mc_regs));
	uap->uc_mcontext.mc_regs[0] = saved_mcreg0;
	uap->uc_mcontext.mc_pc = cheri_getoffset(cfp->cf_pcc);
	uap->uc_mcontext.mc_regs[V0] = ret;

	/*
	 * Update kernel view of trusted stack.
	 */
	if (sysarch(CHERI_SET_STACK, &cs) != 0)
		return (-1);
	return (0);
}
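A typical caller is a signal handler that recovers from a fault taken during a sandbox invocation by popping a single trusted-stack frame and forcing a -1 return in the caller's domain; a minimal sketch (handler name and recovery policy are illustrative):

static void
sandbox_fault_handler(int sig __unused, siginfo_t *info __unused, void *vuap)
{
	ucontext_t *uap = vuap;

	/* Unwind exactly one trusted-stack frame, returning -1 to the caller. */
	if (cheri_stack_unwind(uap, -1, CHERI_STACK_UNWIND_OP_N, 1) < 0)
		abort();
}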