Example #1
0
/*
 * Deliberately trigger a CP2 (capability coprocessor) fault of the kind
 * selected by 'op'.  Each case performs an access that violates a
 * different property of the capability, so for recognized opcodes a
 * trap -- not a normal return -- is the expected outcome.
 *
 * NOTE(review): N and the CHERITEST_HELPER_OP_* constants are defined
 * elsewhere in the test harness -- presumably N is the buffer size used
 * throughout; confirm against the enclosing headers.
 */
static int
invoke_cap_fault(register_t op)
{
	char buffer[N], ch;
	__capability char *cap;

	/* Capability covering 'buffer' with load permission only. */
	cap = cheri_ptrperm(buffer, sizeof(buffer), CHERI_PERM_LOAD);
	switch (op) {
	case CHERITEST_HELPER_OP_CP2_BOUND:
		/* Load one byte past the end: bounds violation. */
		ch = cap[N];
		return (ch);

	case CHERITEST_HELPER_OP_CP2_PERM:
		/* Store via a load-only capability: permission violation. */
		cap[0] = 0;
		break;

	case CHERITEST_HELPER_OP_CP2_TAG:
		/* Dereference an untagged (zero) capability: tag violation. */
		cap = cheri_zerocap();
		ch = cap[0];
		return (ch);

	case CHERITEST_HELPER_OP_CP2_SEAL:
		/* Dereference a sealed capability: seal violation. */
		cap = cheri_sealcode(cap);
		ch = cap[0];
		return (ch);
	}
	/* Unrecognized opcode: no fault attempted. */
	return (0);
}
Example #2
0
/*
 * Reset a sandbox object to a pristine state: reload the loader-managed
 * address space, re-zero the external stack, and re-run the object's
 * constructors.
 *
 * Returns 0 on success, or -1 (with a warning printed) if the reload or
 * the stack remapping fails.
 */
int
sandbox_object_reset(struct sandbox_object *sbop)
{
	struct sandbox_class *sbcp;

	assert(sbop != NULL);
	sbcp = sbop->sbo_sandbox_classp;
	assert(sbcp != NULL);

	/*
	 * Reset loader-managed address space.
	 */
	if (sandbox_object_reload(sbop) == -1) {
		/* Fixed stray comma in the warning format string. */
		warn("%s: sandbox_object_reload", __func__);
		return (-1);
	}

	/*
	 * Reset external stack: MAP_FIXED over the existing mapping
	 * replaces it with fresh zero-filled anonymous pages.
	 */
	if (mmap(sbop->sbo_stackmem, sbop->sbo_stacklen,
	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_FIXED, -1, 0) ==
	    MAP_FAILED) {
		warn("%s: stack reset", __func__);
		return (-1);
	}

	/*
	 * (Re-)invoke object instance's constructors.  Note that, given the
	 * tight binding of class and object in the sandbox library currently,
	 * this will need to change in the future.  We also need to think more
	 * carefully about the mechanism here.
	 */
	(void)cheri_invoke(sbop->sbo_cheri_object_rtld,
	    SANDBOX_RUNTIME_CONSTRUCTORS,
	    0, 0, 0, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap());

	return (0);
}
Example #3
0
/*
 * This version of invoke() is intended for callers not implementing CHERI
 * compiler support -- but internally, it can be implemented either way.
 *
 * Marshals up to eight general-purpose arguments and eight capability
 * arguments (NULL pointers are replaced with a cleared capability) and
 * forwards them to sandbox_object_cinvoke(), recording per-method and
 * per-object statistics, including fault accounting on negative returns.
 *
 * XXXRW: Zeroing the capability pointer will clear the tag, but it seems a
 * bit ugly.  It would be nice to have a pretty way to do this.  Note that C
 * NULL != an untagged capability pointer, and we would benefit from having a
 * canonical 'NULL' for the capability space (connoting no rights).
 */
register_t
sandbox_object_invoke(struct sandbox_object *sbop, register_t methodnum,
    register_t a1, register_t a2, register_t a3,
    register_t a4, register_t a5, register_t a6, register_t a7,
    struct chericap *c3p, struct chericap *c4p, struct chericap *c5p,
    struct chericap *c6p, struct chericap *c7p, struct chericap *c8p,
    struct chericap *c9p, struct chericap *c10p)
{
	struct sandbox_class *sbcp;
	__capability void *c3, *c4, *c5, *c6, *c7, *c8, *c9, *c10;
	__capability void *cclear;
	register_t v0;

	sbcp = sbop->sbo_sandbox_classp;
	if (methodnum < SANDBOX_CLASS_METHOD_COUNT)
		SANDBOX_METHOD_INVOKE(sbcp->sbc_sandbox_methods[methodnum]);
	else
		SANDBOX_METHOD_INVOKE(sbcp->sbc_sandbox_method_nonamep);
	SANDBOX_OBJECT_INVOKE(sbop->sbo_sandbox_object_statp);

	/*
	 * Load the capability each non-NULL argument points at; substitute
	 * a cleared capability for NULL.
	 */
	cclear = cheri_zerocap();
	c3 = (c3p != NULL ? *(__capability void **)c3p : cclear);
	c4 = (c4p != NULL ? *(__capability void **)c4p : cclear);
	c5 = (c5p != NULL ? *(__capability void **)c5p : cclear);
	c6 = (c6p != NULL ? *(__capability void **)c6p : cclear);
	c7 = (c7p != NULL ? *(__capability void **)c7p : cclear);
	c8 = (c8p != NULL ? *(__capability void **)c8p : cclear);
	c9 = (c9p != NULL ? *(__capability void **)c9p : cclear);
	/*
	 * Bug fix: c10p was previously cast directly to a capability rather
	 * than dereferenced like c3p..c9p, passing the pointer itself
	 * instead of the capability it points at.
	 */
	c10 = (c10p != NULL ? *(__capability void **)c10p : cclear);

	v0 = sandbox_object_cinvoke(sbop,
	    methodnum,
	    a1, a2, a3, a4, a5, a6, a7,
	    c3, c4, c5, c6, c7, c8, c9, c10);
	if (v0 < 0) {
		/* Negative return: account a fault against method and object. */
		if (methodnum < SANDBOX_CLASS_METHOD_COUNT)
			SANDBOX_METHOD_FAULT(
			    sbcp->sbc_sandbox_methods[methodnum]);
		else
			SANDBOX_METHOD_FAULT(
			    sbcp->sbc_sandbox_method_nonamep);
		SANDBOX_OBJECT_FAULT(sbop->sbo_sandbox_object_statp);
	}
	return (v0);
}
	csfp = &cs.cs_frames[stack_depth - 2];
	if ((cheri_getbase(csfp->csf_pcc) != cheri_getbase(
	    sandbox_object_getobject(cheritest_objectp).co_codecap)) ||
	    cheri_getlen(csfp->csf_pcc) != cheri_getlen(
	    sandbox_object_getobject(cheritest_objectp).co_codecap))
		cheritest_failure_errx("frame 1: not sandbox code cap");
	return (0);
}

/*
 * Invoke the GETSTACK user function in the sandbox and require that it
 * return 0; any other value fails the test.
 *
 * (Removed an unused local 'cclear' capability that was computed but
 * never referenced.)
 */
void
test_sandbox_getstack(const struct cheri_test *ctp __unused)
{
	register_t v;

	v = invoke_libcheri_userfn(CHERITEST_USERFN_GETSTACK, 0);
	if (v != 0)
		cheritest_failure_errx("Incorrect return value 0x%ld"
		    " (expected 0)\n", v);
	cheritest_success();
}

#define	CHERITEST_SETSTACK_CONSTANT	37568

register_t
cheritest_libcheri_userfn_setstack(register_t arg)
{
	struct cheri_stack cs;
	struct cheri_stack_frame *csfp;
	u_int stack_depth;
Example #5
0
/*
 * XXXRW: I'm not really happy with this approach of limiting access to system
 * resources via flags passed here.  We should use a more general security
 * model based on capability permissions.  However, this does allow us to more
 * generally get up and running.
 * XXXBD: I broke the flags when switching system functions to cheri_ccallee.
 */
/*
 * Allocate and initialize a new sandbox object of class 'sbcp': allocate
 * the descriptor, map and delegate a stack, load the code/data segments,
 * and run the object's constructors.  On success, *sbopp receives the new
 * object and 0 is returned; on any failure all resources acquired so far
 * are released and -1 is returned.
 */
int
sandbox_object_new_flags(struct sandbox_class *sbcp, size_t heaplen,
    uint flags, struct sandbox_object **sbopp)
{
	struct sandbox_object *sbop;
	int error;

	if (sandbox_program_sanity_check() < 0)
		errx(1, "%s: sandbox_program_sanity_check", __func__);

	sbop = calloc(1, sizeof(*sbop));
	if (sbop == NULL)
		return (-1);
	CHERI_SYSTEM_OBJECT_INIT(sbop, cheri_system_vtable);
	sbop->sbo_sandbox_classp = sbcp;
	sbop->sbo_flags = flags;
	sbop->sbo_heaplen = heaplen;

	/*
	 * XXXRW: In due course, stack size should be a parameter rather than
	 * a constant.
	 */
	sbop->sbo_stacklen = SANDBOX_STACK_SIZE;
	sbop->sbo_stackmem = mmap(NULL, sbop->sbo_stacklen,
	    PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	/*
	 * Bug fix: mmap(2) reports failure with MAP_FAILED, not NULL; the
	 * old NULL check could never detect a failed mapping.
	 */
	if (sbop->sbo_stackmem == MAP_FAILED) {
		free(sbop);
		return (-1);
	}

	/*
	 * Configure the object's stack before loading so that the stack
	 * capability can be installed into sandbox metadata.  Note that the
	 * capability is local (can't be shared) and can store local pointers
	 * (i.e., further stack-derived capabilities such as return
	 * addresses).
	 */
	sbop->sbo_stackcap = cheri_local(cheri_ptrperm(sbop->sbo_stackmem,
	    sbop->sbo_stacklen, CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP |
	    CHERI_PERM_STORE | CHERI_PERM_STORE_CAP |
	    CHERI_PERM_STORE_LOCAL_CAP));

	/*
	 * Set up the sandbox's code/data segments, sealed capabilities.
	 */
	error = sandbox_object_load(sbcp, sbop);
	if (error) {
		(void)munmap(sbop->sbo_stackmem, sbop->sbo_stacklen);
		free(sbop);
		return (-1);
	}

	/*
	 * Invoke object instance's constructors.  Note that, given the tight
	 * binding of class and object in the sandbox library currently, this
	 * will need to change in the future.  We also need to think more
	 * carefully about the mechanism here.
	 */
	if (cheri_invoke(sbop->sbo_cheri_object_rtld,
	    SANDBOX_RUNTIME_CONSTRUCTORS,
	    0, 0, 0, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap()) != 0) {
		sandbox_object_unload(sbop);
		(void)munmap(sbop->sbo_stackmem, sbop->sbo_stacklen);
		/* Bug fix: the descriptor itself was leaked on this path. */
		free(sbop);
		return (-1);
	}

	/*
	 * Now that constructors have completed, return object.
	 */
	*sbopp = sbop;
	return (0);
}