Example #1
/*
 * Invoke method 0 of the given system object, passing zero in every
 * general-purpose argument register and no capabilities.
 */
static int
invoke_syscap(struct cheri_object system_object)
{

	return (cheri_invoke(system_object, 0, 0, 0, 0, 0, 0, 0, 0, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL));
}
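The call above selects method 0 of the system object, zeroes the remaining general-purpose argument registers, and hands the callee no capabilities. A minimal sketch that restates the same call with the argument groups labelled, assuming the cheri_invoke() prototype used in Example #1 and that the library's declarations are already in scope (the wrapper name is illustrative):

/*
 * Illustrative restatement of Example #1 (not part of the library):
 * method number 0, remaining general-purpose arguments zeroed, and no
 * capability arguments; Examples #3 and #4 pass cheri_zerocap() for the
 * same purpose.
 */
static int
invoke_syscap_labelled(struct cheri_object system_object)
{

	return (cheri_invoke(system_object,
	    0,				/* Method number. */
	    0, 0, 0, 0, 0, 0, 0,	/* Remaining general-purpose arguments. */
	    NULL, NULL, NULL, NULL,	/* Capability arguments ($c3..$c10, */
	    NULL, NULL, NULL, NULL));	/* per the comment in Example #2). */
}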
Example #2
register_t
sandbox_object_cinvoke(struct sandbox_object *sbop, register_t methodnum,
    register_t a1, register_t a2, register_t a3,
    register_t a4, register_t a5, register_t a6, register_t a7,
    __capability void *c3, __capability void *c4, __capability void *c5,
    __capability void *c6, __capability void *c7, __capability void *c8,
    __capability void *c9, __capability void *c10)
{
	struct sandbox_class *sbcp;
	uint64_t sample, start;
	register_t v0;

	/*
	 * XXXRW: TODO:
	 *
	 * 1. What about $v1, capability return values?
	 * 2. Does the right thing happen with $a0..$a7, $c3..$c10?
	 */
	sbcp = sbop->sbo_sandbox_classp;
	if (methodnum < SANDBOX_CLASS_METHOD_COUNT)
		SANDBOX_METHOD_INVOKE(sbcp->sbc_sandbox_methods[methodnum]);
	else
		SANDBOX_METHOD_INVOKE(sbcp->sbc_sandbox_method_nonamep);
	SANDBOX_OBJECT_INVOKE(sbop->sbo_sandbox_object_statp);
	start = cheri_get_cyclecount();
	v0 = cheri_invoke(sbop->sbo_cheri_object_invoke,
	    CHERI_INVOKE_METHOD_LEGACY_INVOKE,
	    methodnum,
	    a1, a2, a3, a4, a5, a6, a7,
	    c3, c4, c5, c6, c7, c8, c9, c10);
	sample = cheri_get_cyclecount() - start;
	if (methodnum < SANDBOX_CLASS_METHOD_COUNT)
		SANDBOX_METHOD_TIME_SAMPLE(
		    sbcp->sbc_sandbox_methods[methodnum], sample);
	else
		SANDBOX_METHOD_TIME_SAMPLE(
		    sbcp->sbc_sandbox_method_nonamep, sample);
	SANDBOX_OBJECT_TIME_SAMPLE(sbop->sbo_sandbox_object_statp, sample);
	if (v0 < 0) {
		if (methodnum < SANDBOX_CLASS_METHOD_COUNT)
			SANDBOX_METHOD_FAULT(
			    sbcp->sbc_sandbox_methods[methodnum]);
		else
			SANDBOX_METHOD_FAULT(
			    sbcp->sbc_sandbox_method_nonamep);
		SANDBOX_OBJECT_FAULT(sbop->sbo_sandbox_object_statp);
	}
	return (v0);
}
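sandbox_object_cinvoke() forwards a full set of general-purpose and capability arguments, so callers of simpler methods fill the unused slots themselves. A minimal sketch of such a wrapper, assuming the declarations above are in scope; the wrapper and its argument layout are illustrative, not part of the library:

/*
 * Hypothetical convenience wrapper (not part of the library): invoke
 * methodnum on sbop with up to two integer arguments and no capability
 * arguments, clearing the unused capability slots with cheri_zerocap()
 * as Examples #3 and #4 do.
 */
static register_t
sandbox_call_simple(struct sandbox_object *sbop, register_t methodnum,
    register_t arg1, register_t arg2)
{

	return (sandbox_object_cinvoke(sbop, methodnum,
	    arg1, arg2, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap()));
}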
Example #3
int
sandbox_object_reset(struct sandbox_object *sbop)
{
	struct sandbox_class *sbcp;

	assert(sbop != NULL);
	sbcp = sbop->sbo_sandbox_classp;
	assert(sbcp != NULL);

	/*
	 * Reset loader-managed address space.
	 */
	if (sandbox_object_reload(sbop) == -1) {
		warn("%s:, sandbox_object_reload", __func__);
		return (-1);
	}

	/*
	 * Reset external stack.
	 */
	if (mmap(sbop->sbo_stackmem, sbop->sbo_stacklen,
	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_FIXED, -1, 0) ==
	    MAP_FAILED) {
		warn("%s: stack reset", __func__);
		return (-1);
	}

	/*
	 * (Re-)invoke object instance's constructors.  Note that, given the
	 * tight binding of class and object in the sandbox library currently,
	 * this will need to change in the future.  We also need to think more
	 * carefully about the mechanism here.
	 */
	(void)cheri_invoke(sbop->sbo_cheri_object_rtld,
	    SANDBOX_RUNTIME_CONSTRUCTORS,
	    0, 0, 0, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap());

	return (0);
}
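Example #2 counts a negative return value as a fault, and sandbox_object_reset() restores the object's loader-managed memory, stack, and constructor state. A sketch of how a caller might combine the two, assuming the declarations above are in scope; the recovery policy and method arguments are illustrative only:

/*
 * Hypothetical recovery pattern (not part of the library): if an
 * invocation reports a fault (negative return value, per Example #2),
 * reset the object before it is reused.
 */
static int
invoke_with_recovery(struct sandbox_object *sbop, register_t methodnum)
{
	register_t ret;

	ret = sandbox_object_cinvoke(sbop, methodnum, 0, 0, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap());
	if (ret < 0 && sandbox_object_reset(sbop) == -1)
		return (-1);	/* Faulted and reset failed; object unusable. */
	return (ret < 0 ? -1 : 0);
}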
Example #4
/*
 * XXXRW: I'm not really happy with this approach of limiting access to system
 * resources via flags passed here.  We should use a more general security
 * model based on capability permissions.  However, this does allow us to more
 * generally get up and running.
 * XXXBD: I broke the flags when switching system functions to cheri_ccallee.
 */
int
sandbox_object_new_flags(struct sandbox_class *sbcp, size_t heaplen,
    uint flags, struct sandbox_object **sbopp)
{
	struct sandbox_object *sbop;
	int error;

	if (sandbox_program_sanity_check() < 0)
		errx(1, "%s: sandbox_program_sanity_check", __func__);

	sbop = calloc(1, sizeof(*sbop));
	if (sbop == NULL)
		return (-1);
	CHERI_SYSTEM_OBJECT_INIT(sbop, cheri_system_vtable);
	sbop->sbo_sandbox_classp = sbcp;
	sbop->sbo_flags = flags;
	sbop->sbo_heaplen = heaplen;

	/*
	 * XXXRW: In due course, stack size should be a parameter rather than
	 * a constant.
	 */
	sbop->sbo_stacklen = SANDBOX_STACK_SIZE;
	sbop->sbo_stackmem = mmap(NULL, sbop->sbo_stacklen,
	    PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	if (sbop->sbo_stackmem == MAP_FAILED) {
		free(sbop);
		return (-1);
	}

	/*
	 * Configure the object's stack before loading so that the stack
	 * capability can be installed into sandbox metadata.  Note that the
	 * capability is local (can't be shared) and can store local pointers
	 * (i.e., further stack-derived capabilities such as return
	 * addresses).
	 */
	sbop->sbo_stackcap = cheri_local(cheri_ptrperm(sbop->sbo_stackmem,
	    sbop->sbo_stacklen, CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP |
	    CHERI_PERM_STORE | CHERI_PERM_STORE_CAP |
	    CHERI_PERM_STORE_LOCAL_CAP));

	/*
	 * Set up the sandbox's code/data segments, sealed capabilities.
	 */
	error = sandbox_object_load(sbcp, sbop);
	if (error) {
		(void)munmap(sbop->sbo_stackmem, sbop->sbo_stacklen);
		free(sbop);
		return (-1);
	}

	/*
	 * Invoke object instance's constructors.  Note that, given the tight
	 * binding of class and object in the sandbox library currently, this
	 * will need to change in the future.  We also need to think more
	 * carefully about the mechanism here.
	 */
	if (cheri_invoke(sbop->sbo_cheri_object_rtld,
	    SANDBOX_RUNTIME_CONSTRUCTORS,
	    0, 0, 0, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap()) != 0) {
		sandbox_object_unload(sbop);
		(void)munmap(sbop->sbo_stackmem, sbop->sbo_stacklen);
		free(sbop);
		return (-1);
	}

	/*
	 * Now that constructors have completed, return object.
	 */
	*sbopp = sbop;
	return (0);
}
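Putting the examples together: a caller creates an object from an existing sandbox class with sandbox_object_new_flags(), invokes a method on it with sandbox_object_cinvoke(), and may reset it after a fault. A sketch under those assumptions, with the declarations above taken to be in scope; the heap size, flags, and method number are placeholder values, and class creation and object teardown are not shown in these examples, so they are omitted here:

/*
 * Hypothetical usage sketch (not part of the library): sbcp is a sandbox
 * class created elsewhere; the heap size (4 MB), flags (0), and method
 * number (1) are placeholder values.  Teardown is omitted because these
 * examples do not show it.
 */
static int
run_one_method(struct sandbox_class *sbcp)
{
	struct sandbox_object *sbop;
	register_t ret;

	if (sandbox_object_new_flags(sbcp, 4 * 1024 * 1024, 0, &sbop) != 0)
		return (-1);
	ret = sandbox_object_cinvoke(sbop, 1, 0, 0, 0, 0, 0, 0, 0,
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap(), cheri_zerocap(),
	    cheri_zerocap(), cheri_zerocap());
	if (ret < 0)
		(void)sandbox_object_reset(sbop);	/* Best-effort reset after a fault. */
	return (ret < 0 ? -1 : 0);
}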