Example no. 1
/*
 * Sandboxed magic_buffer invocation
 * 
 * a0 holds the length of the output capability, a1 holds the length of the
 * magic data, and a2 holds the length of the input file buffer.  a3
 * indicates if timing data should be collected.
 */
int
invoke(register_t a0, register_t a1, register_t a2, register_t a3)
{
	int ret = 0;
	size_t outsize, magicsize, filesize;
	char *filebuf;
	const char *type, *errstr;
	void *magicbuf;
	magic_t magic;
	int dotimings;
	uint32_t timings[4];

	outsize = a0;
	magicsize = a1;
	filesize = a2;
	dotimings = a3;

	if (dotimings)
		timings[0] = sysarch(MIPS_GET_COUNT, NULL);

	if ((magicbuf = malloc(magicsize)) == NULL)
		return (-1);
	memcpy_fromcap(magicbuf, MINIFILE_MAGIC_CAP, 0, magicsize);
	magic = magic_open(MAGIC_MIME_TYPE);
	if (magic == NULL)
		return (-1);
	if (magic_load_buffers(magic, &magicbuf, &magicsize, 1) == -1) {
		magic_close(magic);
		return (-1);
	}

	if ((filebuf = malloc(filesize)) == NULL)
		return (-1);
	memcpy_fromcap(filebuf, MINIFILE_FILE_CAP, 0, filesize);

	if (dotimings)
		timings[1] = sysarch(MIPS_GET_COUNT, NULL);

	type = magic_buffer(magic, filebuf, filesize);
	if (type == NULL) {
		ret = -1;
		errstr = magic_error(magic);
		type = (errstr == NULL ? "badmagic" : errstr);
	}

	if (dotimings)
		timings[2] = sysarch(MIPS_GET_COUNT, NULL);

	memcpy_tocap(MINIFILE_OUT_CAP, type, 0, MIN(strlen(type) + 1, outsize));

	if (dotimings) {
		timings[3] = sysarch(MIPS_GET_COUNT, NULL);

		memcpy_tocap(MINIFILE_TIMING_CAP, timings, 0,
		    (4 * sizeof(uint32_t)));
	}

	return (ret);
}
Example no. 2
int
i386_set_ioperm(unsigned int start, unsigned int length, int enable)
{
	struct i386_ioperm_args p;

	p.start = start;
	p.length = length;
	p.enable = enable;

	return (sysarch(I386_SET_IOPERM, &p));
}
Example no. 3
fp_except
fpsetmask(fp_except mask)
{
	struct alpha_fp_except_args a;

	a.mask = mask;
	return sysarch(ALPHA_FPSETMASK, &a);
}
Example no. 4
fp_except
fpsetsticky(fp_except sticky)
{
	struct alpha_fp_except_args a;

	a.mask = sticky;
	return sysarch(ALPHA_FPSETSTICKY, &a);
}
Example no. 5
int
x86_64_get_mtrr(struct mtrr *mtrrp, int *n)
{
	struct x86_64_get_mtrr_args a;

	a.mtrrp = mtrrp;
	a.n = n;
	return sysarch(X86_64_GET_MTRR, (void *)&a);
}
Example no. 6
int
i386_iopl(int iopl)
{
	struct i386_iopl_args p;

	p.iopl = iopl;

	return sysarch(I386_IOPL, &p);
}
Example no. 7
int
arm_sync_icache(uintptr_t addr, size_t len)
{
	struct arm_sync_icache_args p;

	p.addr = addr;
	p.len = len;

	return sysarch(ARM_SYNC_ICACHE, (void *)&p);
}
Example no. 8
int
cacheflush(void *addr, int nbytes, int cache)
{
	struct mips64_cacheflush_args args;

	args.va = (vaddr_t)addr;
	args.sz = (size_t)nbytes;
	args.which = cache;
	return sysarch(MIPS64_CACHEFLUSH, (void *)&args);
}
Example no. 9
void
_set_tp(void *tp)
{

#ifdef ARM_TP_ADDRESS
	*((struct tcb **)ARM_TP_ADDRESS) = tp;
#else
	sysarch(ARM_SET_TP, tp);
#endif
}
Example no. 10
int
_cacheflush(void *addr, size_t nbytes, int whichcache)
{
	struct mips_cacheflush_args cfa;

	cfa.va = (vaddr_t)(intptr_t)addr;
	cfa.nbytes = nbytes;
	cfa.whichcache = whichcache;
	return sysarch(MIPS_CACHEFLUSH, (void *)&cfa);
}
Example no. 11
int
amd64_set_fsbase(void *addr)
{

	if (amd64_detect_rdfsgsbase() == RDFSGS_SUPPORTED) {
		wrfsbase((uintptr_t)addr);
		return (0);
	}
	return (sysarch(AMD64_SET_FSBASE, &addr));
}
Example no. 12
int
x86_pkru_unprotect_range(void *addr, unsigned long len)
{
	struct amd64_set_pkru a64pkru;

	memset(&a64pkru, 0, sizeof(a64pkru));
	a64pkru.addr = addr;
	a64pkru.len = len;
	return (sysarch(X86_CLEAR_PKRU, &a64pkru));
}
Example no. 13
int
i386_vm86(int fcn, void *data)
{
	struct i386_vm86_args p;

	p.sub_op = fcn;
	p.sub_args = (char *)data;

	return (sysarch(I386_VM86, &p));
}
Example no. 14
int
i386_set_ldt(int start, union descriptor *descs, int num)
{
	struct i386_ldt_args p;

	p.start = start;
	p.descs = descs;
	p.num   = num;

	return sysarch(I386_SET_LDT, &p);
}
Example no. 15
int
x86_pkru_protect_range(void *addr, unsigned long len, u_int keyidx, int flags)
{
	struct amd64_set_pkru a64pkru;

	memset(&a64pkru, 0, sizeof(a64pkru));
	a64pkru.addr = addr;
	a64pkru.len = len;
	a64pkru.keyidx = keyidx;
	a64pkru.flags = flags;
	return (sysarch(X86_SET_PKRU, &a64pkru));
}
Example no. 16
void *
__tls_get_addr(tls_index* ti)
{
	Elf_Addr** tls;
	char *p;

	sysarch(MIPS_GET_TLS, &tls);

	p = tls_get_addr_common(tls, ti->ti_module, ti->ti_offset + TLS_DTP_OFFSET);

	return (p);
}
Example no. 17
static struct pci_io_handle *
pci_device_openbsd_open_legacy_io(struct pci_io_handle *ret,
    struct pci_device *dev, pciaddr_t base, pciaddr_t size)
{
#if defined(__i386__)
	struct i386_iopl_args ia;
#elif defined(__amd64__)
	struct amd64_iopl_args ia;
#endif

	/* With X server privilege separation, I/O access is
	   enabled early and never disabled; allow recursive,
	   privilege-less calls. */
	if (legacy_io_handle != NULL) {
		ret->base = legacy_io_handle->base;
		ret->size = legacy_io_handle->size;
		ret->memory = legacy_io_handle->memory;
		return ret;
	}
#if defined(__i386__)
	ia.iopl = 1;
	if (sysarch(I386_IOPL, &ia))
		return NULL;
#elif defined(__amd64__)
	ia.iopl = 1;
	if (sysarch(AMD64_IOPL, &ia))
		return NULL;
#elif defined(PCI_MAGIC_IO_RANGE)
	ret->memory = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    aperturefd, PCI_MAGIC_IO_RANGE + base);
	if (ret->memory == MAP_FAILED)
		return NULL;
#else
	return NULL;
#endif
	ret->base = base;
	ret->size = size;
	legacy_io_handle = ret;
	return ret;
}
Example no. 18
static int
sethae(u_int64_t hae)
{
#ifdef __FreeBSD__
#ifndef ALPHA_SETHAE
#define ALPHA_SETHAE 0
#endif
	struct parms p;
	p.hae = hae;
	return (sysarch(ALPHA_SETHAE, (char *)&p));
#endif
#ifdef __OpenBSD__
	return -1;
#endif
}
Example no. 19
void
allocate_initial_tls(Obj_Entry *objs)
{
	char *tls;
	
	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;

	tls = (char *) allocate_tls(objs, NULL, TLS_TCB_SIZE, 8);

	sysarch(MIPS_SET_TLS, tls);
}
Example no. 20
int
i386_get_ioperm(unsigned int start, unsigned int *length, int *enable)
{
	struct i386_ioperm_args p;
	int error;

	p.start = start;
	p.length = *length;
	p.enable = *enable;

	error = sysarch(I386_GET_IOPERM, &p);

	*length = p.length;
	*enable = p.enable;

	return (error);
}
Example no. 21
int
__sparc_utrap_install(utrap_entry_t type, utrap_handler_t new_precise,
    utrap_handler_t new_deferred, utrap_handler_t *old_precise,
    utrap_handler_t *old_deferred)
{
	struct sparc_utrap_install_args uia;
	struct sparc_utrap_args ua[1];

	ua[0].type = type;
	ua[0].new_precise = new_precise;
	ua[0].new_deferred = new_deferred;
	ua[0].old_precise = old_precise;
	ua[0].old_deferred = old_deferred;
	uia.num = 1;
	uia.handlers = ua;
	return (sysarch(SPARC_UTRAP_INSTALL, &uia));
}
Example no. 22
register_t
cheritest_libcheri_userfn_getstack(void)
{
	struct cheri_stack cs;
	struct cheri_stack_frame *csfp;
	u_int stack_depth;
	int retval;

	retval = sysarch(CHERI_GET_STACK, &cs);
	if (retval != 0)
		cheritest_failure_err("sysarch(CHERI_GET_STACK) failed");

	/* Does stack layout look sensible enough to continue? */
	if ((cs.cs_tsize % CHERI_FRAME_SIZE) != 0)
		cheritest_failure_errx(
		    "stack size (%ld) not a multiple of frame size",
		    cs.cs_tsize);
	stack_depth = cs.cs_tsize / CHERI_FRAME_SIZE;

	if ((cs.cs_tsp % CHERI_FRAME_SIZE) != 0)
		cheritest_failure_errx(
		    "stack pointer (%ld) not a multiple of frame size",
		    cs.cs_tsp);

	/* Validate that two stack frames are found. */
	if (cs.cs_tsp != cs.cs_tsize - (register_t)(2 * CHERI_FRAME_SIZE))
		cheritest_failure_errx("stack contains %d frames; expected "
		    "2", (cs.cs_tsize - (2 * CHERI_FRAME_SIZE)) /
		    CHERI_FRAME_SIZE);

	/* Validate that the first is a saved ambient context. */
	csfp = &cs.cs_frames[stack_depth - 1];
	if (cheri_getbase(csfp->csf_pcc) != cheri_getbase(cheri_getpcc()) ||
	    cheri_getlen(csfp->csf_pcc) != cheri_getlen(cheri_getpcc()))
		cheritest_failure_errx("frame 0: not global code cap");

	/* Validate that the second is cheritest_objectp. */
	csfp = &cs.cs_frames[stack_depth - 2];
	if ((cheri_getbase(csfp->csf_pcc) != cheri_getbase(
	    sandbox_object_getobject(cheritest_objectp).co_codecap)) ||
	    cheri_getlen(csfp->csf_pcc) != cheri_getlen(
	    sandbox_object_getobject(cheritest_objectp).co_codecap))
		cheritest_failure_errx("frame 1: not sandbox code cap");
	return (0);
}
Example no. 23
/*
 * Return the number of frames on the trusted stack.
 */
int
cheri_stack_numframes(int *numframesp)
{
	struct cheri_stack cs;

	/*
	 * Retrieve trusted stack and validate before returning a frame count.
	 */
	if (sysarch(CHERI_GET_STACK, &cs) != 0)
		return (-1);
	if ((cs.cs_tsize % CHERI_FRAME_SIZE) != 0 ||
	    (cs.cs_tsp > cs.cs_tsize) ||
	    (cs.cs_tsp % CHERI_FRAME_SIZE) != 0) {
		errno = ERANGE;
		return (-1);
	}
	*numframesp = (cs.cs_tsize - cs.cs_tsp) / CHERI_FRAME_SIZE;
	return (0);
}
Example no. 24
int
__fillcontextx2(char *ctx)
{
	struct ucontextx *ucxp;
	ucontext_t	 *ucp;
	mcontext_vfp_t	 *mvp;
	struct arm_get_vfpstate_args vfp_arg;

	ucxp = (struct ucontextx *)ctx;
	ucp = &ucxp->ucontext;
	mvp = &ucxp->mcontext_vfp;

	vfp_arg.mc_vfp_size = sizeof(mcontext_vfp_t);
	vfp_arg.mc_vfp = mvp;
	if (sysarch(ARM_GET_VFPSTATE, &vfp_arg) == -1)
		return (-1);
	ucp->uc_mcontext.mc_vfp_size = sizeof(mcontext_vfp_t);
	ucp->uc_mcontext.mc_vfp_ptr = mvp;
	return (0);
}
Example no. 25
int
__fillcontextx2(char *ctx)
{
	struct amd64_get_xfpustate xfpu;
	ucontext_t *ucp;

	ucp = (ucontext_t *)ctx;
	if (xstate_sz != 0) {
		xfpu.addr = (char *)(ucp + 1);
		xfpu.len = xstate_sz;
		if (sysarch(AMD64_GET_XFPUSTATE, &xfpu) == -1)
			return (-1);
		ucp->uc_mcontext.mc_xfpustate = (__register_t)xfpu.addr;
		ucp->uc_mcontext.mc_xfpustate_len = xstate_sz;
		ucp->uc_mcontext.mc_flags |= _MC_HASFPXSTATE;
	} else {
		ucp->uc_mcontext.mc_xfpustate = 0;
		ucp->uc_mcontext.mc_xfpustate_len = 0;
	}
	return (0);
}
Example no. 26
void __clear_cache(void* start, void* end)
{
#if __i386__ || __x86_64__
/*
 * Intel processors have a unified instruction and data cache
 * so there is nothing to do
 */
#elif defined(__NetBSD__) && defined(__arm__)
  struct arm_sync_icache_args arg;

  arg.addr = (uintptr_t)start;
  arg.len = (uintptr_t)end - (uintptr_t)start;

  sysarch(ARM_SYNC_ICACHE, &arg);
#else
    #if __APPLE__
        /* On Darwin, sys_icache_invalidate() provides this functionality */
        sys_icache_invalidate(start, end-start);
    #else
        compilerrt_abort();
    #endif
#endif
}
Example no. 27
int
freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
{
	struct sysarch_args uap1;
	struct i386_ldt_args uapl;
	struct i386_ldt_args32 uapl32;
	int error;

	if (uap->op == I386_SET_LDT || uap->op == I386_GET_LDT) {
		if ((error = copyin(uap->parms, &uapl32, sizeof(uapl32))) != 0)
			return (error);
		uap1.op = uap->op;
		uap1.parms = (char *)&uapl;
		uapl.start = uapl32.start;
		uapl.descs = (struct user_segment_descriptor *)(uintptr_t)
		    uapl32.descs;
		uapl.num = uapl32.num;
		return (sysarch_ldt(td, &uap1, UIO_SYSSPACE));
	} else {
		uap1.op = uap->op;
		uap1.parms = uap->parms;
		return (sysarch(td, &uap1));
	}
}
Example no. 28
void
__sparc_utrap_setup(void)
{

	sysarch(SPARC_UTRAP_INSTALL, (void *)&uia);
}
Example no. 29
fp_except
fpgetsticky(void)
{
	return sysarch(ALPHA_FPGETSTICKY, 0L);
}
Example no. 30
int
gc_cheri_get_ts(_gc_cap struct gc_ts *buf)
{

	return (sysarch(CHERI_GET_STACK, (void *)&buf->gts_cs));
}