Example #1
/*
 *	To do a single step ddb needs to know the next address
 *	that we will get to. It means that we need to find out
 *	both the address for a branch taken and for not taken, NOT! :-)
 *	MipsEmulateBranch will do the job to find out _exactly_ which
 *	address we will end up at so the 'dual bp' method is not
 *	required.
 */
db_addr_t
next_instr_address(db_addr_t pc, boolean_t bd)
{
	db_addr_t next;

	next = (db_addr_t)MipsEmulateBranch(kdb_frame, pc, 0, 0);
	return (next);
}
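The single address returned here is the only place execution can resume, so the debugger needs just one temporary breakpoint per step. A minimal sketch of that usage follows, assuming hypothetical db_set_temp_breakpoint(), db_continue() and db_clear_temp_breakpoint() helpers (they are not part of the example above):

/*
 * Single-step by planting one temporary breakpoint at the exact
 * follow-on address computed from the current trap frame.
 */
static void
db_single_step_once(db_addr_t pc, boolean_t bd)
{
	db_addr_t next;

	next = next_instr_address(pc, bd);	/* exact continuation PC */
	db_set_temp_breakpoint(next);		/* hypothetical helper */
	db_continue();				/* hypothetical helper: resume the target */
	db_clear_temp_breakpoint(next);		/* hypothetical helper */
}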
Example #2
int
ptrace_single_step(struct thread *td)
{
	unsigned va;
	struct trapframe *locr0 = td->td_frame;
	int i;
	int bpinstr = MIPS_BREAK_SSTEP;
	int curinstr;
	struct proc *p;

	p = td->td_proc;
	PROC_UNLOCK(p);
	/*
	 * Fetch what's at the current location.
	 */
	ptrace_read_int(td,  (off_t)locr0->pc, &curinstr);

	/* compute next address after current location */
	if (curinstr != 0) {
		va = MipsEmulateBranch(locr0, locr0->pc, locr0->fsr,
		    (uintptr_t)&curinstr);
	} else {
		va = locr0->pc + 4;
	}
	if (td->td_md.md_ss_addr) {
		printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
		    p->p_comm, p->p_pid, td->td_md.md_ss_addr, va); /* XXX */
		return (EFAULT);
	}
	td->td_md.md_ss_addr = va;
	/*
	 * Fetch what's at the "next" location.
	 */
	ptrace_read_int(td, (off_t)va, &td->td_md.md_ss_instr);

	/*
	 * Store breakpoint instruction at the "next" location now.
	 */
	i = ptrace_write_int(td, va, bpinstr);

	/*
	 * The sync'ing of I & D caches is done by procfs_domem()
	 * through procfs_rwmem().
	 */

	PROC_LOCK(p);
	if (i < 0)
		return (EFAULT);
#if 0
	printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
	    p->p_comm, p->p_pid, p->p_md.md_ss_addr,
	    p->p_md.md_ss_instr, locr0->pc, curinstr); /* XXX */
#endif
	return (0);
}
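Undoing the step later has to restore exactly what was saved here: the original instruction kept in md_ss_instr goes back over the breakpoint at md_ss_addr, and both fields are reset. A rough sketch of that counterpart, reusing ptrace_write_int() from the example above; this is an illustration, not the real ptrace_clear_single_step():

static int
ptrace_clear_single_step_sketch(struct thread *td)
{
	/* Nothing armed, nothing to undo. */
	if (td->td_md.md_ss_addr == 0)
		return (0);

	/* Write the saved instruction back over the breakpoint. */
	if (ptrace_write_int(td, td->td_md.md_ss_addr,
	    td->td_md.md_ss_instr) < 0)
		return (EFAULT);

	td->td_md.md_ss_addr = 0;
	td->td_md.md_ss_instr = 0;
	return (0);
}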
Example #3
/*
 * Return the next pc if the given branch is taken.
 * MachEmulateBranch() runs analysis for branch delay slot.
 */
db_addr_t
branch_taken(int inst, db_addr_t pc)
{
	db_addr_t ra;
	register_t fpucsr;

	/* TBD: when is fsr set */
	fpucsr = (curthread) ? curthread->td_pcb->pcb_regs.fsr : 0;
	ra = (db_addr_t)MipsEmulateBranch(kdb_frame, pc, fpucsr, 0);
	return (ra);
}
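For reference, the arithmetic MipsEmulateBranch carries out for the common encodings is fixed by the MIPS ISA: a taken I-type branch lands at the delay-slot address plus the sign-extended 16-bit offset scaled by four, a not-taken branch at pc + 8, and a J-type jump keeps the upper bits of the delay-slot address. A simplified sketch covering only beq and j/jal (the real routine also handles jr/jalr, branch-likely forms, and FP condition branches via fpucsr):

static db_addr_t
branch_target_sketch(uint32_t inst, db_addr_t pc, register_t rs, register_t rt)
{
	uint32_t op = inst >> 26;
	int32_t simm = (int16_t)(inst & 0xffff);	/* sign-extended word offset */

	switch (op) {
	case 0x04:	/* beq rs,rt,offset */
		return (rs == rt) ? pc + 4 + simm * 4 : pc + 8;
	case 0x02:	/* j target */
	case 0x03:	/* jal target */
		/* Upper bits come from the delay-slot address. */
		return ((pc + 4) & ~(db_addr_t)0x0fffffff) |
		    ((inst & 0x03ffffff) << 2);
	default:	/* not a control-transfer instruction */
		return (pc + 4);
	}
}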
Example #4
static int
cheriabi_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct trapframe *locr0 = td->td_frame;	 /* aka td->td_pcb->pcv_regs */
	struct sysentvec *se;
#ifdef OLD_ARG_HANDLING
	register_t intargs[8];
	uintptr_t ptrargs[8];
	u_int tag;
	int i, isaved, psaved, curint, curptr, nintargs, nptrargs;
#endif
	int error;

	error = 0;

	bzero(sa->args, sizeof(sa->args));

	/* compute next PC after syscall instruction */
	td->td_pcb->pcb_tpc = sa->trapframe->pc; /* Remember if restart */
	if (DELAYBRANCH(sa->trapframe->cause))	 /* Check BD bit */
		locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
	else
		locr0->pc += sizeof(int);
	sa->code = locr0->v0;

	se = td->td_proc->p_sysent;
	if (se->sv_mask)
		sa->code &= se->sv_mask;

	if (sa->code >= se->sv_size)
		sa->callp = &se->sv_table[0];
	else
		sa->callp = &se->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

#ifndef OLD_ARG_HANDLING
	error = cheriabi_dispatch_fill_uap(td, sa->code, sa->args);
#else

	intargs[0] = locr0->a0;
	intargs[1] = locr0->a1;
	intargs[2] = locr0->a2;
	intargs[3] = locr0->a3;
	intargs[4] = locr0->a4;
	intargs[5] = locr0->a5;
	intargs[6] = locr0->a6;
	intargs[7] = locr0->a7;
	isaved = 8;

#if defined(CPU_CHERI_CHERI0) || defined (CPU_CHERI_CHERI8) || defined(CPU_CHERI_CHERI16)
#error	CHERIABI does not support fewer than 8 argument registers
#endif
	/*
	 * XXXBD: We should ideally use a user capability rather than $kdc
	 * to generate the pointers, but then we have to answer: which one?
	 *
	 * XXXRW: The kernel cannot distinguish between pointers with tags vs.
	 * untagged (possible) integers, which is problematic when a
	 * system-call argument is an intptr_t.  We used to just use CToPtr
	 * here, but this caused untagged integer arguments to be lost.  Now
	 * we pick one of CToPtr and CToInt based on the tag -- but this is
	 * not really ideal.  Instead, we'd prefer that the kernel could
	 * differentiate between the two explicitly using tagged capabilities,
	 * which we're not yet ready to do.
	 */
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c3, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[0], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[0], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c4, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[1], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[1], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c5, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[2], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[2], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c6, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[3], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[3], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c7, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[4], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[4], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c8, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[5], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[5], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c9, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[6], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[6], CHERI_CR_CTEMP0);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &locr0->c10, 0);
	CHERI_CGETTAG(tag, CHERI_CR_CTEMP0);
	if (tag)
		CHERI_CTOPTR(ptrargs[7], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	else
		CHERI_CTOINT(ptrargs[7], CHERI_CR_CTEMP0);
	psaved = 8;

#ifdef TRAP_DEBUG
	if (trap_debug)
		printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);
#endif

	nptrargs = bitcount(CHERIABI_SYS_argmap[sa->code].sam_ptrmask);
	nintargs = sa->narg - nptrargs;
	KASSERT(nintargs <= isaved,
	    ("SYSCALL #%u pid:%u, nintargs (%u) > isaved (%u).\n",
	     sa->code, td->td_proc->p_pid, nintargs, isaved));
	KASSERT(nptrargs <= psaved,
	    ("SYSCALL #%u pid:%u, nptrargs (%u) > psaved (%u).\n",
	     sa->code, td->td_proc->p_pid, nptrargs, psaved));

	/*
	 * Check each argument to see if it is a pointer and pop an argument
	 * off the appropriate list.
	 */
	curint = curptr = 0;
	for (i = 0; i < sa->narg; i++)
		sa->args[i] =
		    (CHERIABI_SYS_argmap[sa->code].sam_ptrmask & 1 << i) ?
		    ptrargs[curptr++] : intargs[curint++];
#endif /* OLD_ARG_HANDLING */

	td->td_retval[0] = 0;
	td->td_retval[1] = locr0->v1;

	return (error);
}
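The loop at the end is the heart of the argument-fetch logic: bit i of sam_ptrmask says whether argument slot i is capability-derived or a plain integer register. A self-contained sketch of that interleave with made-up parameter names (the real mask comes from CHERIABI_SYS_argmap[sa->code]):

static void
merge_args_sketch(uintptr_t *args, int narg, uint32_t ptrmask,
    const register_t *intargs, const uintptr_t *ptrargs)
{
	int i, curint = 0, curptr = 0;

	/* Pop from the pointer list when the mask bit is set, else from ints. */
	for (i = 0; i < narg; i++)
		args[i] = (ptrmask & (1u << i)) ?
		    ptrargs[curptr++] : (uintptr_t)intargs[curint++];
}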
Example #5
/*
 * Handle a single exception.
 */
void
itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
    int type)
{
	int i;
	unsigned ucode = 0;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;
	struct pcb *pcb;

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			pt_entry_t *pte, entry;
			paddr_t pa;
			vm_page_t pg;

			pte = kvtopte(trapframe->badvaddr);
			entry = *pte;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (pmap_is_page_ro(pmap_kernel(),
			    trunc_page(trapframe->badvaddr), entry)) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				pcb = &p->p_addr->u_pcb;
				goto kernel_fault;
			}
			entry |= PG_M;
			*pte = entry;
			KERNEL_LOCK();
			pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET,
			    entry);
			pa = pfn_to_pad(entry);
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL)
				panic("trap: ktlbmod: unmanaged page");
			pmap_set_modify(pg);
			KERNEL_UNLOCK();
			return;
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pt_entry_t *pte, entry;
		paddr_t pa;
		vm_page_t pg;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;

		if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
			panic("trap: utlbmod: invalid segmap");
		pte += uvtopte(trapframe->badvaddr);
		entry = *pte;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (pmap_is_page_ro(pmap,
		    trunc_page(trapframe->badvaddr), entry)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			pcb = &p->p_addr->u_pcb;
			goto fault_common_no_miss;
		}
		entry |= PG_M;
		*pte = entry;
		KERNEL_LOCK();
		pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), 
		    entry);
		pa = pfn_to_pad(entry);
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("trap: utlbmod: unmanaged page");
		pmap_set_modify(pg);
		KERNEL_UNLOCK();
		return;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		pcb = &p->p_addr->u_pcb;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			onfault = pcb->pcb_onfault;
			pcb->pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			pcb->pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (pcb->pcb_onfault != 0) {
			/*
			 * We want to resolve the TLB fault before invoking
			 * pcb_onfault if necessary.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		pcb = &p->p_addr->u_pcb;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
		pcb = &p->p_addr->u_pcb;
fault_common:

#ifdef CPU_R4000
		if (r4000_errata != 0) {
			if (eop_tlb_miss_handler(trapframe, ci, p) != 0)
				return;
		}
#endif

fault_common_no_miss:

#ifdef CPU_R4000
		if (r4000_errata != 0) {
			eop_cleanup(trapframe, p);
		}
#endif

	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, va, 0, ftype);
		pcb->pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0)
			return;
		if (!USERMODE(trapframe->sr)) {
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc =  onfault_table[onfault];
				return;
			}
			goto err;
		}

		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
		break;
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trap_frame *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		register_t tpc;
		int numsys, error;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		atomic_add_int(&uvmexp.syscalls, 1);

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc; /* Remember if restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_p->ps_emul->e_sysent;
		numsys = p->p_p->ps_emul->e_nsysent;
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
		case SYS___syscall:
			/*
			 * Code is first argument, followed by actual args.
			 * __syscall provides the code as a quad to maintain
			 * proper alignment of 64-bit arguments on 32-bit
			 * platforms, which doesn't change anything here.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				if (i > 7)
					if ((error = copyin((void *)locr0->sp,
					    &args.i[7], sizeof(register_t))))
						goto bad;
			}
			break;
		default:
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;

			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}

		rval[0] = 0;
		rval[1] = locr0->v1;

#if defined(DDB) || defined(DEBUG)
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif

		error = mi_syscall(p, code, callp, args.i, rval);

		switch (error) {
		case 0:
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
		bad:
			locr0->v0 = error;
			locr0->a3 = 1;
		}

		mi_syscall_return(p, code, error, rval);

		return;
	    }

	case T_BREAK:
#ifdef DDB
		kdb_trap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    (void *)p->p_md.md_ss_addr,
				    p->p_md.md_ss_instr);
#endif

				/* Restore original instruction and clear BP */
				process_sstep(p, 0);
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    UVM_PROT_NONE, FALSE);
				return;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}
		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;
		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			return;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
						trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			return;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
	 	 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		return;

	case T_FPE:
		printf("FPU Trap: PC %lx CR %lx SR %lx\n",
			trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		return;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		pcb = &p->p_addr->u_pcb;
		if ((onfault = pcb->pcb_onfault) != 0) {
			pcb->pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

	default:
	err:
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap", printf);
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    (void *)trapframe->pc, (void *)trapframe->ra,
		    (void *)trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
		kdb_trap(type, trapframe);
#endif
		panic("trap");
	}

#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();
}
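One pattern recurs throughout itsa() whenever software has to step past the instruction that trapped: if the cause register's CR_BR_DELAY bit marks the fault as sitting in a branch delay slot, the branch itself is re-evaluated with MipsEmulateBranch() to find the continuation PC; otherwise the PC simply advances by one instruction. Extracted as a sketch (the real call sites operate on the live trap frame directly):

static void
advance_past_trap_sketch(struct trap_frame *locr0,
    struct trap_frame *trapframe)
{
	if (trapframe->cause & CR_BR_DELAY)
		locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0);
	else
		locr0->pc += 4;	/* no delay slot: next sequential instruction */
}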
Example #6
static int
cheriabi_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct trapframe *locr0 = td->td_frame;	 /* aka td->td_pcb->pcv_regs */
	struct cheri_frame *capreg = &td->td_pcb->pcb_cheriframe;
	register_t intargs[8];
	uintptr_t ptrargs[8];
	struct sysentvec *se;
	int error, i, isaved, psaved, curint, curptr, nintargs, nptrargs;

	error = 0;

	bzero(sa->args, sizeof(sa->args));

	/* compute next PC after syscall instruction */
	td->td_pcb->pcb_tpc = sa->trapframe->pc; /* Remember if restart */
	if (DELAYBRANCH(sa->trapframe->cause))	 /* Check BD bit */
		locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
	else
		locr0->pc += sizeof(int);
	sa->code = locr0->v0;

	switch (sa->code) {
	case CHERIABI_SYS___syscall:
	case CHERIABI_SYS_syscall:
		/*
		 * This is an indirect syscall, in which the code is the first
		 * argument.
		 */
		sa->code = locr0->a0;
		intargs[0] = locr0->a1;
		intargs[1] = locr0->a2;
		intargs[2] = locr0->a3;
		intargs[3] = locr0->a4;
		intargs[4] = locr0->a5;
		intargs[5] = locr0->a6;
		intargs[6] = locr0->a7;
		isaved = 7;
		break;
	default:
		/*
		 * A direct syscall; the arguments are just the parameters
		 * to the syscall.
		 */
		intargs[0] = locr0->a0;
		intargs[1] = locr0->a1;
		intargs[2] = locr0->a2;
		intargs[3] = locr0->a3;
		intargs[4] = locr0->a4;
		intargs[5] = locr0->a5;
		intargs[6] = locr0->a6;
		intargs[7] = locr0->a7;
		isaved = 8;
		break;
	}

#if defined(CPU_CHERI_CHERI0) || defined (CPU_CHERI_CHERI8) || defined(CPU_CHERI_CHERI16)
#error	CHERIABI does not support fewer than 8 argument registers
#endif
	/*
	 * XXXBD: we should ideally use a user capability rather than KDC
	 * to generate the pointers, but then we have to answer: which one?
	 */
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c3, 0);
	CHERI_CTOPTR(ptrargs[0], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c4, 0);
	CHERI_CTOPTR(ptrargs[1], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c5, 0);
	CHERI_CTOPTR(ptrargs[2], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c6, 0);
	CHERI_CTOPTR(ptrargs[3], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c7, 0);
	CHERI_CTOPTR(ptrargs[4], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c8, 0);
	CHERI_CTOPTR(ptrargs[5], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c9, 0);
	CHERI_CTOPTR(ptrargs[6], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &capreg->cf_c10, 0);
	CHERI_CTOPTR(ptrargs[7], CHERI_CR_CTEMP0, CHERI_CR_KDC);
	psaved = 8;

#ifdef TRAP_DEBUG
	if (trap_debug)
		printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);
#endif

	se = td->td_proc->p_sysent;
	/*
	 * XXX
	 * Shouldn't this go before switching on the code?
	 */
	if (se->sv_mask)
		sa->code &= se->sv_mask;

	if (sa->code >= se->sv_size)
		sa->callp = &se->sv_table[0];
	else
		sa->callp = &se->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

	nptrargs = bitcount(CHERIABI_SYS_argmap[sa->code].sam_ptrmask);
	nintargs = sa->narg - nptrargs;
	KASSERT(nintargs <= isaved,
	    ("SYSCALL #%u pid:%u, nintargs (%u) > isaved (%u).\n",
	     sa->code, td->td_proc->p_pid, nintargs, isaved));
	KASSERT(nptrargs <= psaved,
	    ("SYSCALL #%u pid:%u, nptrargs (%u) > psaved (%u).\n",
	     sa->code, td->td_proc->p_pid, nptrargs, psaved));

	/*
	 * Check each argument to see if it is a pointer and pop an argument
	 * off the appropriate list.
	 */
	curint = curptr = 0;
	for (i = 0; i < sa->narg; i++)
		sa->args[i] =
		    (CHERIABI_SYS_argmap[sa->code].sam_ptrmask & 1 << i) ?
		    ptrargs[curptr++] : intargs[curint++];

	td->td_retval[0] = 0;
	td->td_retval[1] = locr0->v1;

	return (error);
}
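The two KASSERTs rest on a simple invariant: the number of pointer arguments is the population count of the syscall's pointer mask, and whatever is left over must fit in the saved integer registers. A self-contained sketch of that bookkeeping, using __builtin_popcount() in place of the kernel's bitcount():

static int
split_arg_counts_sketch(uint32_t ptrmask, int narg, int *nintargsp)
{
	int nptrargs = __builtin_popcount(ptrmask);	/* pointer slots */

	*nintargsp = narg - nptrargs;	/* remaining slots are integers */
	return (nptrargs);
}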